/**
 * Returns the base directory of the cluster configuration, delegating to the
 * underlying implementation.
 *
 * @return the base directory of the cluster configuration
 */
public File getDir() {
  return impl.getDir();
}
@Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { File baseDir = cfg.getDir(); volDirBase = new File(baseDir, "volumes"); File v1f = new File(volDirBase, "v1"); File v2f = new File(volDirBase, "v2"); v1 = new Path("file://" + v1f.getAbsolutePath()); v2 = new Path("file://" + v2f.getAbsolutePath()); // Run MAC on two locations in the local file system cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString()); // use raw local file system so walogs sync and flush will work hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName()); }
/**
 * Exposes the configured base directory of the shared mini cluster.
 *
 * @return the mini cluster configuration's base directory
 */
public static File getMiniClusterDir() {
  return cluster.getConfig().getDir();
}
@Override
public Path getTemporaryPath() {
  // Without mini DFS, carve a "tmp" directory out of the cluster's base dir;
  // with mini DFS, the DFS-rooted /tmp/ is used instead.
  if (!config.useMiniDFS()) {
    File localTmp = new File(config.getDir(), "tmp");
    mkdirs(localTmp);
    return new Path(localTmp.toString());
  }
  return new Path("/tmp/");
}
@Override
public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Build two candidate volume paths (v1, v2) under the MAC base directory.
  File baseDir = cfg.getDir();
  File volDirBase = new File(baseDir, "volumes");
  File v1f = new File(volDirBase, "v1");
  File v2f = new File(volDirBase, "v2");
  v1 = new Path("file://" + v1f.getAbsolutePath());
  v2 = new Path("file://" + v2f.getAbsolutePath());

  // Use a VolumeChooser which should be more fair
  cfg.setProperty(Property.GENERAL_VOLUME_CHOOSER, FairVolumeChooser.class.getName());

  // NOTE(review): only v1 is registered as an instance volume even though v2 is built
  // above, and the original comment here claimed "two locations". Presumably the test
  // adds v2 later to exercise the chooser across a volume addition — confirm; if both
  // volumes were intended from the start, v2 should be appended to this property.
  cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString());

  // use raw local file system so walogs sync and flush will work
  hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
  super.configure(cfg, hadoopCoreSite);
}
/**
 * Counts the RFiles present for table id {@code 1} under the mini cluster's
 * local directory.
 *
 * @return the number of files matching {@code .../accumulo/tables/1/<tablet>/<file>.rf}
 * @throws Exception if the glob against the cluster file system fails
 */
private int countFiles() throws Exception {
  Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
  // globStatus already returns an array; its length is the count. The previous
  // Iterators.size(Arrays.asList(...).iterator()) round-trip through Guava was
  // equivalent (including the NPE if globStatus returns null) but needlessly indirect.
  return cluster.getFileSystem().globStatus(path).length;
}
@SuppressWarnings("deprecation")
@Override
public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Build two local-filesystem volume paths under the MAC base directory.
  File baseDir = cfg.getDir();
  volDirBase = new File(baseDir, "volumes");
  File v1f = new File(volDirBase, "v1");
  File v2f = new File(volDirBase, "v2");
  v1 = new Path("file://" + v1f.getAbsolutePath());
  v2 = new Path("file://" + v2f.getAbsolutePath());

  // Run MAC on two locations in the local file system
  // The single-volume properties below are deprecated (hence @SuppressWarnings);
  // they are pointed at v1 only.
  URI v1Uri = v1.toUri();
  cfg.setProperty(Property.INSTANCE_DFS_DIR, v1Uri.getPath());
  // NOTE(review): scheme and host are concatenated with no "://" separator, and for a
  // "file://" URI getHost() is typically null — this likely yields "filenull".
  // Presumably harmless because INSTANCE_DFS_URI is deprecated here, but confirm.
  cfg.setProperty(Property.INSTANCE_DFS_URI, v1Uri.getScheme() + v1Uri.getHost());
  cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
  cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");

  // use raw local file system so walogs sync and flush will work
  hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
  super.configure(cfg, hadoopCoreSite);
}
@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { // Get 2 tablet servers cfg.setNumTservers(2); namespace1 = "ns_" + getUniqueNames(2)[0]; namespace2 = "ns_" + getUniqueNames(2)[1]; // Set the general volume chooser to the PerTableVolumeChooser so that different choosers can be // specified Map<String,String> siteConfig = new HashMap<>(); siteConfig.put(Property.GENERAL_VOLUME_CHOOSER.getKey(), PerTableVolumeChooser.class.getName()); cfg.setSiteConfig(siteConfig); // Set up 4 different volume paths File baseDir = cfg.getDir(); volDirBase = new File(baseDir, "volumes"); File v1f = new File(volDirBase, "v1"); File v2f = new File(volDirBase, "v2"); File v3f = new File(volDirBase, "v3"); File v4f = new File(volDirBase, "v4"); v1 = new Path("file://" + v1f.getAbsolutePath()); v2 = new Path("file://" + v2f.getAbsolutePath()); v3 = new Path("file://" + v3f.getAbsolutePath()); v4 = new Path("file://" + v4f.getAbsolutePath()); // Only add volumes 1, 2, and 4 to the list of instance volumes to have one volume that isn't in // the options list when they are choosing cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString() + "," + v4.toString()); // use raw local file system so walogs sync and flush will work hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName()); super.configure(cfg, hadoopCoreSite); }
@Before
public void checkCluster() {
  // These tests need direct access to the cluster directory, so they only run
  // against a MINI cluster.
  Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
  rootPath = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
}
// Populate the child process environment with Accumulo/Hadoop locations derived from
// the cluster config (fragment — the enclosing method is not visible in this chunk).
builder.environment().put("ACCUMULO_HOME", config.getDir().getAbsolutePath());
builder.environment().put("ACCUMULO_LOG_DIR", config.getLogDir().getAbsolutePath());
// NOTE(review): the put(...) below appears truncated — the value argument and closing
// parenthesis for ACCUMULO_CLIENT_CONF_PATH are missing, likely lost in extraction.
builder.environment().put("ACCUMULO_CLIENT_CONF_PATH",
builder.environment().put("ACCUMULO_CONF_DIR", config.getConfDir().getAbsolutePath());
// HADOOP_HOME is pointed at the cluster dir as well — presumably a stand-in so child
// scripts find *some* home; confirm against the launcher's expectations.
builder.environment().put("HADOOP_HOME", config.getDir().getAbsolutePath());
if (config.getHadoopConfDir() != null)
  builder.environment().put("HADOOP_CONF_DIR", config.getHadoopConfDir().getAbsolutePath());
@Test
public void bulk() throws Exception {
  // Delegate to the shared BulkIT driver, staging bulk files under "tmp" in the
  // cluster's base directory.
  Path bulkBase = new Path(getCluster().getConfig().getDir().getAbsolutePath(), "tmp");
  BulkIT.runTest(getConnector(), cluster.getFileSystem(), bulkBase, "root",
      getUniqueNames(1)[0], this.getClass().getName(), testName.getMethodName());
}
// Fragment: assembling a ProcessBuilder to launch a TabletServer in a separate JVM
// (the enclosing method is not visible in this chunk).
String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
String classpath = System.getProperty("java.class.path");
// Prepend the MAC conf dir so the spawned JVM picks up the cluster configuration.
classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
String className = TabletServer.class.getName();
// argList is empty at this point — presumably populated between here and process
// start, outside this view; confirm in the full method.
ArrayList<String> argList = new ArrayList<>();
ProcessBuilder builder = new ProcessBuilder(argList);
Map<String,String> env = builder.environment();
env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
// Marker file path placed in the log dir; its role is defined elsewhere in the test.
String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
// Resolve the directory whose listing the test will inspect, based on cluster type
// (fragment — only the MINI case of the enclosing switch is visible here).
switch (getClusterType()) {
  case MINI:
    // For MINI, list the MiniAccumuloCluster's own base directory.
    dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir()
        .getAbsolutePath();
    break;
// Validate the configured base directory: it must either not exist yet, or be an
// existing *empty* directory (fragment — closing braces continue outside this view).
if (this.getDir().exists() && !this.getDir().isDirectory())
  throw new IllegalArgumentException("Must pass in directory, " + this.getDir() + " is a file");
if (this.getDir().exists()) {
  // list() returns null on I/O error; that case is deliberately treated as "empty".
  String[] children = this.getDir().list();
  if (children != null && children.length != 0) {
    throw new IllegalArgumentException("Directory " + this.getDir() + " is not empty");
// Target directory (under the cluster's base dir) for the table export.
// NOTE(review): path built by string concatenation with "/" — new File(parent, "export")
// would be more portable; left as-is in this doc-only pass.
File exportDir = new File(getCluster().getConfig().getDir().toString() + "/export");
// This code needs direct file-system access to the cluster directory, so it is
// skipped unless running against a MiniAccumuloClusterImpl.
Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
String rootPath = mac.getConfig().getDir().getAbsolutePath();
// Absolute path of the mini cluster's base directory.
String rootPath = mac.getConfig().getDir().getAbsolutePath();