/**
 * Creates a mini cluster rooted at the given directory, delegating to the config-based
 * constructor with a default {@link MiniAccumuloConfigImpl}.
 *
 * @param dir
 *          An empty or nonexistent temp directory that Accumulo and Zookeeper can store data in.
 *          Creating the directory is left to the user. Java 7, Guava, and Junit provide methods
 *          for creating temporary directories.
 * @param rootPassword
 *          Initial root password for instance.
 * @throws IOException
 *           if the cluster's on-disk layout cannot be created
 */
public MiniAccumuloClusterImpl(File dir, String rootPassword) throws IOException {
  this(new MiniAccumuloConfigImpl(dir, rootPassword));
}
/**
 * Returns the instance name this cluster was configured with, delegating to the wrapped impl.
 *
 * @return name of configured instance
 *
 * @since 1.6.0
 */
public String getInstanceName() {
  return impl.getInstanceName();
}
@Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { Map<String,String> siteConfig = cfg.getSiteConfig(); siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K"); siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0"); cfg.setSiteConfig(siteConfig); // ensure we have two tservers if (cfg.getNumTservers() < 2) { cfg.setNumTservers(2); } }
@Override
protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Run against MiniDFS with a single tablet server and file archiving turned off.
  cfg.useMiniDFS(true);
  cfg.setNumTservers(1);
  cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
}
@Override
public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Single tserver on MiniDFS, with twice the default per-process memory.
  cfg.useMiniDFS();
  cfg.setNumTservers(1);
  cfg.setDefaultMemory(cfg.getDefaultMemory() * 2, MemoryUnit.BYTE);
}
@Override
public void configure(MiniAccumuloConfigImpl cfg, Configuration fsConf) {
  cfg.setNumTservers(TSERVERS);
  cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
  // Suspension window under test, expressed as a millisecond duration string.
  String suspendMillis = SUSPEND_DURATION + "ms";
  cfg.setProperty(Property.TABLE_SUSPEND_DURATION, suspendMillis);
}
// NOTE(review): this excerpt has intermediate lines elided — braces and control flow
// do not balance as shown. Comments below describe only what is visible.
// Guard: a MiniDFS-backed cluster cannot be restarted in place.
if (config.useMiniDFS() && miniDFS == null) {
  throw new IllegalStateException("Cannot restart mini when using miniDFS");
  if (config.useExistingInstance()) {
    // Attaching to a previously-initialized instance: pull its Accumulo and Hadoop configs.
    Configuration acuConf = config.getAccumuloConfiguration();
    Configuration hadoopConf = config.getHadoopConfiguration();
    config.setInstanceName(instanceName);
    // Refuse to attach while the existing instance's servers are still running.
    if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath))
      throw new RuntimeException(
  // Only launch our own ZooKeeper when not reusing an external one.
  if (!config.useExistingZooKeepers())
    control.start(ServerType.ZOOKEEPER);
  if (!config.useExistingZooKeepers()) {
    Socket s = null;
    try {
      // Probe the embedded ZooKeeper with the four-letter "ruok" command until it answers.
      s = new Socket("localhost", config.getZooKeeperPort());
      s.setReuseAddress(true);
      s.getOutputStream().write("ruok\n".getBytes());
      break;
    } catch (Exception e) {
      // Give up once the configured startup window has elapsed.
      if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
        throw new ZooKeeperBindException("Zookeeper did not start within "
            + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in "
            + config.getLogDir() + " for errors. Last exception: " + e);
// NOTE(review): excerpt with elided lines — braces do not balance as shown.
// Initialize the config and lay out the cluster's on-disk directory structure.
this.config = config.initialize();
mkdirs(config.getConfDir());
mkdirs(config.getLogDir());
mkdirs(config.getLibDir());
mkdirs(config.getLibExtDir());
if (!config.useExistingInstance()) {
  if (!config.useExistingZooKeepers())
    mkdirs(config.getZooKeeperDir());
  mkdirs(config.getWalogDir());
  mkdirs(config.getAccumuloDir());
if (config.useMiniDFS()) {
  // MiniDFS layout: namenode, datanode, and dfs working dirs under the accumulo dir.
  File nn = new File(config.getAccumuloDir(), "nn");
  mkdirs(nn);
  File dn = new File(config.getAccumuloDir(), "dn");
  mkdirs(dn);
  File dfs = new File(config.getAccumuloDir(), "dfs");
  mkdirs(dfs);
  Configuration conf = new Configuration();
  // Point clients at the MiniDFS namenode via core-site.xml, and dump the
  // effective HDFS settings into hdfs-site.xml.
  InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
  dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
  File coreFile = new File(config.getConfDir(), "core-site.xml");
  writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
  File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
  writeConfig(hdfsFile, conf);
// NOTE(review): method body continues beyond this excerpt — only the peer-cluster
// setup is visible here.
// Replication IT: verifies data written to the primary cluster reaches the peer.
@Test(timeout = 10 * 60 * 1000)
public void dataWasReplicatedToThePeer() throws Exception {
  // Stand up a second mini cluster in its own test dir to act as the replication peer.
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(
      createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
      ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
// NOTE(review): excerpt with elided lines; the bare "+ ..." lines are continuations of
// string concatenations whose opening statements are not visible here.
// Point the site config at the given zookeeper host(s).
final Map<String, String> siteConfig = config.getSiteConfig();
siteConfig.put(Property.INSTANCE_ZK_HOST.getKey(), zookeeper);
config.setSiteConfig(siteConfig);
// Build the init command line: instance name plus root password.
args.add(config.getInstanceName());
args.add("--password");
args.add(config.getRootPassword());
+ ret + ". Check the logs in " + config.getLogDir() + " for errors.");
+ config.getInstanceName() + " and zookeeper(s) " + config.getZooKeepers());
// Launch one TabletServer process per configured tserver, tracking them for cleanup.
for (int i = 0; i < config.getNumTservers(); i++) {
  cleanup.add(miniAccumulo.exec(TabletServer.class, jvmArgs));
@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Lengthen the ZooKeeper session timeout to 15s so slow test hosts don't drop sessions.
  Map<String,String> site = cfg.getSiteConfig();
  site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
  cfg.setSiteConfig(site);
}
@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // One tserver, with a very short major-compaction delay for the test.
  cfg.setNumTservers(1);
  Map<String,String> site = cfg.getSiteConfig();
  site.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
  cfg.setSiteConfig(site);
}
protected void configureForSsl(MiniAccumuloConfigImpl cfg, File folder) { Map<String,String> siteConfig = cfg.getSiteConfig(); if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) { // already enabled; don't mess with it return; } File sslDir = new File(folder, "ssl"); assertTrue(sslDir.mkdirs() || sslDir.isDirectory()); File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks"); File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks"); File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks"); final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password"; try { new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption") .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(), truststorePassword); } catch (Exception e) { throw new RuntimeException("error creating MAC keystore", e); } siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true"); siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath()); siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword()); siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath()); siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword); cfg.setSiteConfig(siteConfig); }
// NOTE(review): excerpt with elided lines — the ProcessBuilder/process/env setup it
// references starts outside this view.
// Build the child JVM command line: java binary, proc marker, classpath, extra opts.
argList.addAll(Arrays.asList(javaBin, "-Dproc=" + clazz.getSimpleName(), "-cp", classpath));
argList.addAll(extraJvmOpts);
// Forward configured system properties as -D flags.
for (Entry<String,String> sysProp : config.getSystemProperties().entrySet()) {
  argList.add(String.format("-D%s=%s", sysProp.getKey(), sysProp.getValue()));
// Point the child's Accumulo environment at this mini cluster's directories.
builder.environment().put("ACCUMULO_HOME", config.getDir().getAbsolutePath());
builder.environment().put("ACCUMULO_LOG_DIR", config.getLogDir().getAbsolutePath());
builder.environment().put("ACCUMULO_CLIENT_CONF_PATH",
    config.getClientConfFile().getAbsolutePath());
// Expose native library paths for both Linux and macOS loaders.
String ldLibraryPath = Joiner.on(File.pathSeparator).join(config.getNativeLibPaths());
builder.environment().put("LD_LIBRARY_PATH", ldLibraryPath);
builder.environment().put("DYLD_LIBRARY_PATH", ldLibraryPath);
if (env != null)
  builder.environment().put("ZOOKEEPER_HOME", env);
builder.environment().put("ACCUMULO_CONF_DIR", config.getConfDir().getAbsolutePath());
builder.environment().put("HADOOP_HOME", config.getDir().getAbsolutePath());
if (config.getHadoopConfDir() != null)
  builder.environment().put("HADOOP_CONF_DIR", config.getHadoopConfDir().getAbsolutePath());
// Capture the child's stderr and stdout into per-process log files.
new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".err"));
logWriters.add(lw);
lw.start();
lw = new LogWriter(process.getInputStream(),
    new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".out"));
logWriters.add(lw);
lw.start();
@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // 1 GB default memory, a single tserver, and a capped mutation queue.
  cfg.setDefaultMemory(1, MemoryUnit.GIGABYTE);
  cfg.setNumTservers(1);
  Map<String,String> site = cfg.getSiteConfig();
  site.put(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "10M");
  cfg.setSiteConfig(site);
}
// NOTE(review): excerpt with elided lines — the try that matches the visible catch, and
// the loop/try closures, are not shown.
// Stand up the first "master" cluster, retrying if ZooKeeper fails to bind its port.
MiniAccumuloClusterImpl master1Cluster;
while (true) {
  master1Cfg = new MiniAccumuloConfigImpl(master1Dir, password);
  master1Cfg.setNumTservers(1);
  master1Cfg.setInstanceName("master1");
  // Replication tuning: name the cluster, cap WAL size, and shorten the
  // replication/assignment/scan intervals so the test converges quickly.
  master1Cfg.setProperty(Property.REPLICATION_NAME, master1Cfg.getInstanceName());
  master1Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
  master1Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
  master1Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
  master1Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
  master1Cluster = new MiniAccumuloClusterImpl(master1Cfg);
  setCoreSite(master1Cluster);
  break;
} catch (ZooKeeperBindException e) {
  log.warn("Failed to start ZooKeeper on " + master1Cfg.getZooKeeperPort() + ", will retry");
// Second "master" cluster, configured identically apart from its name.
MiniAccumuloClusterImpl master2Cluster;
while (true) {
  master2Cfg = new MiniAccumuloConfigImpl(master2Dir, password);
  master2Cfg.setNumTservers(1);
  master2Cfg.setInstanceName("master2");
  master2Cfg.setProperty(Property.REPLICATION_NAME, master2Cfg.getInstanceName());
  master2Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
  master2Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
  master2Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
  master2Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
  // Double the tablet server's memory allotment.
  long doubledTserverMemory = cfg.getMemory(ServerType.TABLET_SERVER) * 2;
  cfg.setMemory(ServerType.TABLET_SERVER, doubledTserverMemory, MemoryUnit.BYTE);
  // Shorten the major-compaction delay to 10ms for the test.
  Map<String,String> site = cfg.getSiteConfig();
  site.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
  cfg.setSiteConfig(site);
}
// NOTE(review): excerpt with elided lines — braces do not balance as shown.
// Validate the target directory: must be a directory (or absent) and must be empty.
if (this.getDir().exists() && !this.getDir().isDirectory())
  throw new IllegalArgumentException("Must pass in directory, " + this.getDir() + " is a file");
if (this.getDir().exists()) {
  String[] children = this.getDir().list();
  if (children != null && children.length != 0) {
    throw new IllegalArgumentException("Directory " + this.getDir() + " is not empty");
setInstanceLocation();
// mergeProp only sets a value when the user hasn't already configured one —
// these are mini-cluster-friendly defaults, not overrides.
mergeProp(Property.INSTANCE_SECRET.getKey(), DEFAULT_INSTANCE_SECRET);
mergeProp(Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey() + "password", getRootPassword());
mergeProp(Property.TSERV_PORTSEARCH.getKey(), "true");
mergeProp(Property.TSERV_DATACACHE_SIZE.getKey(), "10M");
mergeProp(Property.TSERV_INDEXCACHE_SIZE.getKey(), "10M");
mergeProp(Property.TSERV_MAXMEM.getKey(), "50M");
mergeProp(Property.TSERV_WALOG_MAX_SIZE.getKey(), "100M");
mergeProp(Property.TSERV_NATIVEMAP_ENABLED.getKey(), "false");
mergeProp(Property.TSERV_MAJC_DELAY.getKey(), "3");
mergeProp(Property.GENERAL_CLASSPATHS.getKey(), libDir.getAbsolutePath() + "/[^.].*[.]jar");
mergeProp(Property.GENERAL_DYNAMIC_CLASSPATHS.getKey(),
    libExtDir.getAbsolutePath() + "/[^.].*[.]jar");
mergeProp(Property.GC_CYCLE_DELAY.getKey(), "4s");
mergeProp(Property.GC_CYCLE_START.getKey(), "0s");
// Pick free ports at random so multiple mini clusters can coexist on one host.
mergePropWithRandomPort(Property.MASTER_CLIENTPORT.getKey());
mergePropWithRandomPort(Property.TRACE_PORT.getKey());
mergePropWithRandomPort(Property.TSERV_CLIENTPORT.getKey());
mergePropWithRandomPort(Property.MONITOR_PORT.getKey());
private void createMiniAccumulo() throws Exception { // createTestDir will give us a empty directory, we don't need to clean it up ourselves File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName()); MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD); String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath(); String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString(); cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce); cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString()); Configuration coreSite = new Configuration(false); configure(cfg, coreSite); cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString()); configureForEnvironment(cfg, getClass(), getSslDir(baseDir)); cluster = new MiniAccumuloClusterImpl(cfg); if (coreSite.size() > 0) { File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml"); if (csFile.exists()) { coreSite.addResource(new Path(csFile.getAbsolutePath())); } File tmp = new File(csFile.getAbsolutePath() + ".tmp"); OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp)); coreSite.writeXml(out); out.close(); assertTrue(tmp.renameTo(csFile)); } beforeClusterStart(cfg); }
// NOTE(review): excerpt starts mid-expression — the statement that consumes the first
// line (presumably parsing the ZK timeout into zkTimeout) is not visible here.
getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
// Build a ZooKeeper reader/writer against the running cluster's quorum.
IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(
    getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
// Start a second MAC that attaches to the first cluster's existing instance,
// reusing its accumulo-site.xml; the root password is unused in this mode.
FileUtils.deleteQuietly(testDir2);
MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
macConfig2.useExistingInstance(
    new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);