/**
 * Verifies a clean cluster shutdown while a random-delete client is still running: ingest
 * some data, start the deleter, then issue "stopAll" and make sure it exits successfully.
 */
@Test
public void shutdownDuringDelete() throws Exception {
  // Seed the table first; the ingest process must finish cleanly before we proceed.
  Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable");
  assertEquals(0, ingest.waitFor());

  // Kick off deletions in the background, give them a moment to get going...
  Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);

  // ...then shut the cluster down underneath it. stopAll must still succeed.
  Process stopAll = cluster.exec(Admin.class, "stopAll");
  assertEquals(0, stopAll.waitFor());
  deleter.destroy();
}
/**
 * Wraps the given configuration in a new {@link MiniAccumuloClusterImpl}, which backs
 * all operations of this facade.
 *
 * @throws IOException if the underlying cluster implementation fails to initialize
 */
private MiniAccumuloCluster(MiniAccumuloConfigImpl config) throws IOException {
  this.impl = new MiniAccumuloClusterImpl(config);
}
private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException { // kill gc started by MAC getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next()); // delete lock in zookeeper if there, this will allow next GC to start quickly String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK; ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET); try { ZooLock.deleteLock(zk, path); } catch (IllegalStateException e) { } assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR)); }
/**
 * Kills every running tablet server process, then restarts the cluster's server
 * processes so fresh tablet servers come up.
 */
private void restartTServer() throws Exception {
  Collection<ProcessReference> tservers = cluster.getProcesses().get(ServerType.TABLET_SERVER);
  for (ProcessReference tserver : tservers) {
    cluster.killProcess(ServerType.TABLET_SERVER, tserver);
  }
  cluster.start();
}
/**
 * Builds a snapshot of the currently-tracked server processes, keyed by server type.
 * Master and tablet server entries are always present; ZooKeeper and GC entries are
 * included only when the corresponding process exists.
 *
 * @return a freshly-built map of server type to its process references
 */
public Map<ServerType,Collection<ProcessReference>> getProcesses() {
  MiniAccumuloClusterControl control = getClusterControl();
  Map<ServerType,Collection<ProcessReference>> processes = new HashMap<>();
  processes.put(ServerType.MASTER, references(control.masterProcess));
  processes.put(ServerType.TABLET_SERVER,
      references(control.tabletServerProcesses.toArray(new Process[0])));
  if (control.zooKeeperProcess != null) {
    processes.put(ServerType.ZOOKEEPER, references(control.zooKeeperProcess));
  }
  if (control.gcProcess != null) {
    processes.put(ServerType.GARBAGE_COLLECTOR, references(control.gcProcess));
  }
  return processes;
}
@Test public void testExistingInstance() throws Exception { Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD)); conn.tableOperations().flush(RootTable.NAME, null, null, true); Set<Entry<ServerType,Collection<ProcessReference>>> procs = getCluster().getProcesses() .entrySet(); for (Entry<ServerType,Collection<ProcessReference>> entry : procs) { continue; for (ProcessReference pr : entry.getValue()) getCluster().killProcess(entry.getKey(), pr); getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey())); IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter( getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET)); final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID(); while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) { new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir); MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2); accumulo2.start(); conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD)); accumulo2.stop();
Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor()); cluster.stop(); new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml")); new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml"))); conf.writeXml(fos); fos.close(); cluster.start(); Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)); conn.tableOperations().compact(tableNames[0], null, null, true, true); ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000); String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH; String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
String javaBin = javaHome + File.separator + "bin" + File.separator + "java"; String classpath = System.getProperty("java.class.path"); classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath; String className = TabletServer.class.getName(); ArrayList<String> argList = new ArrayList<>(); ProcessBuilder builder = new ProcessBuilder(argList); Map<String,String> env = builder.environment(); env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath()); env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath()); String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE"; env.put("TRICK_FILE", trickFilename); String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so"; sleepUninterruptibly(1, TimeUnit.SECONDS); cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next()); sleepUninterruptibly(1, TimeUnit.SECONDS); c.tableOperations().create("test_ingest"); assertEquals(1, c.instanceOperations().getTabletServers().size()); int rows = 100 * 1000; ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows + ""); sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID(); Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor()); cluster.stop(); new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml")); v1.toString() + "," + v2.toString() + "," + v3.toString()); BufferedOutputStream fos = new BufferedOutputStream( new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml"))); conf.writeXml(fos); fos.close(); Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor()); cluster.start();
/**
 * Derives a client configuration from this cluster's site configuration, stamped with
 * this cluster's instance name and ZooKeeper hosts.
 */
@Override
public ClientConfiguration getClientConfig() {
  ClientConfiguration clientConf = ClientConfiguration.fromMap(config.getSiteConfig());
  return clientConf.withInstance(getInstanceName()).withZkHosts(getZooKeepers());
}
/**
 * Runs {@code clz} as a separate process via the cluster, waits for it to exit, and
 * returns the exit code paired with the process's captured stdout.
 *
 * @param clz main class to execute
 * @param args program arguments passed to the process
 * @return entry of (exit code, full stdout contents)
 * @throws IOException if reading the output fails, or if interrupted while waiting
 *         (the interrupt flag is restored before the IOException is thrown)
 */
@Override
public Entry<Integer,String> execWithStdout(Class<?> clz, String[] args) throws IOException {
  Process p = cluster.exec(clz, args);
  int exitCode;
  try {
    exitCode = p.waitFor();
  } catch (InterruptedException e) {
    log.warn("Interrupted waiting for process to exit", e);
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
  // Flush the log writers so the process's stdout file is complete before we read it.
  for (LogWriter writer : cluster.getLogWriters()) {
    writer.flush();
  }
  // try-with-resources: the original leaked this stream if readAll did not close it
  try (FileInputStream fis = new FileInputStream(
      cluster.getConfig().getLogDir() + "/" + clz.getSimpleName() + "_" + p.hashCode() + ".out")) {
    return Maps.immutableEntry(exitCode, readAll(fis));
  }
}
/**
 * Convenience overload: executes {@code clazz} with the given program arguments,
 * passing {@code null} for the middle parameter of the three-argument {@code exec}
 * overload (presumably extra JVM options — confirm against that overload's signature).
 */
public Process exec(Class<?> clazz, String... args) throws IOException { return exec(clazz, null, args); }
@Override public Void run() throws Exception { mac.getConnector("some_other_user", delegationToken); return null; } });
/**
 * Reads the entire captured stdout of a process started through the given cluster.
 * Flushes all cluster log writers first so the output file is complete.
 *
 * @param c cluster that launched the process
 * @param klass main class the process was started with (names the output file)
 * @param p the process whose output file to read (its hashCode is part of the name)
 * @return the full contents of the process's stdout file
 */
public static String readAll(MiniAccumuloClusterImpl c, Class<?> klass, Process p)
    throws Exception {
  for (LogWriter writer : c.getLogWriters())
    writer.flush();
  // try-with-resources: the original leaked this stream if readAll(InputStream)
  // did not close it
  try (FileInputStream fis = new FileInputStream(
      c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out")) {
    return readAll(fis);
  }
}
@Override public void run() { try { int i = 0; while (!stop.get()) { sleepUninterruptibly(10, TimeUnit.SECONDS); System.out.println("Restarting"); getCluster().getClusterControl().stop(ServerType.TABLET_SERVER); getCluster().start(); // read the metadata table to know everything is back up Iterators.size( getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator()); i++; } System.out.println("Restarted " + i + " times"); } catch (Exception ex) { log.error("{}", ex.getMessage(), ex); } } };
new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir); .println("conf " + new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml")); MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2); try { accumulo2.start(); Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance"); } catch (RuntimeException e) {
Connector connector = getConnector(); String tableName = getUniqueNames(1)[0]; ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName); ReadWriteIT.verify(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName); Path rfile = new Path(entry.getKey().getColumnQualifier().toString()); log.debug("Removing rfile '" + rfile + "'"); cluster.getFileSystem().delete(rfile, false); Process info = cluster.exec(CreateEmpty.class, rfile.toString()); assertEquals(0, info.waitFor());
/**
 * Returns the client configuration for this mini cluster.
 * <p>
 * Thin delegate: all work is performed by the wrapped {@code impl} cluster.
 *
 * @since 1.6.0
 */
public ClientConfiguration getClientConfig() { return impl.getClientConfig(); } }
@Before public void startMac() throws Exception { MiniClusterHarness harness = new MiniClusterHarness(); mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() { @Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) { Map<String,String> site = cfg.getSiteConfig(); site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s"); cfg.setSiteConfig(site); } }); mac.getConfig().setNumTservers(1); mac.start(); // Enabled kerberos auth Configuration conf = new Configuration(false); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); }
private void createMiniAccumulo() throws Exception { // createTestDir will give us a empty directory, we don't need to clean it up ourselves File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName()); MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD); String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath(); String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString(); cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce); cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString()); Configuration coreSite = new Configuration(false); configure(cfg, coreSite); cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString()); configureForEnvironment(cfg, getClass(), getSslDir(baseDir)); cluster = new MiniAccumuloClusterImpl(cfg); if (coreSite.size() > 0) { File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml"); if (csFile.exists()) { coreSite.addResource(new Path(csFile.getAbsolutePath())); } File tmp = new File(csFile.getAbsolutePath() + ".tmp"); OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp)); coreSite.writeXml(out); out.close(); assertTrue(tmp.renameTo(csFile)); } beforeClusterStart(cfg); }