/**
 * Launches {@code clazz} in a child process with the given program arguments.
 *
 * @param clazz main class to run
 * @param args command-line arguments handed to the class
 * @return handle to the spawned process
 * @throws IOException if the process cannot be started
 */
public Process exec(Class<?> clazz, String... args) throws IOException {
  // Delegates to the three-argument overload with a null middle argument
  // (presumably extra JVM options — confirm against the overload's contract).
  return exec(clazz, null, args);
}
/**
 * Runs {@code clz} as a child process and blocks until it terminates.
 *
 * @param clz main class to run
 * @param args command-line arguments handed to the class
 * @return the process's exit code
 * @throws IOException if the process cannot be started, or if this thread is
 *         interrupted while waiting (the interrupt flag is restored first)
 */
@Override
public int exec(Class<?> clz, String[] args) throws IOException {
  Process process = cluster.exec(clz, args);
  try {
    return process.waitFor();
  } catch (InterruptedException e) {
    log.warn("Interrupted waiting for process to exit", e);
    // Restore the interrupt flag before converting to a checked IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
/**
 * Stops all cluster processes by running {@code accumulo admin stopAll} in a
 * child process and waiting for it to finish.
 *
 * @throws IOException if the admin process cannot be started, exits nonzero,
 *         or this thread is interrupted while waiting (interrupt flag restored)
 */
@Override
public void adminStopAll() throws IOException {
  Process p = cluster.exec(Admin.class, "stopAll");
  int exitCode;
  try {
    // Use waitFor()'s return value directly instead of a separate exitValue()
    // call — same value, one fewer Process round-trip.
    exitCode = p.waitFor();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
  if (exitCode != 0) {
    // Include the exit code so a failed shutdown is easier to diagnose.
    throw new IOException("Failed to run `accumulo admin stopAll`, exit code " + exitCode);
  }
}
/**
 * Starts the thrift proxy using the given MAC configuration.
 *
 * @param cfg configuration for MAC
 * @return Process for the thrift proxy
 * @throws IOException if the proxy config cannot be written or the process fails to start
 */
private Process startProxy(MiniAccumuloConfigImpl cfg) throws IOException {
  // Write a fresh proxy.properties for this cluster, then hand its path to the proxy.
  File propsFile = generateNewProxyConfiguration(cfg);
  String propsPath = propsFile.getCanonicalPath();
  return mac.exec(Proxy.class, "-p", propsPath);
}
/**
 * Convenience wrapper: launches {@code clazz} on the current test cluster.
 *
 * @param clazz main class to run
 * @param args command-line arguments handed to the class
 * @return handle to the spawned process
 * @throws IOException if the process cannot be started
 */
protected Process exec(Class<?> clazz, String... args) throws IOException {
  return getCluster().exec(clazz, args);
}
@Test
public void test() throws Exception {
  // Launch the zombie tserver and expect it to exit cleanly on its own.
  Process proc = cluster.exec(ZombieTServer.class);
  int exitCode = proc.waitFor();
  assertEquals(0, exitCode);
}
@Test
public void stopDuringStart() throws Exception {
  // Issuing stopAll while the cluster is still starting must still exit 0.
  Process stopAll = cluster.exec(Admin.class, "stopAll");
  assertEquals(0, stopAll.waitFor());
}
@Test
public void shutdownDuringIngest() throws Exception {
  // Kick off ingest in a child process against the root user.
  Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable");
  // Let ingest get going before pulling the rug out with a full shutdown.
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  Process stopAll = cluster.exec(Admin.class, "stopAll");
  assertEquals(0, stopAll.waitFor());
  ingest.destroy();
}
@Test
public void shutdownDuringDelete() throws Exception {
  // Seed the table first; ingest must complete successfully.
  Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable");
  assertEquals(0, ingest.waitFor());
  // Start random deletes, then shut the cluster down while they are in flight.
  Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  Process stopAll = cluster.exec(Admin.class, "stopAll");
  assertEquals(0, stopAll.waitFor());
  deleter.destroy();
}
/**
 * Runs {@code clz} as a child process, waits for it to exit, and returns both
 * the exit code and everything the process wrote to its redirected stdout file.
 *
 * @param clz main class to run
 * @param args command-line arguments handed to the class
 * @return entry of (exit code, captured stdout contents)
 * @throws IOException if the process cannot be started, the output file cannot
 *         be read, or this thread is interrupted while waiting
 */
@Override
public Entry<Integer,String> execWithStdout(Class<?> clz, String[] args) throws IOException {
  Process p = cluster.exec(clz, args);
  int exitCode;
  try {
    exitCode = p.waitFor();
  } catch (InterruptedException e) {
    log.warn("Interrupted waiting for process to exit", e);
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
  // Flush buffered log output so the .out file is complete before reading it.
  for (LogWriter writer : cluster.getLogWriters()) {
    writer.flush();
  }
  String outFile =
      cluster.getConfig().getLogDir() + "/" + clz.getSimpleName() + "_" + p.hashCode() + ".out";
  // try-with-resources: the original leaked the FileInputStream unless readAll
  // happened to close it; closing here is safe either way.
  try (FileInputStream fis = new FileInputStream(outFile)) {
    return Maps.immutableEntry(exitCode, readAll(fis));
  }
}
@Test
public void shutdownDuringQuery() throws Exception {
  // Seed the table first; ingest must complete successfully.
  Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable");
  assertEquals(0, ingest.waitFor());
  // Start verification scans, then shut the cluster down while they are in flight.
  Process verify = cluster.exec(VerifyIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  Process stopAll = cluster.exec(Admin.class, "stopAll");
  assertEquals(0, stopAll.waitFor());
  verify.destroy();
}
/**
 * Verifies that `accumulo admin stop <host>` removes exactly the targeted
 * tablet server from a two-server cluster.
 */
static void runAdminStopTest(Connector c, MiniAccumuloClusterImpl cluster)
    throws InterruptedException, IOException {
  // Seed data so the cluster has live tablets before stopping a server.
  assertEquals(0, cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z",
      cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable").waitFor());
  List<String> servers = c.instanceOperations().getTabletServers();
  assertEquals(2, servers.size());
  String victim = servers.get(0);
  log.info("Stopping " + victim);
  assertEquals(0, cluster.exec(Admin.class, "stop", victim).waitFor());
  // Exactly one server should remain, and it must not be the one we stopped.
  servers = c.instanceOperations().getTabletServers();
  assertEquals(1, servers.size());
  assertFalse(servers.get(0).equals(victim));
}
@Test
public void shutdownDuringDeleteTable() throws Exception {
  final Connector conn = getConnector();
  // Create ten tables to delete, so the async deleter has work in flight.
  for (int i = 0; i < 10; i++) {
    conn.tableOperations().create("table" + i);
  }
  // Captures any exception thrown on the deleter thread for rethrow below.
  final AtomicReference<Exception> caught = new AtomicReference<>();
  Thread deleter = new Thread() {
    @Override
    public void run() {
      try {
        for (int i = 0; i < 10; i++) {
          conn.tableOperations().delete("table" + i);
        }
      } catch (Exception ex) {
        caught.set(ex);
      }
    }
  };
  deleter.start();
  // Let some deletes begin, then shut the whole cluster down underneath them.
  sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
  if (caught.get() != null) {
    throw caught.get();
  }
}
/**
 * Verifies the garbage collector logs a warning when the number of delete
 * candidates exceeds its memory threshold.
 */
@Test
public void gcLotsOfCandidatesIT() throws Exception {
  killMacGc();
  log.info("Filling metadata table with bogus delete flags");
  Connector c = getConnector();
  addEntries(c, new BatchWriterOpts());
  cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
  Process gc = cluster.exec(SimpleGarbageCollector.class);
  sleepUninterruptibly(20, TimeUnit.SECONDS);
  String output = "";
  while (!output.contains("delete candidates has exceeded")) {
    byte[] buffer = new byte[10 * 1024];
    try {
      int n = gc.getInputStream().read(buffer);
      if (n < 0) {
        // EOF fix: the original passed -1 straight to new String(buffer, 0, -1, ...),
        // which throws IndexOutOfBoundsException instead of ending the loop.
        break;
      }
      // NOTE(review): each read replaces `output`, so a message split across two
      // reads would be missed — the 20s sleep presumably makes that unlikely.
      output = new String(buffer, 0, n, UTF_8);
    } catch (IOException ex) {
      break;
    }
  }
  gc.destroy();
  assertTrue(output.contains("delete candidates has exceeded"));
}
// NOTE(review): fragment — the `if` block opened here is not closed within this view.
// On a MiniAccumuloCluster, start a TraceServer in a child process and poll every
// 500ms until the tracer has created its "trace" table (i.e. finished starting).
if (ClusterType.MINI == getClusterType()) {
  MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
  trace = impl.exec(TraceServer.class);
  while (!c.tableOperations().exists("trace"))
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
@BeforeClass public static void setupMiniCluster() throws Exception { SharedMiniClusterBase.startMiniClusterWithConfig(new ShellServerITConfigCallback()); rootPath = getMiniClusterDir().getAbsolutePath(); // history file is updated in $HOME System.setProperty("HOME", rootPath); System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp"); traceProcess = getCluster().exec(TraceServer.class); Connector conn = getCluster().getConnector(getPrincipal(), getToken()); TableOperations tops = conn.tableOperations(); // give the tracer some time to start while (!tops.exists("trace")) { sleepUninterruptibly(1, TimeUnit.SECONDS); } }
/**
 * Writes ten rows, runs the RowHash MapReduce job over them in a child
 * process, then scans the output column and checks each stored hash equals
 * the Base64-encoded MD5 of the expected row value.
 */
static void runTest(Connector c, MiniAccumuloClusterImpl cluster)
    throws AccumuloException, AccumuloSecurityException, TableExistsException,
    TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
    NoSuchAlgorithmException {
  c.tableOperations().create(tablename);
  BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
  for (int i = 0; i < 10; i++) {
    Mutation m = new Mutation("" + i);
    m.put(input_cf, input_cq, "row" + i);
    bw.addMutation(m);
  }
  bw.close();
  Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i",
      c.getInstance().getInstanceName(), "-z", c.getInstance().getZooKeepers(), "-u", "root",
      "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
  assertEquals(0, hash.waitFor());
  Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
  s.fetchColumn(new Text(input_cf), new Text(output_cq));
  // Hoisted out of the loop: digest() resets the MessageDigest, so a single
  // instance is safe and avoids a getInstance() lookup per row.
  MessageDigest md = MessageDigest.getInstance("MD5");
  int i = 0;
  for (Entry<Key,Value> entry : s) {
    // NOTE(review): getBytes()/new String() use the platform default charset;
    // inputs here are ASCII so results are stable, but an explicit charset
    // would be safer — confirm before tightening.
    byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
    assertEquals(entry.getValue().toString(), new String(check));
    i++;
  }
}
}
// Cleanly shut down all Accumulo processes via `accumulo admin stopAll`
// (the admin process must exit 0), then stop the mini cluster itself.
Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
cluster.stop();
// Remove any stale copy of the rfile, then regenerate it as an empty rfile
// by running CreateEmpty in a child process; the process must exit 0.
log.debug("Removing rfile '" + rfile + "'");
cluster.getFileSystem().delete(rfile, false);
Process info = cluster.exec(CreateEmpty.class, rfile.toString());
assertEquals(0, info.waitFor());