/**
 * Forcibly terminates a single managed server process.
 *
 * @param type the kind of server the process belongs to
 * @param proc handle identifying the process to kill
 * @throws ProcessNotFoundException if the referenced process cannot be found
 * @throws InterruptedException if interrupted while the process is being killed
 */
public void killProcess(ServerType type, ProcessReference proc)
    throws ProcessNotFoundException, InterruptedException {
  // Delegate directly to the cluster control, which owns the process handles.
  getClusterControl().killProcess(type, proc);
}
/**
 * Builds a snapshot of the cluster's server processes, grouped by server type.
 *
 * <p>Master and tablet-server entries are always present; ZooKeeper and garbage-collector
 * entries are added only when their process handles are non-null.
 *
 * @return map from server type to the process references currently held for that type
 */
public Map<ServerType,Collection<ProcessReference>> getProcesses() {
  MiniAccumuloClusterControl control = getClusterControl();
  Map<ServerType,Collection<ProcessReference>> processesByType = new HashMap<>();

  // Master and tablet servers are reported unconditionally.
  // NOTE(review): masterProcess is not null-checked, unlike zooKeeperProcess/gcProcess —
  // presumably references(...) tolerates null or the master is always started; confirm.
  processesByType.put(ServerType.MASTER, references(control.masterProcess));
  processesByType.put(ServerType.TABLET_SERVER,
      references(control.tabletServerProcesses.toArray(new Process[0])));

  // ZooKeeper and GC are optional processes; include them only when running.
  if (control.zooKeeperProcess != null) {
    processesByType.put(ServerType.ZOOKEEPER, references(control.zooKeeperProcess));
  }
  if (control.gcProcess != null) {
    processesByType.put(ServerType.GARBAGE_COLLECTOR, references(control.gcProcess));
  }

  return processesByType;
}
@Test public void test() throws Exception { log.debug("Starting Monitor"); cluster.getClusterControl().startAllServers(ServerType.MONITOR); String monitorLocation = null; while (null == monitorLocation) { try { monitorLocation = MonitorUtil.getLocation(getConnector().getInstance()); } catch (Exception e) { // ignored } if (null == monitorLocation) { log.debug("Could not fetch monitor HTTP address from zookeeper"); Thread.sleep(2000); } } URL url = new URL("https://" + monitorLocation); log.debug("Fetching web page " + url); String result = FunctionalTestUtils.readAll(url.openStream()); assertTrue(result.length() > 100); assertTrue(result.indexOf("Accumulo Overview") >= 0); }
// Kill one tablet server process and decrement the count of servers still expected up.
// NOTE(review): fragment — 'pr' and 'count' are declared outside this excerpt.
getCluster().getClusterControl().killProcess(ServerType.TABLET_SERVER, pr); --count;
// Background task: repeatedly bounce the tablet servers until 'stop' is set.
// NOTE(review): the closing "} ;" below belongs to an anonymous class whose declaration
// is outside this excerpt.
@Override public void run() {
  try {
    // i counts completed restart cycles; reported when the loop exits.
    int i = 0;
    while (!stop.get()) {
      // Pause between cycles; sleepUninterruptibly does not propagate interrupts.
      sleepUninterruptibly(10, TimeUnit.SECONDS);
      System.out.println("Restarting");
      getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
      getCluster().start();
      // read the metadata table to know everything is back up
      Iterators.size(
          getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
      i++;
    }
    System.out.println("Restarted " + i + " times");
  } catch (Exception ex) {
    // Any failure ends the loop; log it with full stack trace.
    log.error("{}", ex.getMessage(), ex);
  }
} };
// Handle used to start/stop/kill individual MiniAccumuloCluster server processes.
MiniAccumuloClusterControl control = getClusterControl();
/**
 * Checks that the number of WALs in the filesystem stays at 2 across a tablet server
 * bounce, even after the garbage collector is started and given time to run.
 * Hard 2-minute timeout so a hung cluster operation fails instead of wedging the build.
 */
@Test(timeout = 2 * 60 * 1000) public void test() throws Exception {
  // not yet, please — keep the GC down while the table and its WALs are set up
  String tableName = getUniqueNames(1)[0];
  cluster.getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  Connector c = getConnector();
  c.tableOperations().create(tableName);
  // count the number of WALs in the filesystem
  assertEquals(2, countWALsInFS(cluster));
  cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
  cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
  cluster.getClusterControl().start(ServerType.TABLET_SERVER);
  // Scan the metadata table, which requires a live tablet server.
  Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
  // let GC run
  UtilWaitThread.sleep(3 * 5 * 1000);
  // WAL count must still be 2 after the GC has had a chance to run.
  assertEquals(2, countWALsInFS(cluster));
}
// NOTE(review): fragment — the method body continues beyond this excerpt.
@Test public void test() throws Exception {
  // Ensure the monitor is running before the test proper begins.
  getCluster().getClusterControl().start(ServerType.MONITOR);
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
// Bounce the master: stop, pause, then restart.
mac.getClusterControl().stop(ServerType.MASTER);
// Fixed 5s delay before restart — TODO confirm whether a real liveness check is possible here.
Thread.sleep(5000);
log.info("Restarting master");
mac.getClusterControl().start(ServerType.MASTER);
// NOTE(review): fragment — the method body continues beyond this excerpt.
@Test public void test() throws Exception {
  final Connector c = getConnector();
  // Hard-kill the garbage collector on localhost before the table is created.
  getCluster().getClusterControl().kill(ServerType.GARBAGE_COLLECTOR, "localhost");
  final String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
// Handle for controlling individual MiniAccumuloCluster server processes.
MiniAccumuloClusterControl control = getClusterControl();
/**
 * Compares the average run time with a small WAL max size (many roll-overs) against a 1G
 * max size (roll-overs eliminated), asserting the small-WAL run is slower but by less
 * than 25%.
 *
 * @throws Exception on any cluster or client failure
 */
private void testWalPerformanceOnce() throws Exception {
  // get time with a small WAL, which will cause many WAL roll-overs
  long avg1 = getAverage();
  // use a bigger WAL max size to eliminate WAL roll-overs
  Connector c = getConnector();
  c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1G");
  // Flush metadata and root tables — presumably to settle state before the restart; confirm.
  c.tableOperations().flush(MetadataTable.NAME, null, null, true);
  c.tableOperations().flush(RootTable.NAME, null, null, true);
  // Restart tablet servers — presumably so the new WAL size takes effect; confirm.
  getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
  getCluster().start();
  long avg2 = getAverage();
  log.info(String.format("Average run time with small WAL %,d with large WAL %,d", avg1, avg2));
  // Small-WAL run must be strictly slower...
  assertTrue(avg1 > avg2);
  // ...but no more than 25% slower than the large-WAL run.
  double percent = (100. * avg1) / avg2;
  log.info(String.format("Percent of large log: %.2f%%", percent));
  assertTrue(percent < 125.);
}
// NOTE(review): fragment — the argument list continues beyond this excerpt.
// Restart a tablet server pinned to its previous client port, with port search
// disabled (TSERV_PORTSEARCH=false).
getCluster().getClusterControl().start(ServerType.TABLET_SERVER, null,
    ImmutableMap.of(Property.TSERV_CLIENTPORT.getKey(), "" + restartedServer.getPort(),
        Property.TSERV_PORTSEARCH.getKey(), "false"),
// NOTE(review): fragment — the method body continues beyond this excerpt.
@Test public void replicatedStatusEntriesAreDeleted() throws Exception {
  // Stop the GC, bounce the tablet server, then bring both back up.
  // NOTE(review): mixes getCluster() and the 'cluster' field — presumably the same
  // cluster instance; confirm against the enclosing class.
  getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
  cluster.getClusterControl().start(ServerType.TABLET_SERVER);
  cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
// Wait for tablet balancing to settle before the restart.
c.instanceOperations().waitForBalance();
log.info("Restarting");
// Hard-kill the local tablet server, then bring the cluster back up.
getCluster().getClusterControl().kill(ServerType.TABLET_SERVER, "localhost");
getCluster().start();
log.info("Verifying");
// NOTE(review): fragment — the method body continues beyond this excerpt.
@Test public void replicationRecordsAreClosedAfterGarbageCollection() throws Exception {
  // GC is taken down first, then the tablet server is bounced while the GC stays off.
  getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
  cluster.getClusterControl().start(ServerType.TABLET_SERVER);
// NOTE(review): fragment — the method body continues beyond this excerpt.
@Test public void test() throws Exception {
  MiniAccumuloClusterImpl mac = getCluster();
  MiniAccumuloClusterControl control = mac.getClusterControl();
  // GARBAGE_COLLECTOR is referenced without the ServerType qualifier here — presumably a
  // static import in the enclosing file; confirm.
  control.stop(GARBAGE_COLLECTOR);
  Connector c = getConnector();
// Stop the cluster's garbage collector process.
getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
// NOTE(review): fragment — the method body continues beyond this excerpt.
// Two-minute hard timeout guards against a hung stop/start.
@Test(timeout = 2 * 60 * 1000) public void test() throws Exception {
  // Keep the GC down while the tablet server is bounced.
  getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
  getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
  getCluster().getClusterControl().start(ServerType.TABLET_SERVER);