/**
 * Starts the Accumulo and ZooKeeper processes. Can only be called once.
 *
 * @throws IOException if the underlying cluster fails to start
 * @throws InterruptedException if startup is interrupted
 */
public void start() throws IOException, InterruptedException {
  // Delegate to the implementation object that owns the actual processes.
  impl.start();
}
/** Kills every running tablet server process, then restarts the cluster. */
private void restartTServer() throws Exception {
  for (ProcessReference tserver : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, tserver);
  }
  // start() brings back any killed server processes.
  cluster.start();
}
/** Bounces the tablet servers: kill each process, then restart the cluster. */
private void restartTServer() throws Exception {
  for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, ref);
  }
  // Restarting the cluster relaunches the killed tablet servers.
  cluster.start();
}
/** Restarts all tablet servers by killing their processes and restarting the cluster. */
private void restartTServer() throws Exception {
  for (ProcessReference process : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, process);
  }
  cluster.start();
}
/**
 * Creates and starts the MiniAccumuloCluster before each test, retrying up to
 * three times when ZooKeeper fails to bind its port. Each retry rebuilds the
 * cluster from scratch after a short pause.
 */
@Before
public void setUp() throws Exception {
  createMiniAccumulo();
  Exception lastException = null;
  for (int attempt = 0; attempt < 3; attempt++) {
    try {
      cluster.start();
      return;
    } catch (ZooKeeperBindException e) {
      lastException = e;
      log.warn("Failed to start MiniAccumuloCluster, assumably due to ZooKeeper issues",
          lastException);
      Thread.sleep(3000);
      // Rebuild the cluster entirely before the next attempt.
      createMiniAccumulo();
    }
  }
  throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts",
      lastException);
}
@Before public void setup() throws Exception { MiniClusterHarness harness = new MiniClusterHarness(); // Create a primary and a peer instance, both with the same "root" user primary = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), getConfigCallback(PRIMARY_NAME), kdc); primary.start(); peer = harness.create(getClass().getName(), testName.getMethodName() + "_peer", new PasswordToken("unused"), getConfigCallback(PEER_NAME), kdc); peer.start(); // Enable kerberos auth Configuration conf = new Configuration(false); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); }
// Restart loop: every 10 seconds (until 'stop' is set) bounce all tablet servers,
// then scan the metadata table to confirm the cluster came back up, counting the
// number of completed restarts.
// NOTE(review): fragment — the trailing "} };" closes an enclosing anonymous class
// whose declaration is outside this view; kept byte-identical.
@Override public void run() { try { int i = 0; while (!stop.get()) { sleepUninterruptibly(10, TimeUnit.SECONDS); System.out.println("Restarting"); getCluster().getClusterControl().stop(ServerType.TABLET_SERVER); getCluster().start(); // read the metadata table to know everything is back up Iterators.size( getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator()); i++; } System.out.println("Restarted " + i + " times"); } catch (Exception ex) { log.error("{}", ex.getMessage(), ex); } } };
@Before public void startMac() throws Exception { MiniClusterHarness harness = new MiniClusterHarness(); mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() { @Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) { Map<String,String> site = cfg.getSiteConfig(); site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s"); cfg.setSiteConfig(site); } }); mac.getConfig().setNumTservers(1); mac.start(); // Enabled kerberos auth Configuration conf = new Configuration(false); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); }
@Before public void startMac() throws Exception { MiniClusterHarness harness = new MiniClusterHarness(); mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() { @Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) { Map<String,String> site = cfg.getSiteConfig(); site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s"); // Reduce the period just to make sure we trigger renewal fast site.put(Property.GENERAL_KERBEROS_RENEWAL_PERIOD.getKey(), "5s"); cfg.setSiteConfig(site); } }); mac.getConfig().setNumTservers(1); mac.start(); // Enabled kerberos auth Configuration conf = new Configuration(false); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); UserGroupInformation.setConfiguration(conf); }
@Test public void dontGCRootLog() throws Exception { killMacGc(); // dirty metadata Connector c = getConnector(); String table = getUniqueNames(1)[0]; c.tableOperations().create(table); // let gc run for a bit cluster.start(); sleepUninterruptibly(20, TimeUnit.SECONDS); killMacGc(); // kill tservers for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) { cluster.killProcess(ServerType.TABLET_SERVER, ref); } // run recovery cluster.start(); // did it recover? Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY); Iterators.size(scanner.iterator()); }
// Full cluster bounce: stop all processes, log, then start them back up.
// NOTE(review): fragment — enclosing method is outside this view.
cluster.stop(); log.info("starting up"); cluster.start();
private void testWalPerformanceOnce() throws Exception { // get time with a small WAL, which will cause many WAL roll-overs long avg1 = getAverage(); // use a bigger WAL max size to eliminate WAL roll-overs Connector c = getConnector(); c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1G"); c.tableOperations().flush(MetadataTable.NAME, null, null, true); c.tableOperations().flush(RootTable.NAME, null, null, true); getCluster().getClusterControl().stop(ServerType.TABLET_SERVER); getCluster().start(); long avg2 = getAverage(); log.info(String.format("Average run time with small WAL %,d with large WAL %,d", avg1, avg2)); assertTrue(avg1 > avg2); double percent = (100. * avg1) / avg2; log.info(String.format("Percent of large log: %.2f%%", percent)); assertTrue(percent < 125.); }
// NOTE(review): fragment — the matching try block opens before this view. The
// second MAC's start() is expected to throw, so reaching Assert.fail() means the
// duplicate instance wrongly started.
accumulo2.start(); Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance"); } catch (RuntimeException e) {
@Test public void test() throws Exception { Connector c = getConnector(); c.tableOperations().create("test_ingest"); BatchWriter bw = c.createBatchWriter("test_ingest", null); Mutation m = new Mutation("row"); m.put("cf", "cq", "value"); bw.addMutation(m); bw.close(); // kill zookeeper for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER)) cluster.killProcess(ServerType.ZOOKEEPER, proc); // give the servers time to react sleepUninterruptibly(1, TimeUnit.SECONDS); // start zookeeper back up cluster.start(); // use the tservers Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY); Iterator<Entry<Key,Value>> i = s.iterator(); assertTrue(i.hasNext()); assertEquals("row", i.next().getKey().getRow().toString()); assertFalse(i.hasNext()); // use the master c.tableOperations().delete("test_ingest"); }
// Kill the local tablet server, restart the cluster, then verify each table.
// NOTE(review): fragment — the for-loop body and enclosing method continue past
// this view.
log.info("Restarting"); getCluster().getClusterControl().kill(ServerType.TABLET_SERVER, "localhost"); getCluster().start(); log.info("Verifying"); for (String table : tables) {
// Log the tmp file just created, then bounce the whole cluster.
// NOTE(review): fragment — enclosing method is outside this view.
log.info("Created tmp file {}", tmp.toString()); getCluster().stop(); getCluster().start();
@Test public void gcTest() throws Exception { killMacGc(); Connector c = getConnector(); c.tableOperations().create("test_ingest"); c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K"); TestIngest.Opts opts = new TestIngest.Opts(); VerifyIngest.Opts vopts = new VerifyIngest.Opts(); vopts.rows = opts.rows = 10000; vopts.cols = opts.cols = 1; opts.setPrincipal("root"); vopts.setPrincipal("root"); TestIngest.ingest(c, cluster.getFileSystem(), opts, new BatchWriterOpts()); c.tableOperations().compact("test_ingest", null, null, true, true); int before = countFiles(); while (true) { sleepUninterruptibly(1, TimeUnit.SECONDS); int more = countFiles(); if (more <= before) break; before = more; } // restart GC getCluster().start(); sleepUninterruptibly(15, TimeUnit.SECONDS); int after = countFiles(); VerifyIngest.verifyIngest(c, vopts, new ScannerOpts()); assertTrue(after < before); }