/**
 * Reads the NameNode's JMX servlet and returns the current {@code FileInfoOps}
 * counter from the {@code Hadoop:service=NameNode,name=NameNodeActivity} MBean.
 *
 * @return the FileInfoOps count reported by the NameNode, or 0 if the bean is not found
 * @throws Exception if the JMX page cannot be fetched or parsed
 */
@SuppressWarnings("unchecked")
long getOpts() throws Exception {
  String uri = getCluster().getMiniDfs().getHttpUri(0);
  URL url = new URL(uri + "/jmx");
  log.debug("Fetching web page " + url);
  String jsonString;
  // Close the stream when done so the underlying HTTP connection is not leaked.
  try (java.io.InputStream in = url.openStream()) {
    jsonString = FunctionalTestUtils.readAll(in);
  }
  Gson gson = new Gson();
  Map<Object,Object> jsonObject = (Map<Object,Object>) gson.fromJson(jsonString, Object.class);
  List<Object> beans = (List<Object>) jsonObject.get("beans");
  for (Object bean : beans) {
    Map<Object,Object> map = (Map<Object,Object>) bean;
    if (map.get("name").toString().equals("Hadoop:service=NameNode,name=NameNodeActivity")) {
      // Gson parses untyped JSON numbers as doubles; convert back to a long counter.
      return (long) Double.parseDouble(map.get("FileInfoOps").toString());
    }
  }
  return 0;
}
@Test(timeout = 45 * 1000) public void test() throws Exception { final Connector conn = this.getConnector(); // Yes, there's a tabletserver assertEquals(1, conn.instanceOperations().getTabletServers().size()); final String tableName = getUniqueNames(1)[0]; conn.tableOperations().create(tableName); // Kill dfs cluster.getMiniDfs().shutdown(); // ask the tserver to do something final AtomicReference<Exception> ex = new AtomicReference<>(); Thread splitter = new Thread() { @Override public void run() { try { TreeSet<Text> splits = new TreeSet<>(); splits.add(new Text("X")); conn.tableOperations().addSplits(tableName, splits); } catch (Exception e) { ex.set(e); } } }; splitter.start(); // wait for the tserver to give up on writing to the WAL while (conn.instanceOperations().getTabletServers().size() == 1) { sleepUninterruptibly(1, TimeUnit.SECONDS); } }
@Test(timeout = 2 * 60 * 1000) public void test() throws Exception { final Connector conn = this.getConnector(); // Yes, there's a tabletserver assertEquals(1, conn.instanceOperations().getTabletServers().size()); final String tableName = getUniqueNames(1)[0]; conn.tableOperations().create(tableName); BatchWriter bw = conn.createBatchWriter(tableName, null); for (int i = 0; i < N; i++) { Mutation m = new Mutation("" + i); m.put("", "", ""); bw.addMutation(m); } bw.close(); conn.tableOperations().flush(tableName, null, null, true); // Kill dfs cluster.getMiniDfs().restartNameNode(false); assertEquals(N, Iterators.size(conn.createScanner(tableName, Authorizations.EMPTY).iterator())); }