private String formatMessage(long extraSleepTime, List<String> gcDiffs) {
  String ret = "Detected pause in JVM or host machine (eg GC): " +
      "pause of approximately " + extraSleepTime + "ms\n";
  if (gcDiffs.isEmpty()) {
    ret += "No GCs detected";
  } else {
    ret += Joiner.on("\n").join(gcDiffs);
  }
  return ret;
}
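// A minimal standalone sketch (not from the HBase sources) of the message shape the method
// above produces: Joiner.on("\n") turns a List<String> of GC diffs into one block of text.
// Uses plain Guava; HBase itself may use a shaded Joiner. All values below are illustrative.
import com.google.common.base.Joiner;
import java.util.Arrays;
import java.util.List;

public class PauseMessageDemo {
  public static void main(String[] args) {
    List<String> gcDiffs = Arrays.asList(
        "GC pool 'PS MarkSweep' had collection(s): count=1 time=5000ms",
        "GC pool 'PS Scavenge' had collection(s): count=2 time=300ms");
    String msg = "Detected pause in JVM or host machine (eg GC): "
        + "pause of approximately " + 5126 + "ms\n"
        + (gcDiffs.isEmpty() ? "No GCs detected" : Joiner.on("\n").join(gcDiffs));
    System.out.println(msg);
  }
}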
public void shutdown() throws Exception {
  executor.shutdown();
  executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
  if (!ERRORS.isEmpty()) {
    StringBuilder builder =
        new StringBuilder().append("Major compaction failed, there were: ").append(ERRORS.size())
            .append(" regions / stores that failed compacting\n")
            .append("Failed compaction requests\n").append("--------------------------\n")
            .append(Joiner.on("\n").join(ERRORS));
    LOG.error(builder.toString());
  } else {
    // Only claim success when no compaction requests failed.
    LOG.info("All regions major compacted successfully");
  }
  if (connection != null) {
    connection.close();
  }
}
@Override
public synchronized String toString() {
  StringBuilder sb = new StringBuilder();
  sb.append("{ meta => ");
  sb.append((metaEntry != null) ? metaEntry.getRegionNameAsString() : "null");
  sb.append(", hdfs => ").append(getHdfsRegionDir());
  sb.append(", deployed => ").append(Joiner.on(", ").join(deployedEntries));
  sb.append(", replicaId => ").append(getReplicaId());
  sb.append(" }");
  return sb.toString();
}
@Override
public String toString() {
  String familiesString = toFamiliesString(families, family, qualifier);
  String[] params = new String[] {
    namespace != null ? "namespace=" + namespace : null,
    tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null,
    familiesString.length() > 0 ? "family=" + familiesString : null,
    extraParams.isEmpty() ? null : concatenateExtraParams()
  };
  return Joiner.on(",").skipNulls().join(params);
}
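// A minimal standalone sketch (not from the HBase sources): skipNulls() makes Joiner drop
// null entries instead of throwing NullPointerException, which is what lets the toString()
// above build an array with null placeholders for absent parameters. Values are made up.
import com.google.common.base.Joiner;

public class SkipNullsDemo {
  public static void main(String[] args) {
    String[] params = { "namespace=ns1", null, "family=cf1", null };
    // Prints "namespace=ns1,family=cf1"; a plain Joiner.on(",").join(params) would throw NPE.
    System.out.println(Joiner.on(",").skipNulls().join(params));
  }
}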
@Override
public String toString() {
  return Joiner.on(":").join(
      configuration.get(HConstants.ZOOKEEPER_QUORUM),
      configuration.get(HConstants.ZOOKEEPER_CLIENT_PORT),
      configuration.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
}
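// A minimal standalone sketch (not from the HBase sources): besides iterables and arrays,
// Joiner has a varargs join(first, second, rest...) overload, which the toString() above
// uses to build the familiar quorum:port:parent ZooKeeper connection string. The host,
// port, and znode values here are illustrative.
import com.google.common.base.Joiner;

public class ZkStringDemo {
  public static void main(String[] args) {
    // Prints "zk1.example.com:2181:/hbase"
    System.out.println(Joiner.on(":").join("zk1.example.com", "2181", "/hbase"));
  }
}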
private void startHttpServerThread(final String[] args) {
  LOG.info("Starting HBase Thrift server with HTTP server: " + Joiner.on(" ").join(args));
  httpServerException = null;
  httpServerThread = new Thread(() -> {
    try {
      thriftServer.run(args);
    } catch (Exception e) {
      httpServerException = e;
    }
  });
  httpServerThread.setName(ThriftServer.class.getSimpleName() + "-httpServer");
  httpServerThread.start();
}
private void startCmdLineThread(final String[] args) {
  LOG.info("Starting HBase Thrift server with command line: " + Joiner.on(" ").join(args));
  cmdLineException = null;
  cmdLineThread = new Thread(() -> {
    try {
      thriftServer.run(args);
    } catch (Exception e) {
      LOG.error("Error when starting thrift server", e);
      cmdLineException = e;
    }
  });
  cmdLineThread.setName(ThriftServer.class.getSimpleName() + "-cmdline");
  cmdLineThread.start();
}
/**
 * Assert that there are no threads running whose name matches the
 * given regular expression.
 * @param regex the regex to match against
 */
public static void assertNoThreadsMatching(String regex) {
  Pattern pattern = Pattern.compile(regex);
  ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
  ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
  for (ThreadInfo info : infos) {
    if (info == null) {
      continue;
    }
    if (pattern.matcher(info.getThreadName()).matches()) {
      Assert.fail("Leaked thread: " + info + "\n" +
          Joiner.on("\n").join(info.getStackTrace()));
    }
  }
}
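// A minimal standalone sketch (not from the HBase sources): Joiner accepts any Object[]
// and calls toString() on each element, which is how the helper above renders a
// StackTraceElement[] one frame per line.
import com.google.common.base.Joiner;

public class StackTraceJoinDemo {
  public static void main(String[] args) {
    StackTraceElement[] frames = Thread.currentThread().getStackTrace();
    // Each frame's toString() (e.g. "StackTraceJoinDemo.main(StackTraceJoinDemo.java:9)")
    // is joined with a newline separator.
    System.out.println(Joiner.on("\n").join(frames));
  }
}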
/**
 * Assert that the given comparator orders the given storefiles in the
 * same way that they're passed.
 */
private void assertOrdering(Comparator<? super HStoreFile> comparator, HStoreFile... sfs) {
  ArrayList<HStoreFile> sorted = Lists.newArrayList(sfs);
  Collections.shuffle(sorted);
  Collections.sort(sorted, comparator);
  LOG.debug("sfs: " + Joiner.on(",").join(sfs));
  LOG.debug("sorted: " + Joiner.on(",").join(sorted));
  assertTrue(Iterables.elementsEqual(Arrays.asList(sfs), sorted));
}
/**
 * List all of the files in 'dir' that match the regex 'pattern'.
 * Then check that this list is identical to 'expectedMatches'.
 * @throws IOException if the dir is inaccessible
 */
public static void assertGlobEquals(File dir, String pattern, String... expectedMatches)
    throws IOException {
  Set<String> found = Sets.newTreeSet();
  for (File f : FileUtil.listFiles(dir)) {
    if (f.getName().matches(pattern)) {
      found.add(f.getName());
    }
  }
  Set<String> expectedSet = Sets.newTreeSet(Arrays.asList(expectedMatches));
  Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
      Joiner.on(",").join(expectedSet),
      Joiner.on(",").join(found));
}
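// A minimal standalone sketch (not from the HBase sources) of the pattern above: joining
// both the expected and the actual TreeSet into strings before comparing yields a readable,
// order-stable "expected vs. actual" listing on failure instead of a raw set dump. The
// file names below are illustrative.
import com.google.common.base.Joiner;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class GlobAssertDemo {
  public static void main(String[] args) {
    Set<String> expected = new TreeSet<>(Arrays.asList("edits.001", "edits.002"));
    Set<String> found = new TreeSet<>(Arrays.asList("edits.001"));
    String exp = Joiner.on(",").join(expected);
    String got = Joiner.on(",").join(found);
    if (!exp.equals(got)) {
      // Prints: Mismatch: expected <edits.001,edits.002> but found <edits.001>
      System.out.println("Mismatch: expected <" + exp + "> but found <" + got + ">");
    }
  }
}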
private void checkParsing(ParsedLine parsed, Iterable<String> expected) {
  ArrayList<String> parsedCols = new ArrayList<>();
  for (int i = 0; i < parsed.getColumnCount(); i++) {
    parsedCols.add(Bytes.toString(parsed.getLineBytes(),
        parsed.getColumnOffset(i), parsed.getColumnLength(i)));
  }
  if (!Iterables.elementsEqual(parsedCols, expected)) {
    fail("Expected: " + Joiner.on(",").join(expected) + "\n" +
        "Got: " + Joiner.on(",").join(parsedCols));
  }
}
LOG.error("Server types {} don't support IP address binding at the moment. See " + "https://issues.apache.org/jira/browse/HBASE-2155 for details.", Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP())); throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
private void waitAndVerifyRegionNum(HMaster master, TableName tablename,
    int expectedRegionNum) throws Exception {
  List<Pair<RegionInfo, ServerName>> tableRegionsInMeta;
  List<RegionInfo> tableRegionsInMaster;
  long timeout = System.currentTimeMillis() + waitTime;
  while (System.currentTimeMillis() < timeout) {
    tableRegionsInMeta =
        MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename);
    tableRegionsInMaster =
        master.getAssignmentManager().getRegionStates().getRegionsOfTable(tablename);
    LOG.info(Objects.toString(tableRegionsInMaster));
    LOG.info(Objects.toString(tableRegionsInMeta));
    int tableRegionsInMetaSize = tableRegionsInMeta.size();
    int tableRegionsInMasterSize = tableRegionsInMaster.size();
    if (tableRegionsInMetaSize == expectedRegionNum
        && tableRegionsInMasterSize == expectedRegionNum) {
      break;
    }
    Thread.sleep(250);
  }
  tableRegionsInMeta =
      MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename);
  LOG.info("Regions after merge: " + Joiner.on(',').join(tableRegionsInMeta));
  assertEquals(expectedRegionNum, tableRegionsInMeta.size());
}
String thread = Thread.currentThread().getName();
LOG.info("== [" + thread + "] Merging regions into one region: " +
    Joiner.on(",").join(overlap));
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("[" + thread + "] Created new empty container region: " + newRegion +
    " to contain regions: " + Joiner.on(",").join(overlap));
debugLsr(region.getRegionFileSystem().getRegionDir());
private static void logError(String msg, Context context) throws IOException {
  TableName table = getTableName(context.getConfiguration());
  LOG.error("Failure in chain verification: " + msg);
  try (Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
      Admin admin = connection.getAdmin()) {
    LOG.error("cluster status:\n" + admin.getClusterStatus());
    LOG.error("table regions:\n" + Joiner.on("\n").join(admin.getTableRegions(table)));
  }
}
protected void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected)
    throws IOException {
  scanner.seek(KeyValueUtil.createFirstOnRow(new byte[] {}));
  List<Cell> returned = Lists.newArrayList();
  while (true) {
    Cell next = scanner.next();
    if (next == null) {
      break;
    }
    returned.add(next);
  }
  assertTrue(
      "Got:\n" + Joiner.on("\n").join(returned) +
          "\nExpected:\n" + Joiner.on("\n").join(expected),
      Iterables.elementsEqual(Arrays.asList(expected), returned));
  assertNull(scanner.peek());
}
private Table createTableAndLoadData(HMaster master, TableName tablename, int numRegions,
    int replication) throws Exception {
  assertTrue("ROWSIZE must be > numRegions: " + numRegions, ROWSIZE > numRegions);
  byte[][] splitRows = new byte[numRegions - 1][];
  for (int i = 0; i < splitRows.length; i++) {
    splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
  }
  Table table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
  LOG.info("Created " + table.getName());
  if (replication > 1) {
    HBaseTestingUtility.setReplicas(ADMIN, tablename, replication);
    LOG.info("Set replication of " + replication + " on " + table.getName());
  }
  loadData(table);
  LOG.info("Loaded " + table.getName());
  verifyRowCount(table, ROWSIZE);
  LOG.info("Verified " + table.getName());
  TEST_UTIL.waitUntilAllRegionsAssigned(tablename);
  LOG.info("All regions assigned for table - " + table.getName());
  List<Pair<RegionInfo, ServerName>> tableRegions =
      MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename);
  assertEquals("Wrong number of regions in table " + tablename,
      numRegions * replication, tableRegions.size());
  LOG.info(tableRegions.size() + " regions after load: " + Joiner.on(',').join(tableRegions));
  return table;
}
@Test
public void testLogDirectoryShouldBeDeletedAfterSuccessfulSplit() throws IOException {
  generateWALs(-1);
  useDifferentDFSClient();
  WALSplitter.split(HBASELOGDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
  FileStatus[] statuses = null;
  try {
    statuses = fs.listStatus(WALDIR);
    if (statuses != null) {
      fail("Files left in log dir: " + Joiner.on(",").join(FileUtil.stat2Paths(statuses)));
    }
  } catch (FileNotFoundException e) {
    // hadoop 0.21 throws FNFE whereas hadoop 0.20 returns null
  }
}
@Test
@SuppressWarnings({ "deprecation" })
public void shouldCreateAndRunSubmittableJob() throws Exception {
  RowCounter rCounter = new RowCounter();
  rCounter.setConf(HBaseConfiguration.create());
  String[] args = new String[] { "\temp", "tableA", "column1", "column2", "column3" };
  JobConf jobConfig = rCounter.createSubmittableJob(args);

  assertNotNull(jobConfig);
  // assertEquals takes the expected value first.
  assertEquals(0, jobConfig.getNumReduceTasks());
  assertEquals("rowcounter", jobConfig.getJobName());
  assertEquals(Result.class, jobConfig.getMapOutputValueClass());
  assertEquals(RowCounterMapper.class, jobConfig.getMapperClass());
  assertEquals(Joiner.on(' ').join("column1", "column2", "column3"),
      jobConfig.get(TableInputFormat.COLUMN_LIST));
  assertEquals(ImmutableBytesWritable.class, jobConfig.getMapOutputKeyClass());
}
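// A minimal standalone sketch (not from the HBase sources): Joiner.on also accepts a single
// char separator, so the test above can reconstruct the space-delimited column list that
// it expects under TableInputFormat.COLUMN_LIST.
import com.google.common.base.Joiner;

public class ColumnListDemo {
  public static void main(String[] args) {
    // Prints "column1 column2 column3"
    System.out.println(Joiner.on(' ').join("column1", "column2", "column3"));
  }
}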