/**
 * Returns the distinct set of region servers currently hosting at least one
 * region of the given table in the mini cluster.
 *
 * @param cluster mini cluster to inspect
 * @param table   table name as raw bytes
 * @return servers holding any region of the table (deduplicated)
 * @throws Exception if the cluster lookup fails
 */
private Set<ServerName> getServersForTable(MiniHBaseCluster cluster, byte[] table) throws Exception {
  Set<ServerName> hostingServers = new HashSet<ServerName>();
  for (HRegion tableRegion : cluster.getRegions(table)) {
    hostingServers.add(cluster.getServerHoldingRegion(tableRegion.getRegionName()));
  }
  return hostingServers;
}
}
/**
 * Records the identity of the region being tracked and wires up the
 * prune-upper-bound writer that persists compaction state to the state table.
 *
 * @param env               coprocessor environment of the hosting region
 * @param stateTable        table used to persist janitor/prune state
 * @param pruneFlushInterval how often (ms) the prune upper bound is flushed
 */
public CompactionState(final RegionCoprocessorEnvironment env, final TableName stateTable, long pruneFlushInterval) {
  this.regionName = env.getRegion().getRegionName();
  this.regionNameAsString = env.getRegion().getRegionNameAsString();
  // Supplier defers table creation until the janitor actually needs it.
  DataJanitorState.TableSupplier stateTableSupplier = new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return env.getTable(stateTable);
    }
  };
  DataJanitorState janitorState = new DataJanitorState(stateTableSupplier);
  this.pruneUpperBoundWriterSupplier =
      new PruneUpperBoundWriterSupplier(stateTable, janitorState, pruneFlushInterval);
  this.pruneUpperBoundWriter = pruneUpperBoundWriterSupplier.get();
}
/**
 * Captures the enclosing region's name and sets up the writer used to persist
 * the prune upper bound into the given state table.
 *
 * @param env               region coprocessor environment
 * @param stateTable        name of the table holding janitor state
 * @param pruneFlushInterval flush interval (ms) for the prune writer
 */
public CompactionState(final RegionCoprocessorEnvironment env, final TableName stateTable, long pruneFlushInterval) {
  this.regionName = env.getRegion().getRegionName();
  this.regionNameAsString = env.getRegion().getRegionNameAsString();
  // Lazily resolves the state table each time the janitor asks for it.
  DataJanitorState janitor = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return env.getTable(stateTable);
    }
  });
  this.pruneUpperBoundWriterSupplier =
      new PruneUpperBoundWriterSupplier(stateTable, janitor, pruneFlushInterval);
  this.pruneUpperBoundWriter = pruneUpperBoundWriterSupplier.get();
}
/**
 * Two HRegions are equal iff their region names are byte-for-byte equal.
 * Kept consistent with {@code hashCode()}, which also hashes the region name.
 */
@Override
public boolean equals(Object o) {
  if (o instanceof HRegion) {
    HRegion other = (HRegion) o;
    return Bytes.equals(this.getRegionName(), other.getRegionName());
  }
  return false;
}
// Tail of a cache-flush handler (opening try is outside this view): on IOException,
// log the failure, naming the region when one is in scope — region may legitimately
// be null here, hence the conditional suffix. checkIOException unwraps remote
// exceptions so the underlying cause is logged rather than the RPC wrapper.
} catch (IOException e) { LOG.error("Cache flush failed" + (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionName())) : ""), RemoteExceptionHandler.checkIOException(e));
/**
 * Human-readable label for this flush request, e.g. "[flush region <name>]".
 * Region names may contain non-printable bytes, hence toStringBinary.
 */
@Override
public String toString() {
  StringBuilder label = new StringBuilder("[flush region ");
  label.append(Bytes.toStringBinary(region.getRegionName()));
  label.append("]");
  return label.toString();
}
}
/**
 * Hashes the region name, matching {@code equals}, which compares region
 * names — equal regions therefore hash equally.
 */
@Override
public int hashCode() {
  return Bytes.hashCode(getRegionName());
}
// Ask the master to (re)assign this region; enclosing method is outside this view.
cluster.getMaster().assign(region.getRegionName());
/**
 * Builds an HServerLoad snapshot for this region server: per-region load for
 * every online region (keyed by region name), request counts, current heap
 * usage in MB, and the loaded WAL coprocessors.
 *
 * @return a freshly constructed load report for the master
 */
HServerLoad buildServerLoad() {
  TreeMap<byte[], HServerLoad.RegionLoad> loadByRegion =
      new TreeMap<byte[], HServerLoad.RegionLoad>(Bytes.BYTES_COMPARATOR);
  for (HRegion onlineRegion : getOnlineRegionsLocalContext()) {
    loadByRegion.put(onlineRegion.getRegionName(), createRegionLoad(onlineRegion));
  }
  MemoryUsage heap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
  int usedMB = (int) (heap.getUsed() / 1024 / 1024);
  int maxMB = (int) (heap.getMax() / 1024 / 1024);
  return new HServerLoad(requestCount.get(), (int) metrics.getRequests(), usedMB, maxMB,
      loadByRegion, this.hlog.getCoprocessorHost().getCoprocessors());
}
// Debug-only trace of a coprocessor protocol registration; guarded to avoid
// the string concatenation cost when debug logging is off. The closing brace
// of this if-block is outside this view.
if (LOG.isDebugEnabled()) { LOG.debug("Registered protocol handler: region="+ Bytes.toStringBinary(getRegionName())+" protocol="+protocol.getName());
/**
 * Updates the catalog table after a merge: deletes the rows of the two old
 * regions and writes the (offlined) merged region's info back under its row.
 *
 * <p>Fix: the original dereferenced {@code latestRegion.getRegionName()}
 * unconditionally, but the loop body sets {@code latestRegion = null} when a
 * deleted region matches it — so the second iteration (or a null
 * {@code latestRegion} on entry) threw NullPointerException. A null guard is
 * now applied before the comparison.
 *
 * @param oldRegion1 row key / region name of the first merged-away region
 * @param oldRegion2 row key / region name of the second merged-away region
 * @param newRegion  the region resulting from the merge
 * @throws IOException if a catalog delete or put fails
 */
@Override
protected void updateMeta(final byte [] oldRegion1, final byte [] oldRegion2, HRegion newRegion) throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    // Invalidate the cached latest region if it is one of the deleted ones.
    if (latestRegion != null && Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  // The merged region is written back offline; it is brought online separately.
  newRegion.getRegionInfo().setOffline(true);
  Put put = new Put(newRegion.getRegionName());
  put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      Writables.getBytes(newRegion.getRegionInfo()));
  table.put(put);
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
  }
}
}
// Write the updated HRegionInfo for the merged region into the ROOT catalog
// region under the region-name row key, then trace it at debug level.
// The closing brace of the debug guard is outside this view.
Put put = new Put(newRegion.getRegionName()); put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(newInfo)); root.put(put); if(LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
@Override public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) { MiniHBaseCluster hbaseCluster = getHBaseCluster(); Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); // make sure consumer config cache is updated for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) { List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName)); for (HRegion region : serverRegions) { results.put(region.getRegionName(), function.apply(region)); } } return results; }
@Override public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) { MiniHBaseCluster hbaseCluster = getHBaseCluster(); Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); // make sure consumer config cache is updated for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) { List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName)); for (HRegion region : serverRegions) { results.put(region.getRegionName(), function.apply(region)); } } return results; }
@Override public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) { MiniHBaseCluster hbaseCluster = getHBaseCluster(); Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); // make sure consumer config cache is updated for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) { List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName)); for (HRegion region : serverRegions) { results.put(region.getRegionName(), function.apply(region)); } } return results; }
@Override public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) { MiniHBaseCluster hbaseCluster = getHBaseCluster(); Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); // make sure consumer config cache is updated for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) { List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName)); for (HRegion region : serverRegions) { results.put(region.getRegionName(), function.apply(region)); } } return results; }
@Override public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) { MiniHBaseCluster hbaseCluster = getHBaseCluster(); Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); // make sure consumer config cache is updated for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) { List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName)); for (HRegion region : serverRegions) { results.put(region.getRegionName(), function.apply(region)); } } return results; }
// Take a row lock on the catalog row for region r before mutating it; the
// matching try/finally that releases the lock continues outside this view.
meta.checkResources(); byte[] row = r.getRegionName(); Integer lid = meta.obtainRowLock(row); try {
// Head of the per-region load builder: accumulates store/storefile counts for
// the given region (body continues outside this view).
private HServerLoad.RegionLoad createRegionLoad(final HRegion r) { byte[] name = r.getRegionName(); int stores = 0; int storefiles = 0;
if (info1 == null) { throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " + Bytes.toStringBinary(meta1.getRegionName())); Bytes.toStringBinary(meta2.getRegionName()));