/** Returns the name of the table this store belongs to, derived from its region. */
@Override
public TableName getTableName() {
  RegionInfo regionInfo = getRegionInfo();
  return regionInfo.getTable();
}
/** Returns true iff this store's region is the primary (default) replica. */
@Override
public boolean isPrimaryReplicaStore() {
  // A store belongs to the primary replica exactly when its region carries the
  // default replica id.
  int replicaId = getRegionInfo().getReplicaId();
  return replicaId == RegionInfo.DEFAULT_REPLICA_ID;
}
/**
 * Adds a value to the memstore.
 * <p>
 * Takes the read lock so concurrent puts proceed in parallel with each other while being
 * excluded by operations that take the write lock (e.g. flush/compaction file swaps).
 * @param cell the cell to add
 * @param memstoreSizing accumulator updated with the size delta caused by this add
 */
public void add(final Cell cell, MemStoreSizing memstoreSizing) {
  lock.readLock().lock();
  try {
    if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
      // BUGFIX: the format string previously had the table name concatenated onto it and
      // supplied only two arguments for three {} placeholders; pass all three as arguments
      // so SLF4J substitutes them correctly (and lazily).
      LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
        this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
    }
    this.memstore.add(cell, memstoreSizing);
  } finally {
    lock.readLock().unlock();
    // Decrement after releasing the lock, mirroring the increment done under it.
    currentParallelPutCount.decrementAndGet();
  }
}
/**
 * Adds the specified values to the memstore.
 * <p>
 * Takes the read lock so concurrent puts proceed in parallel with each other while being
 * excluded by operations that take the write lock (e.g. flush/compaction file swaps).
 * @param cells the cells to add
 * @param memstoreSizing accumulator updated with the size delta caused by this add
 */
public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
  lock.readLock().lock();
  try {
    if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
      // BUGFIX: the format string previously had the table name concatenated onto it and
      // supplied only two arguments for three {} placeholders; pass all three as arguments
      // so SLF4J substitutes them correctly (and lazily).
      LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
        this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
    }
    memstore.add(cells, memstoreSizing);
  } finally {
    lock.readLock().unlock();
    // Decrement after releasing the lock, mirroring the increment done under it.
    currentParallelPutCount.decrementAndGet();
  }
}
/** * Determines if Store should be split. */ public Optional<byte[]> getSplitPoint() { this.lock.readLock().lock(); try { // Should already be enforced by the split policy! assert !this.getRegionInfo().isMetaRegion(); // Not split-able if we find a reference store file present in the store. if (hasReferences()) { LOG.trace("Not splittable; has references: {}", this); return Optional.empty(); } return this.storeEngine.getStoreFileManager().getSplitPoint(); } catch(IOException e) { LOG.warn("Failed getting store size for {}", this, e); } finally { this.lock.readLock().unlock(); } return Optional.empty(); }
/**
 * Generate a name for throttling, to prevent name conflict when multiple IO operation running
 * parallel on the same store.
 * @param store the Store instance on which IO operation is happening
 * @param opName Name of the IO operation, e.g. "flush", "compaction", etc.
 * @return The name for throttling
 */
public static String getNameForThrottling(HStore store, String opName) {
  // Atomically take the next counter value, wrapping back to 0 at Integer.MAX_VALUE.
  // getAndUpdate returns the previous value and retries the CAS internally, replacing
  // the original hand-rolled compare-and-set loop with identical semantics.
  int counter = NAME_COUNTER.getAndUpdate(c -> c == Integer.MAX_VALUE ? 0 : c + 1);
  return store.getRegionInfo().getEncodedName() + NAME_DELIMITER
    + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName
    + NAME_DELIMITER + counter;
}
}
"Completed" + (cr.isMajor() ? " major" : "") + " compaction of " + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in " + this + " of " + this.getRegionInfo().getShortNameToLog() + " into "); if (sfs.isEmpty()) { message.append("none, ");
/**
 * Swaps the given compacted-away files for the compaction result files in the store file
 * manager, and updates the space-quota region size accordingly.
 * <p>
 * Holds the store write lock for the whole swap so readers never observe a partial file set.
 * @param compactedFiles files consumed by the compaction, to be removed
 * @param result files produced by the compaction, to be added
 */
@VisibleForTesting
void replaceStoreFiles(Collection<HStoreFile> compactedFiles, Collection<HStoreFile> result)
    throws IOException {
  this.lock.writeLock().lock();
  try {
    this.storeEngine.getStoreFileManager().addCompactionResults(compactedFiles, result);
    // filesCompacting has its own monitor; clear the now-replaced files from it while
    // still under the write lock so no new compaction sees them in-flight.
    synchronized (filesCompacting) {
      filesCompacting.removeAll(compactedFiles);
    }
    // These may be null when the RS is shutting down. The space quota Chores will fix the Region
    // sizes later so it's not super-critical if we miss these.
    RegionServerServices rsServices = region.getRegionServerServices();
    if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
      updateSpaceQuotaAfterFileReplacement(
          rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
          compactedFiles, result);
    }
  } finally {
    this.lock.writeLock().unlock();
  }
}
LOG.info("Refreshing store files for region " + this.getRegionInfo().getRegionNameAsString() + " files to add: " + toBeAddedFiles + " files to remove: " + toBeRemovedFiles);
completeCompaction(delSfs); LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " + this + " of " + this.getRegionInfo().getRegionNameAsString() + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
String regionInfo; if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) { regionInfo = ((HStore)this.storeConfigInfo).getRegionInfo().getRegionNameAsString(); } else { regionInfo = this.toString();
/**
 * Builds a {@link StripeCompactor} over a fully mocked store/region for unit testing.
 * <p>
 * The returned compactor overrides both createScanner variants to hand back a canned
 * {@code Scanner}, so tests exercise the compactor's writer/flow logic without real HFiles.
 * Writer creation is captured by {@code writers} for later assertions.
 */
private StripeCompactor createCompactor() throws Exception {
  HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  HStore store = mock(HStore.class);
  HRegionInfo info = mock(HRegionInfo.class);
  when(info.getRegionNameAsString()).thenReturn("testRegion");
  when(store.getColumnFamilyDescriptor()).thenReturn(col);
  when(store.getRegionInfo()).thenReturn(info);
  // Any tmp-writer request is answered by the capturing stub, regardless of arguments.
  when(
    store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
      anyBoolean())).thenAnswer(writers);
  Configuration conf = HBaseConfiguration.create();
  // Toggle private readers per test parameterization.
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner();
  return new StripeCompactor(conf, store) {
    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
        byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
        long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
.addAndGet(storeFile.getReader().getTotalUncompressedBytes()); if (LOG.isInfoEnabled()) { LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + ", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="
throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); if (control) { throughputController.start(flushName);
try { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString()); FileSystem srcFs = srcPath.getFileSystem(conf); srcFs.access(srcPath, FsAction.READ_WRITE); " last=" + Bytes.toStringBinary(lastKey)); LOG.debug("Region bounds: first=" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey())); if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) { throw new WrongRegionException( "Bulk load file " + srcPath.toString() + " does not fit inside region " + this.getRegionInfo().getRegionNameAsString());
when(store.areWritesEnabled()).thenReturn(true); when(store.getFileSystem()).thenReturn(mock(FileSystem.class)); when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME)); when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
when(store.areWritesEnabled()).thenReturn(true); when(store.getFileSystem()).thenReturn(mock(FileSystem.class)); when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME)); when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName()); request.setTracker(tracker); LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName() + ": Initiating " + (request.isMajor() ? "major" : "minor") + " compaction" + (request.isAllFiles() ? " (all files)" : ""));
long time = snapshot.getTimeRangeTracker().getMax(); mobFileWriter = mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(), store.getColumnFamilyDescriptor().getCompressionType(), store.getRegionInfo().getStartKey(), false); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); boolean control = throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); if (control) { throughputController.start(flushName);
if (!finished) { throw new InterruptedIOException("Aborting compaction of store " + store + " in region " + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");