/**
 * Closes this buffer: logs the shutdown at fine level (when enabled) and
 * clears any pending deferred flush action under the instance lock.
 */
@Override
public void close() throws IOException {
  if (logger.fineEnabled()) {
    logger.fine("Closing buffer");
  }
  synchronized (this) {
    flushAction = null;
  }
}
/**
 * Creates a component log writer for the "Soplog" component.
 *
 * @param prefix string prefixed to all log messages
 * @param delegate instance that all logging requests are delegated to
 * @return a soplog-component log writer wrapping {@code delegate}
 */
public static ComponentLogWriter getSoplogLogWriter(String prefix, LogWriterI18n delegate) {
  return new ComponentLogWriter("Soplog", prefix, delegate);
}
/**
 * Builds the hoplog component log writer for the given region, delegating to
 * the shared {@code LogService} logger.
 */
private static ComponentLogWriter getLogger(String region) {
  return ComponentLogWriter.getHoplogLogWriter(region, LogService.logger());
}
/**
 * Creates a sorted oplog set backed by the supplied factory.
 *
 * @param factory source of soplog configuration and new soplogs
 * @param exec executor used to run flush tasks
 * @param ctor compactor for this set
 * @throws IOException declared for caller compatibility
 */
public SortedOplogSetImpl(final SortedOplogFactory factory, Executor exec, Compactor ctor)
    throws IOException {
  this.factory = factory;
  this.flusher = new AbortableTaskService(exec);
  this.compactor = ctor;

  rwlock = new ReentrantReadWriteLock();
  bufferCount = new AtomicInteger(0);
  unflushed = new ArrayDeque<SortedBuffer<Integer>>();
  // buffer 0 is the initial write target
  current = new AtomicReference<SortedBuffer<Integer>>(
      new SortedBuffer<Integer>(factory.getConfiguration(), 0));

  logger = ComponentLogWriter.getSoplogLogWriter(
      factory.getConfiguration().getName(), LogService.logger());
  if (logger.fineEnabled()) {
    logger.fine("Creating soplog set");
  }
}
// NOTE(review): this line is a fragment of a larger method — the `catch` has
// no visible `try` and the braces do not balance within this view, so the
// surrounding control flow cannot be verified from here. Code is kept
// byte-identical; only this comment was added.
if (logger.fineEnabled()) { fineLog("Compactor is busy. Ignoring ", request); } catch (Throwable e) { if (e instanceof CompactionIsDisabled) { if (logger.finestEnabled()) { logger.finest(e.getMessage()); logger.info(LocalizedStrings.ONE_ARG, "Compaction request submission failed", e);
// NOTE(review): fragment of a DDL hoplog flush routine; the enclosing method
// signature and parts of its try/catch are outside this view.
// NOTE(review): `filePath` is declared null and then dereferenced via
// filePath.getName() with no visible assignment in between — if no assignment
// exists in the elided code, this is a guaranteed NullPointerException.
// Confirm against the full method body.
// NOTE(review): the first logger.fine(...) call is not guarded by
// logger.fineEnabled(), unlike the rest of this file — verify intent.
assert valueIter != null; logger.fine("Initializing DDL hoplog flush operation"); Path filePath = null; try { if (logger.fineEnabled()) logger.fine("Creating a temporary hoplog " + filePath.getName()); SequenceFile.Writer writer = AbstractHoplog.getSequenceFileWriter(filePath, conf, logger, version); if (logger.fineEnabled()) logger.fine("Closing hoplog " + filePath.getName()); writer.close(); logger.warning(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e); throw new HDFSIOException(e.getMessage(), e);
/**
 * Opens an HFile-backed sorted oplog at the given file location.
 *
 * @param hfile the backing file; must not be null
 * @param sopConfig soplog configuration; must not be null
 * @throws IOException if qualifying the path against the file system fails
 */
public HFileSortedOplog(File hfile, SortedOplogConfiguration sopConfig) throws IOException {
  assert hfile != null;
  assert sopConfig != null;

  this.sopConfig = sopConfig;
  path = fs.makeQualified(new Path(hfile.toString()));
  // NOTE: cache tuning from sopConfig (cache-on-read, block cache, statistics)
  // was disabled here in favor of the default CacheConfig.
  hcache = new CacheConfig(hconf);
  logger = ComponentLogWriter.getSoplogLogWriter(sopConfig.getName(), LogService.logger());
}
/**
 * Appends a key/value pair to the soplog being written.
 *
 * @param key the sort key bytes; must not be null
 * @param value the value bytes; must not be null
 * @throws IOException if the underlying writer fails
 */
@Override
public void append(byte[] key, byte[] value) throws IOException {
  assert key != null;
  assert value != null;

  if (logger.finestEnabled()) {
    logger.finest(String.format("Appending key %s to %s", Hex.toHex(key), path));
  }
  // Previously this was wrapped in a catch that rethrew via
  // e.fillInStackTrace(), which discarded the original stack trace while
  // changing nothing else; let the IOException propagate unmodified instead.
  writer.append(key, value);
  if (bfw != null) {
    // record the key so the bloom filter can rule this file out on reads
    bfw.add(key, 0, key.length);
  }
}
/**
 * Creates a daemon thread whose name is the pool name plus a monotonically
 * increasing counter suffix.
 */
@Override
public Thread newThread(Runnable r) {
  String threadName = name + ":" + count.getAndIncrement();
  Thread t = new Thread(r, threadName);
  t.setDaemon(true);
  if (logger.fineEnabled()) {
    fineLog("New thread:", name, " poolSize:", getPoolSize(), " active:", getActiveCount());
  }
  return t;
}
}
/**
 * Returns true when finest-level logging is enabled either on the delegate
 * log writer or via this component's own level check.
 */
@Override
public boolean finestEnabled() {
  if (log.finestEnabled()) {
    return true;
  }
  return isAtLeast(LogLevel.FINEST);
}
/** Returns the underlying logger's name. */
@Override
public String toString() {
  return logger.name();
}
/**
 * Wraps a sorted oplog set with an append log for recoverability.
 *
 * @param sos the underlying sorted oplog set
 * @param bufferSize buffer size threshold, in bytes
 * @param memLimit fraction of the JVM's max heap usable for buffered data
 * @throws IOException if the initial append log file cannot be created
 */
public RecoverableSortedOplogSet(SortedOplogSet sos, long bufferSize, double memLimit)
    throws IOException {
  this.sos = sos;
  this.bufferSize = bufferSize;

  log = ComponentLogWriter.getSoplogLogWriter(
      sos.getFactory().getConfiguration().getName(), LogService.logger());
  rollLock = new ReentrantLock();
  writer = new AtomicReference<AppendLogWriter>(AppendLog.create(nextLogFile()));
  // cap buffered memory at memLimit fraction of the configured max heap
  maxBufferMemory = Math.round(
      memLimit * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
}
// NOTE(review): fragment of an append variant taking a ByteBuffer key — the
// method signature and the statements before/after this span are outside this
// view. Code is kept byte-identical; only this comment was added.
assert value != null; if (logger.finestEnabled()) { logger.finest(String.format("Appending key %s to %s", Hex.toHex(key.array(), key.arrayOffset(), key.remaining()), path));
/**
 * Submits a compaction task after validating it against the store's current
 * compaction configuration.
 *
 * @param task the compaction request to execute; must be a CompactionRequest
 * @return a future tracking the submitted task
 */
@Override
public <T> Future<T> submit(Callable<T> task) {
  CompactionRequest request = (CompactionRequest) task;
  HDFSCompactionConfig config =
      HDFSCompactionManager.this.storeConfig.getHDFSCompactionConfig();
  // reject requests issued after a stop or after the pool was resized
  throwIfStopped(request, config);
  throwIfPoolSizeChanged(request, config);
  if (logger.fineEnabled()) {
    fineLog("New:", task, " pool:", getPoolSize(), " active:", getActiveCount());
  }
  return super.submit(task);
}
/**
 * Returns true when fine-level logging is enabled either on the delegate
 * log writer or via this component's own level check.
 */
@Override
public boolean fineEnabled() {
  if (log.fineEnabled()) {
    return true;
  }
  return isAtLeast(LogLevel.FINE);
}
/**
 * Defers the flush completion to a later time, which is used to ensure
 * correct ordering of soplogs during parallel flushes.
 *
 * @param action the action to perform when ready
 */
public synchronized void defer(Runnable action) {
  // at most one deferred completion may be outstanding at a time
  assert flushAction == null;

  if (logger.fineEnabled()) {
    logger.fine("Deferring flush completion");
  }
  flushAction = action;
}