@Override
public Object apply(LogSegment segment) {
    // Configured retention read once; the original queried kafkaLog.config().retentionMs() twice.
    final long retentionMs = kafkaLog.config().retentionMs();
    // Segment age relative to the injected clock wrapper (JODA_TIME), in milliseconds.
    final long segmentAge = JODA_TIME.milliseconds() - segment.lastModified();
    // A segment is eligible for deletion once it is strictly older than the retention window.
    final boolean shouldDelete = segmentAge > retentionMs;
    if (shouldDelete) {
        // Fixed typo in the log message: "older than then maximum" -> "older than the maximum".
        loggerForCleaner.debug(
                "[cleanup-time] Removing segment with age {}s, older than the maximum retention age {}s",
                MILLISECONDS.toSeconds(segmentAge),
                MILLISECONDS.toSeconds(retentionMs));
    }
    return shouldDelete;
}
});
/**
 * Flushes every log whose time since last flush has reached its configured flush interval.
 * Java transliteration of what the Scala implementation does, which unfortunately is declared
 * as private.
 */
protected void flushDirtyLogs() {
    LOG.debug("Checking for dirty logs to flush...");
    for (final Map.Entry<TopicAndPartition, Log> entry :
            JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet()) {
        final TopicAndPartition partition = entry.getKey();
        final Log partitionLog = entry.getValue();
        // Milliseconds elapsed since this log was last flushed, per the injected clock wrapper.
        final long sinceLastFlushMs = JODA_TIME.milliseconds() - partitionLog.lastFlushTime();
        try {
            LOG.debug(
                    "Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}",
                    partition.topic(),
                    partitionLog.config().flushInterval(),
                    partitionLog.lastFlushTime(),
                    sinceLastFlushMs);
            if (sinceLastFlushMs >= partitionLog.config().flushMs()) {
                partitionLog.flush();
            }
        } catch (Exception e) {
            // One failing topic must not prevent the remaining logs from being flushed.
            LOG.error("Error flushing topic " + partition.topic(), e);
        }
    }
}
@Override
public Object apply(LogSegment segment) {
    // Configured retention read once; the original queried kafkaLog.config().retentionMs() twice.
    final long retentionMs = kafkaLog.config().retentionMs();
    // Segment age relative to the injected clock wrapper (JODA_TIME), in milliseconds.
    final long segmentAge = JODA_TIME.milliseconds() - segment.lastModified();
    // A segment is eligible for deletion once it is strictly older than the retention window.
    final boolean shouldDelete = segmentAge > retentionMs;
    if (shouldDelete) {
        // Fixed typo in the log message: "older than then maximum" -> "older than the maximum".
        loggerForCleaner.debug(
                "[cleanup-time] Removing segment with age {}s, older than the maximum retention age {}s",
                MILLISECONDS.toSeconds(segmentAge),
                MILLISECONDS.toSeconds(retentionMs));
    }
    return shouldDelete;
}
});
@Override
public Object apply(LogSegment segment) {
    // Configured retention read once; the original queried kafkaLog.config().retentionMs() twice.
    final long retentionMs = kafkaLog.config().retentionMs();
    // Segment age relative to the injected clock wrapper (JODA_TIME), in milliseconds.
    final long segmentAge = JODA_TIME.milliseconds() - segment.lastModified();
    // A segment is eligible for deletion once it is strictly older than the retention window.
    final boolean shouldDelete = segmentAge > retentionMs;
    if (shouldDelete) {
        // Fixed typo in the log message: "older than then maximum" -> "older than the maximum".
        loggerForCleaner.debug(
                "[cleanup-time] Removing segment with age {}s, older than the maximum retention age {}s",
                MILLISECONDS.toSeconds(segmentAge),
                MILLISECONDS.toSeconds(retentionMs));
    }
    return shouldDelete;
}
});
/**
 * Flushes every log whose time since last flush has reached its configured flush interval.
 * Java transliteration of what the Scala implementation does, which unfortunately is declared
 * as private.
 */
protected void flushDirtyLogs() {
    LOG.debug("Checking for dirty logs to flush...");
    for (final Map.Entry<TopicAndPartition, Log> entry :
            JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet()) {
        final TopicAndPartition partition = entry.getKey();
        final Log partitionLog = entry.getValue();
        // Milliseconds elapsed since this log was last flushed, per the injected clock wrapper.
        final long sinceLastFlushMs = JODA_TIME.milliseconds() - partitionLog.lastFlushTime();
        try {
            LOG.debug(
                    "Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}",
                    partition.topic(),
                    partitionLog.config().flushInterval(),
                    partitionLog.lastFlushTime(),
                    sinceLastFlushMs);
            if (sinceLastFlushMs >= partitionLog.config().flushMs()) {
                partitionLog.flush();
            }
        } catch (Exception e) {
            // One failing topic must not prevent the remaining logs from being flushed.
            LOG.error("Error flushing topic " + partition.topic(), e);
        }
    }
}
/**
 * Flushes every log whose time since last flush has reached its configured flush interval.
 * Java transliteration of what the Scala implementation does, which unfortunately is declared
 * as private.
 */
protected void flushDirtyLogs() {
    LOG.debug("Checking for dirty logs to flush...");
    for (final Map.Entry<TopicAndPartition, Log> entry :
            JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet()) {
        final TopicAndPartition partition = entry.getKey();
        final Log partitionLog = entry.getValue();
        // Milliseconds elapsed since this log was last flushed, per the injected clock wrapper.
        final long sinceLastFlushMs = JODA_TIME.milliseconds() - partitionLog.lastFlushTime();
        try {
            LOG.debug(
                    "Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}",
                    partition.topic(),
                    partitionLog.config().flushInterval(),
                    partitionLog.lastFlushTime(),
                    sinceLastFlushMs);
            if (sinceLastFlushMs >= partitionLog.config().flushMs()) {
                partitionLog.flush();
            }
        } catch (Exception e) {
            // One failing topic must not prevent the remaining logs from being flushed.
            LOG.error("Error flushing topic " + partition.topic(), e);
        }
    }
}