final PositionImpl lastLedgerPosition =
        new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
if (log.isDebugEnabled()) {
    log.debug("[{}] Last ledger position {}", managedLedgerName, lastLedgerPosition);
}
boolean expired = hasLedgerRetentionExpired(ls.getTimestamp());
boolean overRetentionQuota = isLedgerRetentionOverSizeQuota();
if (log.isDebugEnabled()) {
    log.debug("[{}] Checking ledger {} -- time-old: {} sec -- "
                    + "expired: {} -- over-quota: {} -- current-ledger: {}",
            name, ls.getLedgerId(), (clock.millis() - ls.getTimestamp()) / 1000.0, expired,
            overRetentionQuota, currentLedger.getId());
}

if (ls.getLedgerId() == currentLedger.getId()) {
    log.debug("[{}] ledger id skipped for deletion as it is currently being written to", name,
            ls.getLedgerId());
    break;
} else if (expired) {
    log.debug("[{}] Ledger {} has expired, ts {}", name, ls.getLedgerId(), ls.getTimestamp());
    ledgersToDelete.add(ls);
} else if (overRetentionQuota) {
    log.debug("[{}] Ledger {} is over quota", name, ls.getLedgerId());
    ledgersToDelete.add(ls);
} else {
    log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, ls.getLedgerId());
    break;
}

if (isOffloadedNeedsDelete(ls.getOffloadContext()) && !ledgersToDelete.contains(ls)) {
    log.debug("[{}] Ledger {} has been offloaded, bookkeeper ledger needs to be deleted", name,
            ls.getLedgerId());
    offloadedLedgersToDelete.add(ls);
    ledgerCache.remove(ls.getLedgerId());
}
hash = (19 * hash) + getDescriptor().hashCode();
if (hasLedgerId()) {
    hash = (37 * hash) + LEDGERID_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getLedgerId());
}
if (hasEntries()) {
    hash = (37 * hash) + ENTRIES_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEntries());
}
if (hasSize()) {
    hash = (37 * hash) + SIZE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSize());
}
if (hasTimestamp()) {
    hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTimestamp());
}
if (hasOffloadContext()) {
    hash = (37 * hash) + OFFLOADCONTEXT_FIELD_NUMBER;
    hash = (53 * hash) + getOffloadContext().hashCode();
}
boolean expired = hasLedgerRetentionExpired(ls.getTimestamp());
boolean overRetentionQuota =
        TOTAL_SIZE_UPDATER.get(this) > ((long) config.getRetentionSizeInMB()) * 1024 * 1024;
log.debug(
        "[{}] Checking ledger {} -- time-old: {} sec -- expired: {} -- over-quota: {} -- current-ledger: {}",
        name, ls.getLedgerId(), (System.currentTimeMillis() - ls.getTimestamp()) / 1000.0, expired,
        overRetentionQuota, currentLedger.getId());

if (ls.getLedgerId() == currentLedger.getId() || (!expired && !overRetentionQuota)) {
    if (log.isDebugEnabled()) {
        if (!expired) {
            log.debug("[{}] ledger id skipped for deletion as unexpired: {}", name, ls.getLedgerId());
        }
        // a second debug statement is truncated in the snippet; only its trailing arguments remain:
        //         ls.getLedgerId(), TOTAL_SIZE_UPDATER.get(this), config.getRetentionSizeInMB());
    }
    break;
}

// deletion path: drop the ledger from the caches and adjust the counters
ledgerCache.remove(ls.getLedgerId());
ledgers.remove(ls.getLedgerId());
NUMBER_OF_ENTRIES_UPDATER.addAndGet(this, -ls.getEntries());
TOTAL_SIZE_UPDATER.addAndGet(this, -ls.getSize());
entryCache.invalidateAllEntries(ls.getLedgerId());
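The two retention checks above combine a time limit with a size quota. Below is a self-contained sketch, not the actual ManagedLedgerImpl code, of how that trimming decision can be expressed over plain ledger metadata; the LedgerMeta and RetentionPolicy types and the negative-means-unlimited convention are assumptions made for illustration.

import java.time.Clock;
import java.util.ArrayList;
import java.util.List;

class RetentionTrimSketch {
    // hypothetical stand-ins for the protobuf LedgerInfo and the managed-ledger retention config
    record LedgerMeta(long ledgerId, long entries, long size, long timestamp) {}
    record RetentionPolicy(long retentionTimeMillis, long retentionSizeBytes) {}

    static List<LedgerMeta> ledgersToDelete(List<LedgerMeta> oldestFirst, long currentLedgerId,
                                            long totalSizeBytes, RetentionPolicy policy, Clock clock) {
        List<LedgerMeta> toDelete = new ArrayList<>();
        long retainedSize = totalSizeBytes;
        for (LedgerMeta ls : oldestFirst) {
            boolean expired = policy.retentionTimeMillis() >= 0
                    && clock.millis() - ls.timestamp() > policy.retentionTimeMillis();
            boolean overQuota = policy.retentionSizeBytes() >= 0
                    && retainedSize > policy.retentionSizeBytes();
            if (ls.ledgerId() == currentLedgerId || (!expired && !overQuota)) {
                break; // never trim the ledger currently written to; stop at the first keeper
            }
            toDelete.add(ls);
            retainedSize -= ls.size(); // trimming this ledger shrinks the retained size
        }
        return toDelete;
    }
}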
ledgers.put(ls.getLedgerId(), ls);

LedgerInfo info = LedgerInfo.newBuilder().setLedgerId(id)
        .setEntries(lh.getLastAddConfirmed() + 1).setSize(lh.getLength())
        .setTimestamp(System.currentTimeMillis()).build();
long size = e.getValue().getSize();
sizeSummed += size;
boolean alreadyOffloaded = e.getValue().hasOffloadContext()
        && e.getValue().getOffloadContext().getComplete();
if (alreadyOffloaded) {
    alreadyOffloadedSize += size;
}

log.info("[{}] Going to automatically offload ledgers {}"
                + ", total size = {}, already offloaded = {}, to offload = {}",
        name, toOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()),
        sizeSummed, alreadyOffloadedSize, toOffloadSize);
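For context, here is a self-contained sketch of the threshold-based selection this fragment appears to belong to: walk the ledgers from newest to oldest, keep the newest ones under the threshold, and queue everything older and not yet offloaded, oldest first. The class and record names are invented for the example.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

class OffloadSelectionSketch {
    // hypothetical stand-in for LedgerInfo plus its offload-context "complete" flag
    record LedgerMeta(long ledgerId, long size, boolean alreadyOffloaded) {}

    static Deque<LedgerMeta> pickLedgersToOffload(List<LedgerMeta> newestFirst, long thresholdBytes) {
        Deque<LedgerMeta> toOffload = new ArrayDeque<>();
        long sizeSummed = 0;
        for (LedgerMeta l : newestFirst) {
            sizeSummed += l.size();
            // once the cumulative size exceeds the threshold, older non-offloaded ledgers are candidates
            if (!l.alreadyOffloaded() && sizeSummed > thresholdBytes) {
                toOffload.addFirst(l); // addFirst keeps the result in oldest-to-newest order
            }
        }
        return toOffload;
    }
}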
ml.getLedgersInfo().forEach((id, li) -> {
    LedgerInfo info = new LedgerInfo();
    info.ledgerId = li.getLedgerId();
    info.entries = li.getEntries();
    info.size = li.getSize();
    info.offloaded = li.hasOffloadContext() && li.getOffloadContext().getComplete();
    stats.ledgers.add(info);
});
return transformLedgerInfo(ledgerId, (oldInfo) -> {
    if (oldInfo.getOffloadContext().hasUidMsb()) {
        UUID oldUuid = new UUID(oldInfo.getOffloadContext().getUidMsb(),
                oldInfo.getOffloadContext().getUidLsb());
        log.info("[{}] Found previous offload attempt for ledger {}, uuid {}"
                + ", cleaning up", name, ledgerId, uuid);
        // the cleanup call is truncated in the snippet; only its final argument remains:
        //         "Previous failed offload");
    }
    LedgerInfo.Builder builder = oldInfo.toBuilder();
    builder.getOffloadContextBuilder()
            .setUidMsb(uuid.getMostSignificantBits())
            .setUidLsb(uuid.getLeastSignificantBits());
    // remainder of the transformation is truncated in the snippet
org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder subBuilder =
        org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addLedgerInfo(subBuilder.buildPartial());
LedgerInfo info = LedgerInfo.newBuilder().setLedgerId(lh.getId()).setEntries(entriesInLedger)
        .setSize(lh.getLength()).setTimestamp(System.currentTimeMillis()).build();
ledgers.put(lh.getId(), info);
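A minimal usage sketch of the same builder API used above, with made-up values, showing how the fields written here are read back elsewhere in these snippets.

import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo;

class LedgerInfoUsageSketch {
    static void example() {
        LedgerInfo info = LedgerInfo.newBuilder()
                .setLedgerId(42L)
                .setEntries(1_000L)                         // entries = lastAddConfirmed + 1
                .setSize(1_048_576L)                        // size in bytes
                .setTimestamp(System.currentTimeMillis())   // when the ledger was recorded
                .build();

        long avgEntryBytes = info.getEntries() > 0 ? info.getSize() / info.getEntries() : 0;
        boolean offloaded = info.hasOffloadContext() && info.getOffloadContext().getComplete();
        System.out.println("avg entry bytes=" + avgEntryBytes + ", offloaded=" + offloaded);
    }
}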
ml.getLedgersInfo().forEach((id, li) -> {
    LedgerInfo info = new LedgerInfo();
    info.ledgerId = li.getLedgerId();
    info.entries = li.getEntries();
    info.size = li.getSize();
    stats.ledgers.add(info);
});
size -= ledgers.values().stream().filter(li -> li.getLedgerId() < slowestConsumerLedgerId)
        .mapToLong(li -> li.getSize()).sum();

if (ledgerInfo.getEntries() == 0) {
    size -= consumedLedgerSize(currentLedgerSize, currentLedgerEntries, numEntries);
    return size;
} else {
    size -= consumedLedgerSize(ledgerInfo.getSize(), ledgerInfo.getEntries(), numEntries);
    return size;
}
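consumedLedgerSize(...) is referenced above but not shown. A plausible sketch, assuming it prorates a ledger's size by the number of entries already consumed (the real helper may differ):

class BacklogSizeSketch {
    // hypothetical rendition of consumedLedgerSize(size, entries, consumedEntries)
    static long consumedLedgerSize(long ledgerSize, long ledgerEntries, long consumedEntries) {
        if (ledgerEntries <= 0) {
            return 0;
        }
        long averageEntrySize = ledgerSize / ledgerEntries;              // integer bytes per entry
        return averageEntrySize * Math.min(consumedEntries, ledgerEntries);
    }
}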
return transformLedgerInfo(ledgerId, (oldInfo) -> {
    UUID existingUuid = new UUID(oldInfo.getOffloadContext().getUidMsb(),
            oldInfo.getOffloadContext().getUidLsb());
    if (existingUuid.equals(uuid)) {
        LedgerInfo.Builder builder = oldInfo.toBuilder();
        builder.getOffloadContextBuilder()
                .setTimestamp(clock.millis())
                .setComplete(true); // assumed completion of the chain, which is truncated in the snippet
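The offload context stores its UUID as two longs; the round trip used by the two transformLedgerInfo snippets is just the standard java.util.UUID split shown below.

import java.util.UUID;

class OffloadUuidSketch {
    static void example() {
        UUID uuid = UUID.randomUUID();
        long msb = uuid.getMostSignificantBits();   // stored as uidMsb in the offload context
        long lsb = uuid.getLeastSignificantBits();  // stored as uidLsb in the offload context
        UUID restored = new UUID(msb, lsb);         // how existingUuid/oldUuid are rebuilt above
        System.out.println(uuid.equals(restored));  // true
    }
}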
result = result && (hasLedgerId() == other.hasLedgerId());
if (hasLedgerId()) {
    result = result && (getLedgerId() == other.getLedgerId());
}
result = result && (hasEntries() == other.hasEntries());
if (hasEntries()) {
    result = result && (getEntries() == other.getEntries());
}
result = result && (hasSize() == other.hasSize());
if (hasSize()) {
    result = result && (getSize() == other.getSize());
}
result = result && (hasTimestamp() == other.hasTimestamp());
if (hasTimestamp()) {
    result = result && (getTimestamp() == other.getTimestamp());
}
result = result && (hasOffloadContext() == other.hasOffloadContext());
if (hasOffloadContext()) {
    result = result && getOffloadContext().equals(other.getOffloadContext());
}
public PersistentOfflineTopicStats estimateUnloadedTopicBacklog(ManagedLedgerFactoryImpl factory,
        TopicName topicName) throws Exception {
    String managedLedgerName = topicName.getPersistenceNamingEncoding();
    long numberOfEntries = 0;
    long totalSize = 0;
    final NavigableMap<Long, MLDataFormats.ManagedLedgerInfo.LedgerInfo> ledgers = new ConcurrentSkipListMap<>();
    final PersistentOfflineTopicStats offlineTopicStats = new PersistentOfflineTopicStats(managedLedgerName,
            brokerName);

    // calculate total managed ledger size and number of entries without loading the topic
    readLedgerMeta(factory, topicName, ledgers);
    for (MLDataFormats.ManagedLedgerInfo.LedgerInfo ls : ledgers.values()) {
        numberOfEntries += ls.getEntries();
        totalSize += ls.getSize();
        if (accurate) {
            offlineTopicStats.addLedgerDetails(ls.getEntries(), ls.getTimestamp(), ls.getSize(),
                    ls.getLedgerId());
        }
    }
    offlineTopicStats.totalMessages = numberOfEntries;
    offlineTopicStats.storageSize = totalSize;
    if (log.isDebugEnabled()) {
        log.debug("[{}] Total number of entries - {} and size - {}", managedLedgerName, numberOfEntries,
                totalSize);
    }

    // calculate per cursor message backlog
    calculateCursorBacklogs(factory, topicName, ledgers, offlineTopicStats);
    offlineTopicStats.statGeneratedAt.setTime(System.currentTimeMillis());

    return offlineTopicStats;
}
public org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo buildPartial() {
    org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo result =
            new org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
        to_bitField0_ |= 0x00000001;
    }
    result.ledgerId_ = ledgerId_;
    if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
        to_bitField0_ |= 0x00000002;
    }
    result.entries_ = entries_;
    if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
        to_bitField0_ |= 0x00000004;
    }
    result.size_ = size_;
    if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
        to_bitField0_ |= 0x00000008;
    }
    result.timestamp_ = timestamp_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
}