// Fragment (method body continues beyond this excerpt): fans a read of entry
// `eid` out to every bookie in its write set, collecting per-bookie
// InputStream results into `readResults`; `numBookies` counts outstanding
// replies before `callback` fires.
public void readEntriesFromAllBookies(final LedgerHandle lh, long eid, final GenericCallback<Set<ReadResult<InputStream>>> callback) { List<Integer> writeSet = lh.distributionSchedule.getWriteSet(eid); final AtomicInteger numBookies = new AtomicInteger(writeSet.size()); final Set<ReadResult<InputStream>> readResults = new HashSet<ReadResult<InputStream>>();
void initiate() { // capture currentNonDurableLastAddConfirmed // remember that we are inside OrderedExecutor, this induces a strict ordering // on the sequence of events this.currentNonDurableLastAddConfirmed = lh.pendingAddsSequenceHead; if (LOG.isDebugEnabled()) { LOG.debug("force {} clientNonDurableLac {}", lh.ledgerId, currentNonDurableLastAddConfirmed); } // we need to send the request to every bookie in the ensamble this.ackSet = lh.distributionSchedule.getEnsembleAckSet(); DistributionSchedule.WriteSet writeSet = lh.getDistributionSchedule() .getEnsembleSet(currentNonDurableLastAddConfirmed); try { for (int i = 0; i < writeSet.size(); i++) { sendForceLedgerRequest(writeSet.get(i)); } } finally { writeSet.recycle(); } }
/**
 * Tells whether the bookie at {@code bookieIndex} stores {@code entryId},
 * as determined by the distribution schedule.
 *
 * @param bookieIndex the bookie index in the ensemble.
 * @return true if the schedule places the entry on that bookie.
 */
public boolean isStoredEntryId(long entryId, int bookieIndex) {
    final boolean stored = schedule.hasEntry(entryId, bookieIndex);
    return stored;
}
/**
 * Creates a pending read-LAC operation over the given ensemble.
 * One response is expected from each ensemble member; completion is
 * judged against the ledger's coverage set.
 */
PendingReadLacOp(LedgerHandle lh, BookieClient bookieClient, List<BookieSocketAddress> ensemble, LacCallback cb) {
    this.lh = lh;
    this.bookieClient = bookieClient;
    this.cb = cb;
    this.currentEnsemble = ensemble;
    // One outstanding response per bookie in the ensemble.
    this.numResponsesPending = ensemble.size();
    this.coverageSet = lh.distributionSchedule.getCoverageSet();
}
/**
 * Creates a pending write-LAC operation for the given ensemble.
 * The LAC starts out invalid until assigned by the caller.
 */
PendingWriteLacOp(LedgerHandle lh, ClientContext clientCtx, List<BookieSocketAddress> ensemble, AddLacCallback cb, Object ctx) {
    this.lh = lh;
    this.clientCtx = clientCtx;
    this.cb = cb;
    this.ctx = ctx;
    this.currentEnsemble = ensemble;
    // No LAC has been set yet for this operation.
    this.lac = LedgerHandle.INVALID_ENTRY_ID;
    this.ackSet = lh.distributionSchedule.getAckSet();
}
ReadLACAndEntryRequest(List<BookieSocketAddress> ensemble, long lId, long eId) { this.entryImpl = LedgerEntryImpl.create(lId, eId); this.ensemble = ensemble; this.writeSet = lh.getDistributionSchedule().getEnsembleSet(eId); if (clientCtx.getConf().enableReorderReadSequence) { this.orderedEnsemble = clientCtx.getPlacementPolicy().reorderReadLACSequence(ensemble, lh.getBookiesHealthInfo(), writeSet.copy()); } else { this.orderedEnsemble = writeSet.copy(); } }
/**
 * Get the first stored entry id of the fragment in the given failed bookie.
 *
 * @param bookieIndex
 *          the bookie index in the ensemble.
 * @return first stored entry id on the bookie, or
 *         {@code LedgerHandle.INVALID_ENTRY_ID} if none is found.
 */
public Long getFirstStoredEntryId(int bookieIndex) {
    long firstEntry = firstEntryId;
    // Scan at most ensemble.size() consecutive entries forward from the
    // fragment start. NOTE(review): this presumably relies on the
    // schedule's bookie/entry mapping repeating with period ensembleSize,
    // so a longer scan would find nothing new — confirm against the
    // DistributionSchedule implementation.
    for (int i = 0; i < ensemble.size() && firstEntry <= lastKnownEntryId; i++) {
        if (schedule.hasEntry(firstEntry, bookieIndex)) {
            return firstEntry;
        } else {
            firstEntry++;
        }
    }
    return LedgerHandle.INVALID_ENTRY_ID;
}
/**
 * Creates a read-last-confirmed operation over the given ensemble.
 * Recovery data is accumulated as the maximum over responses; completion
 * is judged against the ledger's coverage set.
 */
public ReadLastConfirmedOp(LedgerHandle lh, BookieClient bookieClient, List<BookieSocketAddress> ensemble, LastConfirmedDataCallback cb) {
    this.lh = lh;
    this.bookieClient = bookieClient;
    this.cb = cb;
    this.currentEnsemble = ensemble;
    // Start from an invalid LAC; responses can only raise it.
    this.maxRecoveredData = new RecoveryData(LedgerHandle.INVALID_ENTRY_ID, 0);
    // One outstanding response per bookie in the ensemble.
    this.numResponsesPending = lh.getLedgerMetadata().getEnsembleSize();
    this.coverageSet = lh.distributionSchedule.getCoverageSet();
}
/**
 * Obtains a {@link PendingAddOp} from the recycler and initializes it for
 * a fresh (non-recovery) add of {@code payload} against the given ensemble.
 * Every field is reset explicitly because the instance may be recycled.
 */
static PendingAddOp create(LedgerHandle lh, ClientContext clientCtx, List<BookieSocketAddress> ensemble, ByteBuf payload, EnumSet<WriteFlag> writeFlags, AddCallbackWithLatency cb, Object ctx) {
    final PendingAddOp pendingAdd = RECYCLER.get();
    // Caller-supplied collaborators.
    pendingAdd.lh = lh;
    pendingAdd.clientCtx = clientCtx;
    pendingAdd.cb = cb;
    pendingAdd.ctx = ctx;
    pendingAdd.ensemble = ensemble;
    pendingAdd.payload = payload;
    pendingAdd.writeFlags = writeFlags;
    // Per-operation state, reset to defaults for a fresh add.
    pendingAdd.isRecoveryAdd = false;
    pendingAdd.entryId = LedgerHandle.INVALID_ENTRY_ID;
    pendingAdd.currentLedgerLength = -1;
    pendingAdd.entryLength = payload.readableBytes();
    pendingAdd.completed = false;
    pendingAdd.ackSet = lh.getDistributionSchedule().getAckSet();
    pendingAdd.pendingWriteRequests = 0;
    pendingAdd.callbackTriggered = false;
    pendingAdd.hasRun = false;
    pendingAdd.requestTimeNanos = Long.MAX_VALUE;
    pendingAdd.allowFailFast = false;
    pendingAdd.qwcLatency = 0;
    return pendingAdd;
}
// Fragment (method body continues beyond this excerpt): fans a LAC read out
// to every bookie in entry `eid`'s write set, collecting per-bookie Long
// results into `readResults`; `numBookies` counts outstanding replies before
// `callback` fires.
public void readLacs(final LedgerHandle lh, long eid, final GenericCallback<Set<ReadResult<Long>>> callback) { List<Integer> writeSet = lh.distributionSchedule.getWriteSet(eid); final AtomicInteger numBookies = new AtomicInteger(writeSet.size()); final Set<ReadResult<Long>> readResults = new HashSet<ReadResult<Long>>();
/**
 * Get the last stored entry id of the fragment in the given failed bookie.
 *
 * @param bookieIndex
 *          the bookie index in the ensemble.
 * @return last stored entry id on the bookie, or
 *         {@code LedgerHandle.INVALID_ENTRY_ID} if none is found.
 */
public Long getLastStoredEntryId(int bookieIndex) {
    long lastEntry = lastKnownEntryId;
    // Scan at most ensemble.size() consecutive entries backward from the
    // last known entry. NOTE(review): this presumably relies on the
    // schedule's bookie/entry mapping repeating with period ensembleSize —
    // confirm against the DistributionSchedule implementation.
    for (int i = 0; i < ensemble.size() && lastEntry >= firstEntryId; i++) {
        if (schedule.hasEntry(lastEntry, bookieIndex)) {
            return lastEntry;
        } else {
            lastEntry--;
        }
    }
    return LedgerHandle.INVALID_ENTRY_ID;
}
/**
 * Return a {@link WriteSet} suitable for reading a particular entry.
 * This will include all bookies that are expected to store the entry.
 */
WriteSet getWriteSetForReadOperation(long entryId) {
    if (stickyBookieIndex != -1) {
        // When sticky reads are enabled we want to make sure to take
        // advantage of read-ahead (or, anyway, of efficiencies in
        // reading sequential data from disk through the page cache).
        // For this, all the entries that a given bookie prefetches
        // should be read from that bookie.
        // For example, with e=2, w=2, a=2 we would have
        //         B-1 B-2
        // e-0      X   X
        // e-1      X   X
        // e-2      X   X
        //
        // In this case we want all the requests to be issued to B-1 (by
        // preference), so that cache hits will be maximized.
        //
        // We can only enable sticky reads if ensemble==writeQuorum,
        // otherwise the same bookie will not have all the entries
        // stored.
        return distributionSchedule.getWriteSet(stickyBookieIndex);
    } else {
        return distributionSchedule.getWriteSet(entryId);
    }
}
}
// Fragment: stop scanning at the first entry that the schedule places on
// this bookie's index in the current ensemble.
if (distributionSchedule.hasEntry(firstEntryIter, thisBookieIndexInCurrentEnsemble)) { firstStoredEntryId = firstEntryIter; break;
/**
 * Sends the write-LAC request to every bookie in the LAC's write set.
 * The write set is recycled even if a send throws.
 */
void initiate(ByteBufList toSend) {
    this.toSend = toSend;
    final DistributionSchedule.WriteSet targets = lh.distributionSchedule.getWriteSet(lac);
    try {
        for (int idx = 0; idx < targets.size(); idx++) {
            sendWriteLacRequest(targets.get(idx));
        }
    } finally {
        // Write sets are pooled; always return them.
        targets.recycle();
    }
}
// Fragment: obtain the entry's write set and skip bookies outside it;
// the matching finally/recycle continues beyond this excerpt.
DistributionSchedule.WriteSet writeSet = lh.distributionSchedule.getWriteSet(entryId); try { if (!writeSet.contains(bookieIndex)) {
// Fragment (method body continues beyond this excerpt): ByteBuf variant of
// the fan-out read — reads entry `eid` from every bookie in its write set,
// accumulating results into `readResults` before `callback` fires.
public void readEntriesFromAllBookies(final LedgerHandle lh, long eid, final GenericCallback<Set<ReadResult<ByteBuf>>> callback) { WriteSet writeSet = lh.distributionSchedule.getWriteSet(eid); final AtomicInteger numBookies = new AtomicInteger(writeSet.size()); final Set<ReadResult<ByteBuf>> readResults = new HashSet<>();
// Fragment: resolve each write-set index of `entryToRead` to its socket
// address in the current ensemble; loop body continues beyond this excerpt.
DistributionSchedule.WriteSet writeSet = lh.getDistributionSchedule().getWriteSet(entryToRead); for (int i = 0; i < writeSet.size(); i++) { BookieSocketAddress addr = curEnsemble.get(writeSet.get(i));
// Fragment (method body continues beyond this excerpt): WriteSet variant of
// the LAC fan-out read — queries every bookie in entry `eid`'s write set,
// accumulating Long results into `readResults` before `callback` fires.
public void readLacs(final LedgerHandle lh, long eid, final GenericCallback<Set<ReadResult<Long>>> callback) { WriteSet writeSet = lh.distributionSchedule.getWriteSet(eid); final AtomicInteger numBookies = new AtomicInteger(writeSet.size()); final Set<ReadResult<Long>> readResults = new HashSet<ReadResult<Long>>();
// Fragment: look up the write set for `entryId`; surrounding use (and the
// recycle of the pooled set) continues beyond this excerpt.
DistributionSchedule.WriteSet writeSet = lh.distributionSchedule.getWriteSet(entryId);
// Fragment: back-pressure check — wait (up to the configured timeout) for the
// write set's channels to become writable before sending `firstEntry`;
// the matching finally/recycle continues beyond this excerpt.
DistributionSchedule.WriteSet ws = distributionSchedule.getWriteSet(firstEntry); try { if (!waitForWritable(ws, firstEntry, ws.size() - 1, clientCtx.getConf().waitForWriteSetMs)) {