/**
 * Copy constructor: snapshots every scalar field plus the custom-metadata
 * map from {@code toCopy}, so later mutation of the source metadata is not
 * visible through this mock.
 */
MockMetadata(LedgerMetadata toCopy) {
    // quorum / ensemble configuration
    ensembleSize = toCopy.getEnsembleSize();
    writeQuorumSize = toCopy.getWriteQuorumSize();
    ackQuorumSize = toCopy.getAckQuorumSize();

    // ledger state
    isClosed = toCopy.isClosed();
    lastEntryId = toCopy.getLastEntryId();
    length = toCopy.getLength();
    ctime = toCopy.getCtime();
    digestType = toCopy.getDigestType();

    // defensive copy so the map cannot be changed through the source
    customMetadata = ImmutableMap.copyOf(toCopy.getCustomMetadata());
}
SequenceReadRequest(List<BookieSocketAddress> ensemble, long lId, long eId) { super(ensemble, lId, eId); this.sentReplicas = new BitSet(lh.getLedgerMetadata().getWriteQuorumSize()); this.erroredReplicas = new BitSet(lh.getLedgerMetadata().getWriteQuorumSize()); }
/**
 * Records the last-add-confirmed value and reinitializes the response
 * tracker with one bit per write-quorum replica, all set.
 * NOTE(review): set bits appear to represent replicas still awaiting a
 * response (cleared as replies arrive) — confirm against the caller.
 */
void setLac(long lac) {
    this.lac = lac;
    final int writeQuorum = lh.getLedgerMetadata().getWriteQuorumSize();
    this.receivedResponseSet = new BitSet(writeQuorum);
    this.receivedResponseSet.set(0, writeQuorum);
}
writer.append(String.valueOf(metadata.getWriteQuorumSize())).append(LINE_SPLITTER); writer.append(String.valueOf(metadata.getEnsembleSize())).append(LINE_SPLITTER); writer.append(String.valueOf(metadata.getLength())).append(LINE_SPLITTER);
/**
 * Sends the read for this entry to the next untried replica in the write set.
 *
 * @return the bookie the read was sent to, or {@code null} when either all
 *         replicas have already been tried (in which case the whole read is
 *         failed with the first recorded error) or the sending thread was
 *         interrupted.
 */
synchronized BookieSocketAddress sendNextRead() {
    if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getWriteQuorumSize()) {
        // we are done, the read has failed from all replicas, just fail the
        // read
        fail(firstError);
        return null;
    }
    // ToDo: pick replica with writable PCBC. ISSUE #1239
    // https://github.com/apache/bookkeeper/issues/1239
    int replica = nextReplicaIndexToReadFrom;
    // map the replica slot to its position in the ensemble
    int bookieIndex = writeSet.get(nextReplicaIndexToReadFrom);
    nextReplicaIndexToReadFrom++;
    try {
        BookieSocketAddress to = ensemble.get(bookieIndex);
        sendReadTo(bookieIndex, to, this);
        // record the send only after it succeeded without interruption
        sentToHosts.add(to);
        sentReplicas.set(replica);
        return to;
    } catch (InterruptedException ie) {
        LOG.error("Interrupted reading entry " + this, ie);
        // preserve the interrupt status for callers further up the stack
        Thread.currentThread().interrupt();
        fail(BKException.Code.InterruptedException);
        return null;
    }
}
int writeQ = metadata.getWriteQuorumSize(); int ackQ = metadata.getAckQuorumSize(); Map<String, byte[]> customMetadata = metadata.getCustomMetadata();
PendingReadOp(LedgerHandle lh, ClientContext clientCtx, long startEntryId, long endEntryId, boolean isRecoveryRead) { this.seq = new ArrayList<>((int) ((endEntryId + 1) - startEntryId)); this.future = new CompletableFuture<>(); this.lh = lh; this.clientCtx = clientCtx; this.startEntryId = startEntryId; this.endEntryId = endEntryId; this.isRecoveryRead = isRecoveryRead; this.allowFailFast = false; numPendingEntries = endEntryId - startEntryId + 1; requiredBookiesMissingEntryForRecovery = getLedgerMetadata().getWriteQuorumSize() - getLedgerMetadata().getAckQuorumSize() + 1; heardFromHosts = new HashSet<>(); heardFromHostsBitSet = new BitSet(getLedgerMetadata().getEnsembleSize()); }
boolean isLedgerClosed = ledgerMetadata.isClosed(); int ensembleSize = ledgerMetadata.getEnsembleSize(); int writeQuorumSize = ledgerMetadata.getWriteQuorumSize(); ledgerMetadata.getWriteQuorumSize(), ledgerMetadata.getAckQuorumSize(), ledgerMetadata.getEnsembleSize()); List<BookieSocketAddress> currentSegmentEnsemble = segments.get(segmentNo).getValue();
bkc.getPlacementPolicy().replaceBookie( lh.getLedgerMetadata().getEnsembleSize(), lh.getLedgerMetadata().getWriteQuorumSize(), lh.getLedgerMetadata().getAckQuorumSize(), lh.getLedgerMetadata().getCustomMetadata(),
/** * Send to next replica speculatively, if required and possible. * This returns the host we may have sent to for unit testing. * @return host we sent to if we sent. null otherwise. */ @Override synchronized BookieSocketAddress maybeSendSpeculativeRead(BitSet heardFrom) { if (nextReplicaIndexToReadFrom >= getLedgerMetadata().getWriteQuorumSize()) { return null; } BitSet sentTo = getSentToBitSet(); sentTo.and(heardFrom); // only send another read if we have had no successful response at all // (even for other entries) from any of the other bookies we have sent the // request to if (sentTo.cardinality() == 0) { clientCtx.getClientStats().getSpeculativeReadCounter().inc(); return sendNextRead(); } else { return null; } }
/**
 * Creates a builder pre-populated from an existing {@code LedgerMetadata},
 * so individual fields can be overridden before building a new instance.
 *
 * @param other the metadata to copy from
 * @return a builder mirroring {@code other}
 */
public static LedgerMetadataBuilder from(LedgerMetadata other) {
    LedgerMetadataBuilder builder = new LedgerMetadataBuilder();
    builder.metadataFormatVersion = other.getMetadataFormatVersion();
    builder.ensembleSize = other.getEnsembleSize();
    builder.writeQuorumSize = other.getWriteQuorumSize();
    builder.ackQuorumSize = other.getAckQuorumSize();

    State state = other.getState();
    builder.state = state;
    if (state == State.CLOSED) {
        // last entry id and length are only meaningful once the ledger closes
        builder.lastEntryId = Optional.of(other.getLastEntryId());
        builder.length = Optional.of(other.getLength());
    }
    builder.ensembles.putAll(other.getAllEnsembles());

    if (other.hasPassword()) {
        builder.password = Optional.of(other.getPassword());
        builder.digestType = Optional.of(other.getDigestType());
    }
    builder.ctime = other.getCtime();
    // Hack to get around fact that ctime was never versioned correctly
    builder.storeCtime = LedgerMetadataUtils.shouldStoreCtime(other);
    builder.customMetadata = ImmutableMap.copyOf(other.getCustomMetadata());
    return builder;
}
final long entryToRead = curEntryId; final EntryExistsCallback eecb = new EntryExistsCallback(lh.getLedgerMetadata().getWriteQuorumSize(), new GenericCallback<Boolean>() { public void operationComplete(int rc, Boolean result) {
builder.setQuorumSize(metadata.getWriteQuorumSize()) .setAckQuorumSize(metadata.getAckQuorumSize()) .setEnsembleSize(metadata.getEnsembleSize())
writeHeader(os, METADATA_FORMAT_VERSION_3); LedgerMetadataFormat.Builder builder = LedgerMetadataFormat.newBuilder(); builder.setQuorumSize(metadata.getWriteQuorumSize()) .setAckQuorumSize(metadata.getAckQuorumSize()) .setEnsembleSize(metadata.getEnsembleSize())
&& getLedgerMetadata().getEnsembleSize() == getLedgerMetadata().getWriteQuorumSize()) { stickyBookieIndex = clientCtx.getPlacementPolicy().getStickyReadBookieIndex(metadata, Optional.empty()); } else { metadata.getWriteQuorumSize(), metadata.getAckQuorumSize(), metadata.getEnsembleSize());