if (progressToken.getTotalBucketCount() == 0) { assert(progressToken.isEmpty()) : "inconsistent progress state"; progressToken.setTotalBucketCount(1L << distributionBitCount); progressToken.setDistributionBitCount(distributionBitCount); progressToken.setBucketCursor(0); progressToken.setFinishedBucketCount(0); this.distributionBitCount = distributionBitCount; this.distributionBitCount = progressToken.getDistributionBitCount(); if (progressToken.getTotalBucketCount() != (1L << progressToken.getDistributionBitCount())) { throw new IllegalArgumentException("Total bucket count in existing progress is not " + "consistent with that of the current document selection"); if (!progress.isFinished()) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Importing unfinished progress token with " + "bits: " + progressToken.getDistributionBitCount() + ", active: " + progressToken.getActiveBucketCount() + ", pending: " + progressToken.getPendingBucketCount() + ", cursor: " + progressToken.getBucketCursor() + ", finished: " + progressToken.getFinishedBucketCount() + ", total: " + progressToken.getTotalBucketCount()); if (!progress.isEmpty()) { if (progressToken.getActiveBucketCount() > 0) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Progress token had active buckets upon range " +
public static ProgressToken fromSerializedString(String serializedString) { byte[] serialized; try { serialized = Base64.getUrlDecoder().decode(serializedString); } catch (IllegalArgumentException e) { // Legacy visitor tokens were encoded with MIME Base64 which may fail decoding as URL-safe. // Try again with MIME decoder to avoid breaking upgrade scenarios. // TODO(vekterli): remove once this is no longer a risk. serialized = Base64.getMimeDecoder().decode(serializedString); } return new ProgressToken(serialized); }
public ExplicitBucketSource(Set<BucketId> superbuckets, int distributionBitCount, ProgressToken progress) { this.distributionBitCount = progress.getDistributionBitCount(); this.totalBucketCount = superbuckets.size(); if (progress.getTotalBucketCount() == 0) { progress.setTotalBucketCount(this.totalBucketCount); progress.setDistributionBitCount(distributionBitCount); this.distributionBitCount = distributionBitCount; if (progress.getTotalBucketCount() != totalBucketCount || (progress.getFinishedBucketCount() + progress.getPendingBucketCount() + progress.getActiveBucketCount() != totalBucketCount)) { throw new IllegalArgumentException("Total bucket count in existing progress is not " + "consistent with that of the current document selection"); if (progress.getBucketCursor() != 0) { this.distributionBitCount = progress.getDistributionBitCount(); if (progress.isFinished() || !progress.isEmpty()) return; progress.addBucket(id, new BucketId(), ProgressToken.BucketState.BUCKET_PENDING);
if (progressToken.getActiveBucketCount() > 0) { flushActive = true; if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Holding off new/pending buckets and consistency " + "correction until all " + progress.getActiveBucketCount() + " active buckets have been updated"); progressToken.setInconsistentState(true); } else { int delta = distributionBitCount - progressToken.getDistributionBitCount(); if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Increasing distribution bits for full bucket " + "space range source from " + progressToken.getDistributionBitCount() + " to " + distributionBitCount); progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() << delta); progressToken.setBucketCursor(progressToken.getBucketCursor() << delta); } else if (delta < 0) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Decreasing distribution bits for full bucket " + "space range source from " + progressToken.getDistributionBitCount() + " to " + distributionBitCount + " bits"); progressToken.setBucketCursor(progressToken.getBucketCursor() >>> -delta); progressToken.setFinishedBucketCount(progressToken.getFinishedBucketCount() >>> -delta);
assert(progressToken.getDistributionBitCount() == bucketSource.getDistributionBitCount()) : "inconsistent distribution bit counts for progress and source"; assert(hasNext()); if (progressToken.hasPending()) { TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> buckets = progressToken.getBuckets(); ProgressToken.BucketEntry pending = null; BucketId superbucket = null; if (entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING) { pending = entry.getValue(); superbucket = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey())); break; progressToken.setActiveBucketCount(progressToken.getActiveBucketCount() + 1); progressToken.setPendingBucketCount(progressToken.getPendingBucketCount() - 1); progressToken.addBucket(ret.getSuperbucket(), ret.getProgress(), ProgressToken.BucketState.BUCKET_ACTIVE); return ret;
boolean maybeInconsistent = true; long bucketsSplit = 0, bucketsMerged = 0; long pendingBefore = progressToken.getPendingBucketCount(); ProgressToken p = progressToken; assert(p.getActiveBucketCount() == 0); p.clearAllBuckets(); p.setBucketCursor(0); return; = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets()); for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry : buckets.entrySet()) { assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING); BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey())); if (pending.getUsedBits() < targetDistBits) { if (pending.getUsedBits() + 1 < targetDistBits) { maybeInconsistent = true; // Do another pass p.splitPendingBucket(pending); ++bucketsSplit; buckets = new TreeMap<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry>(p.getBuckets()); for (Map.Entry<ProgressToken.BucketKeyWrapper, ProgressToken.BucketEntry> entry : buckets.entrySet()) { assert(entry.getValue().getState() == ProgressToken.BucketState.BUCKET_PENDING); BucketId pending = new BucketId(ProgressToken.keyToBucketId(entry.getKey().getKey())); if (pending.getUsedBits() > targetDistBits) {
public void update(BucketId superbucket, BucketId progress, ProgressToken token) { progressToken.updateProgress(superbucket, progress); if (progressToken.getActiveBucketCount() == 0) { if (flushActive) { if (log.isLoggable(LogLevel.DEBUG)) { assert(progressToken.getDistributionBitCount() == distributionBitCount); if (progressToken.getPendingBucketCount() <= progressToken.getBucketCursor()) { progressToken.setFinishedBucketCount(progressToken.getBucketCursor() - progressToken.getPendingBucketCount());
private VisitResult doVisit( VisitorControlHandler visitorControlHandler, LocalDataVisitorHandler localDataVisitorHandler, RestUri restUri) throws RestApiException { try { visitorControlHandler.waitUntilDone(); // VisitorParameters' session timeout implicitly triggers timeout failures. throwIfFatalVisitingError(visitorControlHandler, restUri); } catch (InterruptedException e) { throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTrace(e), restUri, RestUri.apiErrorCodes.INTERRUPTED)); } if (localDataVisitorHandler.getErrors().isEmpty()) { Optional<String> continuationToken; if (! visitorControlHandler.getProgress().isFinished()) { continuationToken = Optional.of(visitorControlHandler.getProgress().serializeToString()); } else { continuationToken = Optional.empty(); } return new VisitResult(continuationToken, localDataVisitorHandler.getCommaSeparatedJsonDocuments()); } throw new RestApiException(Response.createErrorResponse(500, localDataVisitorHandler.getErrors(), restUri, RestUri.apiErrorCodes.UNSPECIFIED)); }
protected void mergePendingBucket(BucketId bucket) { BucketKeyWrapper bucketKey = bucketToKeyWrapper(bucket); BucketEntry entry = buckets.get(bucketKey); if (entry == null) { BucketEntry rightSibling = buckets.get(bucketToKeyWrapper(rightCheck)); " will be lost due to merging; potential for duplicates in result-set"); buckets.remove(bucketToKeyWrapper(rightCheck)); --pendingBucketCount; BucketEntry leftSibling = buckets.get(bucketToKeyWrapper(leftSanityCheck)); assert(leftSibling == null) : "bucket merge sanity checking failed"; addBucket(newMerged, entry.getProgress(), BucketState.BUCKET_PENDING);
BucketEntry entry = buckets.get(bucketToKeyWrapper(superbucket)); if (entry == null) { return true; long bucketKey = bucketToKey(bucket.getId()); long progressKey = bucketToKey(entry.getProgress().getId());
/**
 * Transitions the visitor session into its terminal state exactly once:
 * flushes the local data handler, fails the session if any buckets recorded
 * fatal errors, reports completion to the control handler, and finally wakes
 * up any threads waiting on completionMonitor.
 */
private void markSessionCompleted() {
    // 'done' is only ever written when token mutex is held, so safe to check
    // outside of completionMonitor lock.
    assert(!done) : "Session was marked as completed more than once";
    log.log(LogLevel.DEBUG, "Visitor session '" + sessionName + "' has completed");
    if (params.getLocalDataHandler() != null) {
        params.getLocalDataHandler().onDone();
    }
    // If skipFatalErrors is set and a fatal error did occur, fail
    // the session now with the first encountered error message.
    if (progress.getToken().containsFailedBuckets()) {
        transitionTo(new StateDescription(State.FAILED, progress.getToken().getFirstErrorMsg()));
    }
    // NOTE: transitioning to COMPLETED will not override a failure
    // state, so it's safe to always do this.
    transitionTo(new StateDescription(State.COMPLETED));
    params.getControlHandler().onDone(state.toCompletionCode(), state.getDescription());
    // 'done' is set under completionMonitor before notifying so that waiters
    // checking the flag inside the same monitor cannot miss the wake-up.
    // NOTE(review): presumably pairs with a wait on completionMonitor elsewhere — confirm.
    synchronized (completionMonitor) {
        done = true;
        completionMonitor.notifyAll();
    }
}
/** Converts a bucket id into the BucketKeyWrapper form used for bucket map lookups. */
private static BucketKeyWrapper bucketToKeyWrapper(BucketId bucket) {
    long key = bucketToKey(bucket.getId());
    return new BucketKeyWrapper(key);
}

/*
subId = new BucketId("BucketId(0x" + buckets[1] + ")"); addBucket(superId, subId, BucketState.BUCKET_PENDING);
BucketEntry existing = buckets.put(bucketToKeyWrapper(superbucket), entry); if (existing != null) { throw new IllegalStateException(
/**
 * Records a failure for the given bucket on the progress token and marks the
 * bucket as finished in the iterator so visiting can proceed past it.
 */
private void markBucketProgressAsFailed(BucketId bucket, BucketId subProgress, String message) {
    ProgressToken token = progress.getToken();
    token.addFailedBucket(bucket, subProgress, message);
    progress.getIterator().update(bucket, ProgressToken.FINISHED_BUCKET);
}
params.setResumeToken(ProgressToken.fromSerializedString(options.continuation.get())); } catch (Exception e) { throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTrace(e), restUri, RestUri.apiErrorCodes.UNSPECIFIED));
BucketKeyWrapper bucketKey = bucketToKeyWrapper(bucket); BucketEntry entry = buckets.get(bucketKey); if (entry == null) { | (1L << bucket.getUsedBits())); addBucket(splitLeft, entry.getProgress(), BucketState.BUCKET_PENDING); addBucket(splitRight, entry.getProgress(), BucketState.BUCKET_PENDING);
/** * @param superbucket The superbucket of which <code>progress</code> is * a sub-bucket * @param progress The sub-bucket for which a fractional progress should * be calculated * @return a value in [0, 1] specifying how far the (sub-bucket) has * reached in its superbucket. This is calculated by looking at the * bucket's split factor. */ public synchronized double progressFraction(BucketId superbucket, BucketId progress) { long revBits = bucketToKey(progress.getId()); int superUsed = superbucket.getUsedBits(); int progressUsed = progress.getUsedBits(); if (progressUsed == 0 || progressUsed < superUsed) { return 0; } int splitCount = progressUsed - superUsed; if (splitCount == 0) return 1; // Superbucket or inconsistent used-bits // Extract reversed split-bits revBits <<= superUsed; revBits >>>= 64 - splitCount; return (double)(revBits + 1) / (double)(1L << splitCount); }
/**
 * Reconstructs a progress token from its binary serialized form.
 * Read order must match the corresponding serialization code exactly.
 *
 * @param serialized binary token as produced by serialization
 */
public ProgressToken(byte[] serialized) {
    BufferSerializer buf = new BufferSerializer(GrowableByteBuffer.wrap(serialized));
    distributionBits = buf.getInt(null);
    bucketCursor = buf.getLong(null);
    finishedBucketCount = buf.getLong(null);
    totalBucketCount = buf.getLong(null);
    // Remaining payload is a sequence of (superbucket key, sub-progress) pairs;
    // all buckets read from a serialized token are re-added as pending.
    int remaining = buf.getInt(null);
    while (remaining-- > 0) {
        BucketId superbucket = new BucketId(buf.getLong(null));
        BucketId subProgress = new BucketId(buf.getLong(null));
        addBucket(superbucket, subProgress, BucketState.BUCKET_PENDING);
    }
}
BucketKeyWrapper superKey = bucketToKeyWrapper(superbucket); BucketEntry entry = buckets.get(superKey); if (entry == null) {