/**
 * Converts a future of a list of write futures into a future list of
 * {@link Try} results, so that each entry's success/failure is captured
 * individually instead of failing the whole list on the first error.
 */
private Future<List<Try<DLSN>>> asTryList(Future<List<Future<DLSN>>> futureList) {
    return futureList.flatMap(new AbstractFunction1<List<Future<DLSN>>, Future<List<Try<DLSN>>>>() {
        @Override
        public Future<List<Try<DLSN>>> apply(List<Future<DLSN>> writeFutures) {
            // collectToTry wraps each outcome in a Try rather than short-circuiting
            return Future$.MODULE$.collectToTry(writeFutures);
        }
    });
}
/**
 * Closes this handler's resources (lock, then ledger allocator) on the
 * scheduler, and only afterwards delegates to the base handler's close.
 */
@Override
public Future<Void> asyncClose() {
    Future<Void> resourcesClosed = Utils.closeSequence(scheduler, lock, ledgerAllocator);
    return resourcesClosed.flatMap(new AbstractFunction1<Void, Future<Void>>() {
        @Override
        public Future<Void> apply(Void ignored) {
            // own resources are down; now close the parent handler
            return BKLogWriteHandler.super.asyncClose();
        }
    });
}
/**
 * Deletes the allocator: first closes internally (with cleanup enabled),
 * then removes the allocation path from ZooKeeper at the tracked version.
 */
@Override
public Future<Void> delete() {
    return closeInternal(true).flatMap(new AbstractFunction1<Void, Future<Void>>() {
        @Override
        public Future<Void> apply(Void ignored) {
            // versioned delete so a concurrent writer cannot be clobbered silently
            return Utils.zkDelete(zkc, allocatePath, getVersion());
        }
    });
}
/**
 * Resolves the first DLSN whose transaction id is not less than
 * {@code fromTxnId}, using the current log segment list.
 */
@Override
public Future<DLSN> getDLSNNotLessThanTxId(final long fromTxnId) {
    return getLogSegmentsAsync().flatMap(new AbstractFunction1<List<LogSegmentMetadata>, Future<DLSN>>() {
        @Override
        public Future<DLSN> apply(List<LogSegmentMetadata> logSegments) {
            // delegate to the segment-aware overload
            return getDLSNNotLessThanTxId(fromTxnId, logSegments);
        }
    });
}
/**
 * Fetches the allocation data stored at {@code allocatePath}; if no usable
 * data exists there yet (missing node, version, or value), creates it.
 */
static Future<Versioned<byte[]>> getAndCreateAllocationData(final String allocatePath,
                                                            final ZooKeeperClient zkc) {
    return Utils.zkGetData(zkc, allocatePath, false)
            .flatMap(new AbstractFunction1<Versioned<byte[]>, Future<Versioned<byte[]>>>() {
                @Override
                public Future<Versioned<byte[]>> apply(Versioned<byte[]> existing) {
                    // treat the data as absent unless node, version and value are all present
                    boolean missing = (null == existing)
                            || (null == existing.getVersion())
                            || (null == existing.getValue());
                    if (missing) {
                        return createAllocationData(allocatePath, zkc);
                    }
                    return Future.value(existing);
                }
            });
}
// Flushes pending records, then commits once the flush completes; yields the
// last acknowledged transaction id.
Future<Long> flushAndCommit() {
    Future<Long> flushFuture = flush();
    return flushFuture.flatMap(COMMIT_AFTER_FLUSH_FUNC);
}
/**
 * Closes and completes the writer. If a log segment roll is in flight, waits
 * for it to finish before delegating to the superclass close.
 */
@Override
protected Future<Void> asyncCloseAndComplete() {
    Future<BKLogSegmentWriter> pendingRoll;
    synchronized (this) {
        pendingRoll = this.rollingFuture;
    }
    if (null == pendingRoll) {
        // no roll in progress; close directly
        return super.asyncCloseAndComplete();
    }
    return pendingRoll.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Void>>() {
        @Override
        public Future<Void> apply(BKLogSegmentWriter ignored) {
            // roll finished; safe to close now
            return BKAsyncLogWriter.super.asyncCloseAndComplete();
        }
    });
}
/**
 * Starts a new log segment: first recovers any incomplete segments via the
 * write handler, then starts a fresh segment writer and caches it.
 */
private Future<BKLogSegmentWriter> asyncStartNewLogSegment(final BKLogWriteHandler writeHandler,
                                                           final long startTxId,
                                                           final boolean allowMaxTxID) {
    return writeHandler.recoverIncompleteLogSegments()
            .flatMap(new AbstractFunction1<Long, Future<BKLogSegmentWriter>>() {
                @Override
                public Future<BKLogSegmentWriter> apply(Long recoveredLastTxId) {
                    return writeHandler.asyncStartLogSegment(startTxId, false, allowMaxTxID)
                            .onSuccess(new AbstractFunction1<BKLogSegmentWriter, BoxedUnit>() {
                                @Override
                                public BoxedUnit apply(BKLogSegmentWriter freshWriter) {
                                    // remember the writer so later operations reuse it
                                    cacheLogWriter(freshWriter);
                                    return BoxedUnit.UNIT;
                                }
                            });
                }
            });
}
// NOTE(review): this is a fragment of an anonymous ExceptionalFunction whose
// declaration begins outside this view; the trailing "} });" closes that
// enclosing anonymous instance.
@Override
public Future<Void> applyE(Void in) throws Throwable {
    // Obtain the DistributedLock via the scheduler, then finish acquisition on
    // the executor thread.
    return scheduler.apply(lockFunction).flatMap(new ExceptionalFunction<DistributedLock, Future<Void>>() {
        @Override
        public Future<Void> applyE(DistributedLock lock) throws IOException {
            return acquireLockOnExecutorThread(lock);
        }
    });
} });
/**
 * Runs {@code func} against the read handler: lazily initializes the reader
 * future pool, obtains (creating if needed) the read handler on that pool,
 * then applies the supplied operation to it.
 */
<T> Future<T> processReaderOperation(final Function<BKLogReadHandler, Future<T>> func) {
    initializeFuturePool(false);
    Future<BKLogReadHandler> handlerFuture =
            readerFuturePool.apply(new ExceptionalFunction0<BKLogReadHandler>() {
                @Override
                public BKLogReadHandler applyE() throws Throwable {
                    return getReadHandlerForListener(true);
                }
            });
    return handlerFuture.flatMap(new ExceptionalFunction<BKLogReadHandler, Future<T>>() {
        @Override
        public Future<T> applyE(final BKLogReadHandler handler) throws Throwable {
            return func.apply(handler);
        }
    });
}
/**
 * Marks every log segment older than {@code dlsn} as truncated, returning the
 * updated segments. An invalid DLSN truncates nothing and yields an empty list.
 */
Future<List<LogSegmentMetadata>> setLogSegmentsOlderThanDLSNTruncated(final DLSN dlsn) {
    if (DLSN.InvalidDLSN == dlsn) {
        // nothing to truncate for an invalid position
        List<LogSegmentMetadata> noSegments = new ArrayList<LogSegmentMetadata>(0);
        return Future.value(noSegments);
    }
    scheduleGetAllLedgersTaskIfNeeded();
    return asyncGetFullLedgerList(false, false).flatMap(
            new AbstractFunction1<List<LogSegmentMetadata>, Future<List<LogSegmentMetadata>>>() {
                @Override
                public Future<List<LogSegmentMetadata>> apply(List<LogSegmentMetadata> segments) {
                    return setLogSegmentsOlderThanDLSNTruncated(segments, dlsn);
                }
            });
}
/**
 * Moves one stream from the source region to the target: releases ownership
 * on the source proxy, then triggers a check on the target monitor so the
 * target side picks the stream up. Blocks until the move completes.
 *
 * @param streamName name of the stream to move
 * @throws Exception if the release or the subsequent target check fails
 */
private void doMoveStream(final String streamName) throws Exception {
    Await.result(srcClient.release(streamName).flatMap(new Function<Void, Future<Void>>() {
        @Override
        public Future<Void> apply(Void result) {
            return targetMonitor.check(streamName).addEventListener(new FutureEventListener<Void>() {
                @Override
                public void onSuccess(Void value) {
                    logger.info("Moved stream {} from {} to {}.",
                            new Object[]{streamName, source, target});
                }
                @Override
                public void onFailure(Throwable cause) {
                    // BUGFIX: a failed move was previously logged at INFO level,
                    // making failures easy to miss; log at ERROR instead.
                    logger.error("Failed to move stream {} from region {} to {} : ",
                            new Object[]{streamName, source, target, cause});
                }
            });
        }
    }));
}
/**
 * Marks the end of the stream on the current (or rolling, or end-of-stream)
 * segment writer, recording operation latency in the op stats logger.
 */
Future<Long> markEndOfStream() {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    Future<BKLogSegmentWriter> segmentWriterFuture;
    synchronized (this) {
        segmentWriterFuture = this.rollingFuture;
    }
    if (null == segmentWriterFuture) {
        // no roll in flight: get (or create) a writer suitable for end-of-stream
        segmentWriterFuture = getLogSegmentWriterForEndOfStream();
    }
    return segmentWriterFuture
            .flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Long>>() {
                @Override
                public Future<Long> apply(BKLogSegmentWriter segmentWriter) {
                    return segmentWriter.markEndOfStream();
                }
            })
            .addEventListener(new OpStatsListener<Long>(markEndOfStreamOpStatsLogger, stopwatch));
}
/**
 * Recovers any incomplete (in-progress) log segments. Honors the recovery
 * failpoint test hook, surfacing its IOException as a failed future rather
 * than throwing.
 */
public Future<Long> recoverIncompleteLogSegments() {
    try {
        FailpointUtils.checkFailPoint(
                FailpointUtils.FailPointName.FP_RecoverIncompleteLogSegments);
    } catch (IOException failure) {
        // failpoint tripped: fail the returned future
        return Future.exception(failure);
    }
    return asyncGetFilteredLedgerList(false, false)
            .flatMap(recoverLogSegmentsFunction);
}
/**
 * Recovers a single log segment: sealed segments pass through untouched;
 * in-progress segments have their last record read and are then completed.
 */
@Override
public Future<LogSegmentMetadata> apply(final LogSegmentMetadata segment) {
    if (!segment.isInProgress()) {
        // already sealed; nothing to recover
        return Future.value(segment);
    }
    LOG.info("Recovering last record in log segment {} for {}.", segment, getFullyQualifiedName());
    return asyncReadLastRecord(segment, true, true, true).flatMap(
            new AbstractFunction1<LogRecordWithDLSN, Future<LogSegmentMetadata>>() {
                @Override
                public Future<LogSegmentMetadata> apply(LogRecordWithDLSN tailRecord) {
                    // seal the segment at the recovered tail record
                    return completeLogSegment(segment, tailRecord);
                }
            });
}
// NOTE(review): this is the body of an anonymous function whose declaration
// begins outside this view; the trailing "} });" closes that enclosing scope.
public Future<Long> apply(Void done) {
    // Sum the record counts at/after beginDLSN across all qualifying segments.
    return asyncGetFullLedgerList(true, false).flatMap(new Function<List<LogSegmentMetadata>, Future<Long>>() {
        public Future<Long> apply(List<LogSegmentMetadata> ledgerList) {
            List<Future<Long>> futureCounts = new ArrayList<Future<Long>>(ledgerList.size());
            for (LogSegmentMetadata ledger : ledgerList) {
                // only segments at/after the segment containing beginDLSN contribute
                if (ledger.getLogSegmentSequenceNumber() >= beginDLSN.getLogSegmentSequenceNo()) {
                    futureCounts.add(asyncGetLogRecordCount(ledger, beginDLSN));
                }
            }
            // total the per-segment counts once they all complete
            return Future.collect(futureCounts).map(new Function<List<Long>, Long>() {
                public Long apply(List<Long> counts) {
                    return sum(counts);
                }
            });
        }
    });
} });
/**
 * Flushes and commits via the current segment writer. Prefers an in-flight
 * rolling writer over the cached one; with no writer at all, just returns the
 * last known transaction id.
 */
Future<Long> flushAndCommit() {
    Future<BKLogSegmentWriter> segmentWriterFuture;
    synchronized (this) {
        // prefer a roll in progress, otherwise fall back to the cached writer
        segmentWriterFuture = (null != this.rollingFuture)
                ? this.rollingFuture
                : getCachedLogWriterFuture();
    }
    if (null == segmentWriterFuture) {
        // no writer: nothing buffered, report last tx id directly
        return Future.value(getLastTxId());
    }
    return segmentWriterFuture.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Long>>() {
        @Override
        public Future<Long> apply(BKLogSegmentWriter segmentWriter) {
            return segmentWriter.flushAndCommit();
        }
    });
}
/**
 * Closes the read handler: cancels any outstanding lock acquisition, shuts
 * down the readahead worker and read lock in sequence, clears the caches,
 * then delegates to the base handler's close.
 */
public Future<Void> asyncClose() {
    DistributedLock lockToClose;
    synchronized (this) {
        // cancel an acquisition still in flight before tearing down
        if (null != lockAcquireFuture && !lockAcquireFuture.isDefined()) {
            FutureUtils.cancel(lockAcquireFuture);
        }
        lockToClose = readLock;
    }
    Future<Void> closeFuture = Utils.closeSequence(scheduler, readAheadWorker, lockToClose);
    return closeFuture.flatMap(new AbstractFunction1<Void, Future<Void>>() {
        @Override
        public Future<Void> apply(Void ignored) {
            if (null != readAheadCache) {
                readAheadCache.clear();
            }
            if (null != handleCache) {
                handleCache.clear();
            }
            return BKLogReadHandler.super.asyncClose();
        }
    });
}
@Override public synchronized Future<Long> commit() { // we don't pack control records with user records together // so transmit current output buffer if possible Future<Integer> transmitFuture; try { try { transmitFuture = transmit(); } catch (IOException ioe) { return Future.exception(ioe); } if (null == transmitFuture) { writeControlLogRecord(); return flush(); } } catch (IOException ioe) { return Future.exception(ioe); } return transmitFuture.flatMap(GET_LAST_TXID_ACKNOWLEDGED_AFTER_TRANSMIT_FUNC); }
private Future<BKLogSegmentWriter> doGetLogSegmentWriter(final long firstTxid, final boolean bestEffort, final boolean rollLog, final boolean allowMaxTxID) { if (encounteredError) { return Future.exception(new WriteException(bkDistributedLogManager.getStreamName(), "writer has been closed due to error.")); } Future<BKLogSegmentWriter> writerFuture = asyncGetLedgerWriter(!disableRollOnSegmentError); if (null == writerFuture) { return rollLogSegmentIfNecessary(null, firstTxid, bestEffort, allowMaxTxID); } else if (rollLog) { return writerFuture.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<BKLogSegmentWriter>>() { @Override public Future<BKLogSegmentWriter> apply(BKLogSegmentWriter writer) { return rollLogSegmentIfNecessary(writer, firstTxid, bestEffort, allowMaxTxID); } }); } else { return writerFuture; } }