// Continuation that computes the total record count of the log from beginDLSN
// (inclusive) onward, by summing per-segment counts across all qualifying segments.
// NOTE(review): the trailing "});" closes an enclosing anonymous class not visible here.
public Future<Long> apply(Void done) {
    // Fetch the complete log segment ("ledger") list, then fan out one count
    // request per segment. (true, false) args: presumably force-fetch flags —
    // TODO confirm against asyncGetFullLedgerList's signature.
    return asyncGetFullLedgerList(true, false).flatMap(new Function<List<LogSegmentMetadata>, Future<Long>>() {
        public Future<Long> apply(List<LogSegmentMetadata> ledgerList) {
            List<Future<Long>> futureCounts = new ArrayList<Future<Long>>(ledgerList.size());
            for (LogSegmentMetadata ledger : ledgerList) {
                // Only segments at or after the segment containing beginDLSN contribute.
                if (ledger.getLogSegmentSequenceNumber() >= beginDLSN.getLogSegmentSequenceNo()) {
                    futureCounts.add(asyncGetLogRecordCount(ledger, beginDLSN));
                }
            }
            // Wait for every per-segment count, then reduce to a single total.
            return Future.collect(futureCounts).map(new Function<List<Long>, Long>() {
                public Long apply(List<Long> counts) {
                    return sum(counts);
                }
            });
        }
    });
}
});
// Processes the item list one element at a time. Each call handles a single
// item; completion of that item's future re-invokes this runnable (it is
// registered as the event listener below), driving the iteration forward.
// NOTE(review): the trailing lone "}" closes an enclosing class not visible here.
@Override
public void run() {
    // Bail out quietly if the processor was interrupted; the promise is
    // deliberately left unset in that case.
    if (interrupted) {
        logger.debug("ListFutureProcessor is interrupted.");
        return;
    }
    // All items consumed: complete the promise with the accumulated results.
    if (!itemsIter.hasNext()) {
        promise.setValue(results);
        return;
    }
    // Process the next item and listen for its completion to continue the loop.
    processFunc.apply(itemsIter.next()).addEventListener(this);
}
}
/**
 * Equivalent to calling {@code Monitor.restoring}, adapting the given
 * {@code Supplier} to a nullary function via {@code func0}.
 *
 * @param supplier the computation to run under {@code Monitor.restoring}.
 * @param <T> the result type of the supplier.
 * @return the supplier's result, as returned by {@code Monitor.restoring}.
 */
public static <T> T restoring(Supplier<T> supplier) {
    return it.restoring(func0(supplier));
}
/**
 * Equivalent to calling {@code Monitor.restoring}, adapting the given
 * {@code Supplier} to a nullary function via {@code func0}.
 *
 * @param supplier the computation to run under {@code Monitor.restoring}.
 * @param <T> the result type of the supplier.
 * @return the supplier's result, as returned by {@code Monitor.restoring}.
 */
public static <T> T restoring(Supplier<T> supplier) {
    return it.restoring(func0(supplier));
}
/**
 * Executes a batch of requests asynchronously in a partitioned manner,
 * with the specified parallelism.
 *
 * @param requests List of requests to execute (must be non-empty).
 * @param parallelism Desired parallelism (must be > 0).
 * @param executeBatch Function to execute each partitioned batch of requests.
 * @param <Req> Request type.
 * @param <Resp> Response type.
 * @return List of response futures, one per partition.
 */
public static <Req, Resp> List<Future<Resp>> executePartitioned(
    List<Req> requests,
    int parallelism,
    Function<List<Req>, Future<Resp>> executeBatch) {

  MorePreconditions.checkNotBlank(requests);
  Preconditions.checkArgument(parallelism > 0);
  Preconditions.checkNotNull(executeBatch);

  // Size partitions with ceiling division so we never create more than
  // `parallelism` partitions. The previous floor-based sizing could exceed the
  // requested parallelism: e.g. 7 requests at parallelism 3 gave a partition
  // size of 2 and therefore 4 concurrent batches.
  int sizePerPartition = Math.max((requests.size() + parallelism - 1) / parallelism, 1);
  List<List<Req>> partitions = Lists.partition(requests, sizePerPartition);
  List<Future<Resp>> futures = Lists.newArrayListWithCapacity(partitions.size());
  for (final List<Req> partition : partitions) {
    futures.add(executeBatch.apply(partition));
  }
  return futures;
}
/**
 * Equivalent to calling {@code Monitor.using} on the given monitor, adapting
 * the given {@code Supplier} to a nullary function via {@code func0}.
 *
 * @param monitor the monitor to run the computation under.
 * @param supplier the computation to run.
 * @param <T> the result type of the supplier.
 * @return the supplier's result, as returned by {@code Monitor.using}.
 */
public static <T> T using(Monitor monitor, Supplier<T> supplier) {
    return it.using(monitor, func0(supplier));
}
// Runs a dequeue once a concurrency permit has been acquired, and guarantees
// the permit is released when the dequeue finishes (success or failure).
// NOTE(review): the trailing "});" closes an enclosing anonymous class not visible here.
@Override
public Future<PinLaterDequeueResponse> apply(final Permit permit) {
    // Execute the dequeue on the future pool rather than the caller's thread.
    return futurePool.apply(new ExceptionalFunction0<PinLaterDequeueResponse>() {
        @Override
        public PinLaterDequeueResponse applyE() throws Throwable {
            return dequeueJobsImpl(source, request, numAutoRetries);
        }
    }).respond(new Function<Try<PinLaterDequeueResponse>, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Try<PinLaterDequeueResponse> responseTry) {
            // respond() fires for both success and failure, so the permit is
            // always returned to the semaphore.
            permit.release();
            return BoxedUnit.UNIT;
        }
    });
}
});
for (Function<byte[], Void> watchers : configFileInfo.changeWatchers) { try { watchers.apply(newContents); } catch (Exception e) { LOG.error(
/**
 * Equivalent to calling {@code Monitor.using} on the given monitor, adapting
 * the given {@code Supplier} to a nullary function via {@code func0}.
 *
 * @param monitor the monitor to run the computation under.
 * @param supplier the computation to run.
 * @param <T> the result type of the supplier.
 * @return the supplier's result, as returned by {@code Monitor.using}.
 */
public static <T> T using(Monitor monitor, Supplier<T> supplier) {
    return it.using(monitor, func0(supplier));
}
conn = jedisPool.getResource(); selectRedisDB(conn, redisDBNum); return func.apply(conn); } catch (JedisConnectionException e) { jedisPool.returnBrokenResource(conn);
/** * Adds a watch on the specified file. The file must exist, otherwise a FileNotFoundException * is returned. If the file is deleted after a watch is established, the watcher will log errors * but continue to monitor it, and resume watching if it is recreated. * * @param filePath path to the file to watch. * @param onUpdate function to call when a change is detected to the file. The entire contents * of the file will be passed in to the function. Note that onUpdate will be * called once before this call completes, which facilities initial load of data. * This callback is executed synchronously on the watcher thread - it is * important that the function be non-blocking. */ public synchronized void addWatch(String filePath, Function<byte[], Void> onUpdate) throws IOException { MorePreconditions.checkNotBlank(filePath); Preconditions.checkNotNull(onUpdate); // Read the file and make the initial onUpdate call. File file = new File(filePath); ByteSource byteSource = Files.asByteSource(file); onUpdate.apply(byteSource.read()); // Add the file to our map if it isn't already there, and register the new change watcher. ConfigFileInfo configFileInfo = watchedFileMap.get(filePath); if (configFileInfo == null) { configFileInfo = new ConfigFileInfo(file.lastModified(), byteSource.hash(HASH_FUNCTION)); watchedFileMap.put(filePath, configFileInfo); } configFileInfo.changeWatchers.add(onUpdate); }