private OrderedScheduler(String name,
                         int corePoolSize,
                         ThreadFactory threadFactory,
                         boolean traceTaskExecution,
                         long traceTaskExecutionWarnTimeUs,
                         StatsLogger statsLogger,
                         StatsLogger perExecutorStatsLogger) {
  this.name = name;
  this.corePoolSize = corePoolSize;
  this.executors = new MonitoredScheduledThreadPoolExecutor[corePoolSize];
  this.futurePools = new MonitoredFuturePool[corePoolSize];
  for (int i = 0; i < corePoolSize; i++) {
    ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat(name + "-executor-" + i + "-%d")
        .setThreadFactory(threadFactory)
        .build();
    StatsLogger broadcastStatsLogger = BroadCastStatsLogger.masterslave(
        perExecutorStatsLogger.scope("executor-" + i), statsLogger);
    executors[i] = new MonitoredScheduledThreadPoolExecutor(
        1, tf, broadcastStatsLogger, traceTaskExecution);
    futurePools[i] = new MonitoredFuturePool(
        new ExecutorServiceFuturePool(executors[i]),
        broadcastStatsLogger.scope("futurepool"),
        traceTaskExecution,
        traceTaskExecutionWarnTimeUs);
  }
  this.random = new Random(System.currentTimeMillis());
}
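// A minimal sketch (not from the source) of how such a scheduler typically hands
// ordered work to one of its single-threaded executors, assuming callers supply a
// routing key. chooseExecutor and the hashing scheme are illustrative assumptions.
private MonitoredScheduledThreadPoolExecutor chooseExecutor(Object key) {
  // Mask the sign bit rather than using Math.abs, which breaks on Integer.MIN_VALUE.
  int index = (key.hashCode() & Integer.MAX_VALUE) % corePoolSize;
  return executors[index];
}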
public Future<Set<String>> getQueueNames() {
  return futurePool.apply(new ExceptionalFunction0<Set<String>>() {
    @Override
    public Set<String> applyE() throws Throwable {
      return getQueueNamesImpl();
    }
  });
}
public Future<Void> createQueue(final String name) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      createQueueImpl(name);
      return null;
    }
  });
}
private FuturePool buildFuturePool(ExecutorService executorService, StatsLogger statsLogger) {
  FuturePool futurePool = new ExecutorServiceFuturePool(executorService);
  return new MonitoredFuturePool(
      futurePool,
      statsLogger,
      conf.getEnableTaskExecutionStats(),
      conf.getTaskExecutionWarnTimeMicros());
}
@Override
public Future<Void> apply(final List<PinLaterJobAckInfo> jobAckInfos) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      for (PinLaterJobAckInfo jobAckInfo : jobAckInfos) {
        ackSingleJob(request.getQueueName(), false, jobAckInfo, numAutoRetries);
      }
      return null;
    }
  });
}
TestFuturePool() {
  executor = Executors.newScheduledThreadPool(1);
  pool = new ExecutorServiceFuturePool(executor);
  wrapper = new SafeQueueingFuturePool<T>(pool);
}

public void shutdown() {
  executor.shutdown(); // body truncated in the source; shutting down the executor is the presumed intent
}
@Override
public Future<Void> apply(final List<PinLaterJobAckInfo> jobAckInfos) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      for (PinLaterJobAckInfo jobAckInfo : jobAckInfos) {
        ackSingleJob(request.getQueueName(), true, jobAckInfo, numAutoRetries);
      }
      return null;
    }
  });
}
/**
 * @see FuturePool$#apply(java.util.concurrent.ExecutorService)
 */
public static ExecutorServiceFuturePool newFuturePool(ExecutorService executor) {
  return new ExecutorServiceFuturePool(executor);
}
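// A minimal usage sketch for the factory above (not from the source): wrap a
// fixed-size executor and run a computation on it. The pool size and the work
// inside the closure are placeholders.
ExecutorService executor = Executors.newFixedThreadPool(4);
FuturePool futurePool = newFuturePool(executor);
Future<Integer> answer = futurePool.apply(new ExceptionalFunction0<Integer>() {
  @Override
  public Integer applyE() throws Throwable {
    return 42; // placeholder computation executed on the wrapped executor
  }
});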
@Override
public Future<Void> apply(final List<PinLaterCheckpointJobRequest> checkpointRequests) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      for (PinLaterCheckpointJobRequest checkpointRequest : checkpointRequests) {
        checkpointSingleJob(source, request.getQueueName(), checkpointRequest, numAutoRetries);
      }
      return null;
    }
  });
}
public Future<Integer> deleteJobs(final PinLaterDeleteJobsRequest request) {
  // Execute the deleteJobs query on each shard until 'limit' jobs have been deleted.
  return futurePool.apply(new ExceptionalFunction0<Integer>() {
    @Override
    public Integer applyE() throws Throwable {
      int remainingLimit = request.getLimit();
      List<String> shardNames = getRandomShardShuffle();
      for (final String shardName : shardNames) {
        int numDeleted = deleteJobsFromShard(
            request.getQueueName(),
            shardName,
            request.getJobState(),
            request.getPriority(),
            request.getBodyRegexToMatch(),
            remainingLimit);
        remainingLimit -= numDeleted;
        if (remainingLimit <= 0) {
          break;
        }
      }
      return request.getLimit() - remainingLimit;
    }
  });
}
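// Hedged sketch of getRandomShardShuffle (not shown in the source): visiting
// shards in a random order spreads the scan load instead of always starting at
// the same shard. Assumes shardMap holds the shard name -> pool mapping.
private List<String> getRandomShardShuffle() {
  List<String> shardNames = new ArrayList<String>(shardMap.keySet());
  Collections.shuffle(shardNames);
  return shardNames;
}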
@Override
public Future<PinLaterEnqueueResponse> apply(final List<PinLaterJob> jobs) {
  return futurePool.apply(new ExceptionalFunction0<PinLaterEnqueueResponse>() {
    @Override
    public PinLaterEnqueueResponse applyE() throws Throwable {
      PinLaterEnqueueResponse response = new PinLaterEnqueueResponse();
      for (PinLaterJob job : jobs) {
        // Collect stats around job body size.
        Stats.addMetric("job_body_size_" + request.getQueueName(), job.getBody().length);
        Stats.addMetric("job-body-size", job.getBody().length);

        // Check whether the priority is supported.
        if (job.getPriority() > numPriorityLevels || job.getPriority() < 1) {
          Stats.incr(String.format("%s-priority-not-supported-enqueue", backendName));
          throw new PinLaterException(ErrorCode.PRIORITY_NOT_SUPPORTED,
              String.valueOf(job.getPriority()));
        }

        response.addToJobDescriptors(enqueueSingleJob(request.getQueueName(), job, numAutoRetries));
      }
      return response;
    }
  });
}
this.futurePool = new ExecutorServiceFuturePool(Executors.newFixedThreadPool(
    futurePoolSize,
    new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat("future-pool-%d") // name format truncated in the source; this value is a placeholder
        .build()));
public Future<Integer> retryFailedJobs(final PinLaterRetryFailedJobsRequest request) {
  // Execute the retryFailedJobs query on each shard until 'limit' jobs have been retried.
  return futurePool.apply(new ExceptionalFunction0<Integer>() {
    @Override
    public Integer applyE() throws Throwable {
      long currentTimeMillis = System.currentTimeMillis();
      int remainingLimit = request.getLimit();
      List<String> shardNames = getRandomShardShuffle();
      for (final String shardName : shardNames) {
        int numRetried = retryFailedJobsFromShard(
            request.getQueueName(),
            shardName,
            request.getPriority(),
            request.getAttemptsToAllow(),
            request.isSetRunAfterTimestampMillis()
                ? request.getRunAfterTimestampMillis() : currentTimeMillis,
            remainingLimit);
        remainingLimit -= numRetried;
        if (remainingLimit <= 0) {
          break;
        }
      }
      return request.getLimit() - remainingLimit;
    }
  });
}
/**
 * Identical to the deleteQueue method above, but takes no password; intended for testing use only.
 */
@VisibleForTesting
public Future<Void> deleteQueue(final String name) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      deleteQueueImpl(name);
      return null;
    }
  });
}
/**
 * Clean up all the keys in each shard. This method is only for test use.
 */
@VisibleForTesting
public Future<Void> cleanUpAllShards() {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      for (final ImmutableMap.Entry<String, RedisPools> shard : shardMap.entrySet()) {
        RedisUtils.executeWithConnection(
            shard.getValue().getGeneralRedisPool(),
            new Function<Jedis, Void>() {
              @Override
              public Void apply(Jedis conn) {
                conn.flushAll();
                return null;
              }
            });
      }
      return null;
    }
  });
}
@Override
public Future<Integer> blocking_call() {
  int delay = r.nextInt(10); // blocking calls take between 0 and 9 seconds (nextInt(10) excludes 10)
  System.out.println("HaverServer:blocking_call requested. Will block for " + delay + " seconds");
  Function0<Integer> blockingWork = new ExampleBlockingCall(delay);
  // Hand the blocking call to the thread pool to be scheduled and eventually executed.
  // Once complete, the result is returned to the client.
  return esfp.apply(blockingWork);
}
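// ExampleBlockingCall is not shown in the snippet above; this is a plausible
// minimal reconstruction, assuming it simply sleeps for the requested delay.
static class ExampleBlockingCall extends ExceptionalFunction0<Integer> {
  private final int delaySeconds;

  ExampleBlockingCall(int delaySeconds) {
    this.delaySeconds = delaySeconds;
  }

  @Override
  public Integer applyE() throws Throwable {
    TimeUnit.SECONDS.sleep(delaySeconds); // simulate blocking work
    return delaySeconds;
  }
}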
/**
 * Remove the job hash from Redis. This function is used in tests to simulate the case where the
 * job id is still in the queue while the job hash has been evicted by the Redis LRU policy.
 */
@VisibleForTesting
public Future<Void> removeJobHash(String jobDescriptor) {
  final PinLaterJobDescriptor jobDesc = new PinLaterJobDescriptor(jobDescriptor);
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      RedisUtils.executeWithConnection(
          shardMap.get(jobDesc.getShardName()).getGeneralRedisPool(),
          new Function<Jedis, Void>() {
            @Override
            public Void apply(Jedis conn) {
              String hashRedisKey = RedisBackendUtils.constructHashRedisKey(
                  jobDesc.getQueueName(), jobDesc.getShardName(), jobDesc.getLocalId());
              conn.del(hashRedisKey);
              return null;
            }
          });
      return null;
    }
  });
}
@Override
public Future<PinLaterDequeueResponse> apply(final Permit permit) {
  return futurePool.apply(new ExceptionalFunction0<PinLaterDequeueResponse>() {
    @Override
    public PinLaterDequeueResponse applyE() throws Throwable {
      return dequeueJobsImpl(source, request, numAutoRetries);
    }
  }).respond(new Function<Try<PinLaterDequeueResponse>, BoxedUnit>() {
    @Override
    public BoxedUnit apply(Try<PinLaterDequeueResponse> responseTry) {
      // Release the concurrency permit whether the dequeue succeeded or failed.
      permit.release();
      return BoxedUnit.UNIT;
    }
  });
}
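// Hedged sketch of how the apply(Permit) callback above might be wired: an
// AsyncSemaphore bounds concurrent dequeues, and flatMap chains the acquired
// permit into the dequeue. dequeueSemaphore and dequeueWithPermit are assumed names.
Future<PinLaterDequeueResponse> response = dequeueSemaphore.acquire().flatMap(
    new Function<Permit, Future<PinLaterDequeueResponse>>() {
      @Override
      public Future<PinLaterDequeueResponse> apply(Permit permit) {
        return dequeueWithPermit(permit); // stands in for the callback shown above
      }
    });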