/**
 * Submits {@code job} to the shared static work queue by delegating to
 * {@code WORK_QUEUE.push}.
 *
 * @param job the job to enqueue; payload type is {@link Void}
 * @throws InterruptedException if the calling thread is interrupted while enqueueing
 */
static void push(Job<Void> job) throws InterruptedException { WORK_QUEUE.push(job); } }
/**
 * Shrinks the worker pool by one increment: queues {@code mMWorkforceIncrement}
 * "death" tasks, each of which terminates the worker thread that dequeues it.
 *
 * @throws InterruptedException if interrupted while queueing the death tasks
 */
private synchronized void reduceWorkforce() throws InterruptedException {
    verbose("Decrementing workforce from " + mWorkThreads.size());
    // push the right number of kiss-of-death tasks to shut down threads.
    for (int i = 0; i < mMWorkforceIncrement; i++) {
        _push(new QueueTask<>(QueueTask.ActionType.Death, null));
    }
}
/**
 * Schedules {@code job} for asynchronous processing, then verifies that the
 * worker pool is large enough to service the queue.
 *
 * @param job the job to enqueue
 * @throws InterruptedException if interrupted while enqueueing
 */
public void push(Job<T> job) throws InterruptedException {
    final QueueTask<T> task = new QueueTask<>(QueueTask.ActionType.Normal, job);
    _push(task);
    checkWorkforce();
}
/**
 * Grows the worker pool by one increment when it is empty, or when a positive
 * growth-trigger ratio is configured and the pending-jobs-per-thread backlog
 * exceeds it. Growth is capped at {@code MAX_WORKFORCE_SIZE}.
 * <p>
 * Fix: guard the ratio with {@code > 0} (matching the corrected variant of
 * this method elsewhere in the codebase) so a zero or negative ratio no
 * longer triggers pool growth on every push.
 */
private synchronized void checkWorkforce() {
    if (mWorkThreads.isEmpty()
            || ((mPendingJobs.size() / mWorkThreads.size() > mGrowthTriggerRation)
                    && mGrowthTriggerRation > 0)) {
        verbose("Request to incrementing workforce from %1$d", mWorkThreads.size());
        if (mWorkThreads.size() >= MAX_WORKFORCE_SIZE) {
            verbose("Already at max workforce %1$d, denied.", MAX_WORKFORCE_SIZE);
            return;
        }
        // spawn one increment's worth of daemon worker threads.
        for (int i = 0; i < mMWorkforceIncrement; i++) {
            Thread t = new Thread(this, mName + "_" + mThreadId.incrementAndGet());
            t.setDaemon(true);
            mWorkThreads.add(t);
            t.start();
        }
        verbose("thread-pool size=%1$d", mWorkThreads.size());
    }
}
mCrunchingRequests = new WorkQueue<AaptProcess>( mLogger, queueThreadContext, "png-cruncher", 5, 2f);
/**
 * Done with the record processing: shuts down the work queue, which finishes
 * the outstanding {@link ExecutionRecord} publication before returning.
 *
 * @throws InterruptedException if interrupted while the queue drains and shuts down
 */
void finish() throws InterruptedException {
    workQueue.shutdown();
}
/** * Shutdowns the working queue and wait until all pending requests have * been processed. This needs to be reviewed as jobs can still be added * to the queue once the shutdown process has started.... * @throws InterruptedException if the shutdown sequence is interrupted */ public synchronized void shutdown() throws InterruptedException { // push as many death pills as necessary for (Thread t : mWorkThreads) { _push(new QueueTask<>(QueueTask.ActionType.Death, null)); } // we could use a latch. for (Thread t : mWorkThreads) { t.join(); } mWorkThreads.clear(); mQueueThreadContext.shutdown(); }
/**
 * Ensures the worker pool can keep up with the queue: grows it by one
 * increment when the pool is empty, or when a positive growth-trigger ratio
 * is configured and the pending-jobs-per-thread backlog exceeds it. Growth
 * is capped at {@code MAX_WORKFORCE_SIZE}.
 */
private synchronized void checkWorkforce() {
    if (!mWorkThreads.isEmpty()) {
        // division is safe here: the pool is known to be non-empty.
        boolean backlogged = mGrowthTriggerRatio > 0
                && mPendingJobs.size() / mWorkThreads.size() > mGrowthTriggerRatio;
        if (!backlogged) {
            return;
        }
    }
    verbose("Request to incrementing workforce from %1$d", mWorkThreads.size());
    if (mWorkThreads.size() >= MAX_WORKFORCE_SIZE) {
        verbose("Already at max workforce %1$d, denied.", MAX_WORKFORCE_SIZE);
        return;
    }
    // spawn one increment's worth of daemon worker threads.
    for (int i = 0; i < mMWorkforceIncrement; i++) {
        Thread worker = new Thread(this, mName + "_" + mThreadId.incrementAndGet());
        worker.setDaemon(true);
        mWorkThreads.add(worker);
        worker.start();
    }
    verbose("thread-pool size=%1$d", mWorkThreads.size());
}
/**
 * Creates a recorder that serializes execution records through a
 * single-threaded work queue writing to {@code outWriter}.
 *
 * @param outWriter sink for execution records, wrapped in the job context
 * @param iLogger logger handed to the underlying work queue
 */
ProcessRecorder(@NonNull ExecutionRecordWriter outWriter, @NonNull ILogger iLogger) {
    this.singletonJobContext = new JobContext<>(outWriter);
    this.workQueue =
            new WorkQueue<>(iLogger, new WorkQueueContext(), "execRecordWriter", 1);
}
@Override public synchronized void end(int key) throws InterruptedException { long startTime = System.currentTimeMillis(); try { waitForAll(key); mOutstandingJobs.get(key).clear(); mLogger.verbose("Job finished in %1$d", System.currentTimeMillis() - startTime); } finally { // even if we have failures, we need to shutdown property the sub processes. if (refCount.decrementAndGet() == 0) { mCrunchingRequests.shutdown(); mLogger.verbose("Shutdown finished in %1$d", System.currentTimeMillis() - startTime); } } } }
/**
 * Shutdowns the working queue and wait until all pending requests have
 * been processed. This needs to be reviewed as jobs can still be added
 * to the queue once the shutdown process has started....
 * @throws InterruptedException if the shutdown sequence is interrupted
 */
public synchronized void shutdown() throws InterruptedException {
    // push as many death pills as necessary: one per worker thread, so each
    // worker dequeues exactly one and terminates itself.
    for (Thread t : mWorkThreads) {
        _push(new QueueTask<T>(QueueTask.ActionType.Death, null));
    }
    // wait for every worker to exit; we could use a latch.
    for (Thread t : mWorkThreads) {
        t.join();
    }
    mWorkThreads.clear();
    mQueueThreadContext.shutdown();
}
/**
 * Submits {@code job} to the shared static work queue by delegating to
 * {@code WORK_QUEUE.push}.
 *
 * @param job the job to enqueue; payload type is {@link Void}
 * @throws InterruptedException if the calling thread is interrupted while enqueueing
 */
public static void push(Job<Void> job) throws InterruptedException { WORK_QUEUE.push(job); } }
private synchronized void reduceWorkforce() throws InterruptedException { verbose("Decrementing workforce from " + mWorkThreads.size()); // push a the right number of kiss of death tasks to shutdown threads. for (int i = 0; i < mMWorkforceIncrement; i++) { _push(new QueueTask<T>(QueueTask.ActionType.Death, null)); } }
/**
 * Schedules {@code job} for asynchronous processing, then checks whether the
 * worker pool needs to grow to keep up with the queue.
 *
 * @param job the job to enqueue
 * @throws InterruptedException if interrupted while enqueueing
 */
public void push(Job<T> job) throws InterruptedException {
    _push(new QueueTask<T>(QueueTask.ActionType.Normal, job));
    checkWorkforce();
}
verbose("Creating a new working thread %1$s", threadName); mQueueThreadContext.creation(Thread.currentThread()); } catch (IOException e) { final QueueTask<T> queueTask = mPendingJobs.take(); if (queueTask.actionType== QueueTask.ActionType.Death) { verbose("Thread(%1$s): Death requested", threadName); return; verbose("Thread(%1$s): scheduling %2$s", threadName, job.getJobTitle()); verbose("Thread(%1$s): job %2$s finished", threadName, job.getJobTitle()); verbose("Thread(%1$s): queue size %2$d", threadName, mPendingJobs.size()); } finally { try { verbose("Thread(%1$s): destruction", threadName); mQueueThreadContext.destruction(Thread.currentThread()); } catch (IOException e) {
new WorkQueue<>( mLogger, queueThreadContext,
@Override public synchronized void end(int key) throws InterruptedException { long startTime = System.currentTimeMillis(); try { waitForAll(key); mOutstandingJobs.get(key).clear(); mLogger.verbose("Job finished in %1$d", System.currentTimeMillis() - startTime); } finally { // even if we have failures, we need to shutdown property the sub processes. if (refCount.decrementAndGet() == 0) { try { mCrunchingRequests.shutdown(); } catch(InterruptedException e) { Thread.interrupted(); mLogger.warning("Error while shutting down crunching queue : %s", e.getMessage()); } mLogger.verbose("Shutdown finished in %1$d", System.currentTimeMillis() - startTime); } } } }
/**
 * Asynchronously persists {@code executionRecord}: enqueues a job whose task
 * writes the record to the queue's payload writer and marks the job finished.
 * If the calling thread is interrupted while enqueueing, the interrupt status
 * is restored and the record is dropped.
 *
 * @param executionRecord the record to write
 */
void writeRecord(@NonNull final ExecutionRecord executionRecord) {
    try {
        workQueue.push(new Job<ExecutionRecordWriter>("recordWriter",
                new Task<ExecutionRecordWriter>() {
                    @Override
                    public void run(@NonNull Job<ExecutionRecordWriter> job,
                            @NonNull JobContext<ExecutionRecordWriter> context)
                            throws IOException {
                        // write the record through the payload writer, then mark done.
                        context.getPayload().write(executionRecord);
                        job.finished();
                    }
                }));
    } catch (InterruptedException e) {
        // NOTE(review): no logger field is visible here; printStackTrace is the
        // only reporting channel in scope — consider routing through a logger.
        e.printStackTrace();
        Thread.currentThread().interrupt();
    }
}
verbose("Creating a new working thread %1$s", threadName); mQueueThreadContext.creation(Thread.currentThread()); } catch (IOException e) { final QueueTask<T> queueTask = mPendingJobs.take(); if (queueTask.actionType== QueueTask.ActionType.Death) { verbose("Thread(%1$s): Death requested", threadName); return; verbose("Thread(%1$s): scheduling %2$s", threadName, job.getJobTitle()); verbose("Thread(%1$s): job %2$s finished, result=%3$b", threadName, job.getJobTitle(), result); verbose("Thread(%1$s): queue size %2$d", threadName, mPendingJobs.size()); } finally { try { verbose("Thread(%1$s): destruction", threadName); mQueueThreadContext.destruction(Thread.currentThread()); } catch (IOException | InterruptedException e) {
mCrunchingRequests.push(aaptProcessJob); } catch (InterruptedException e) {