/**
 * Create a cache which will reside in {@code directory}. This cache is lazily initialized on
 * first access and will be created if it does not exist.
 *
 * @param fileSystem file-system abstraction handed through to the cache for all reads/writes
 * @param directory a writable directory
 * @param appVersion application version number, passed through to the cache constructor
 * @param valueCount the number of values per cache entry. Must be positive.
 * @param maxSize the maximum number of bytes this cache should use to store its data.
 *     Must be positive.
 * @return a lazily-initialized cache backed by {@code directory}
 * @throws IllegalArgumentException if {@code maxSize} or {@code valueCount} is not positive
 */
public static DiskLruCache create(FileSystem fileSystem, File directory, int appVersion,
    int valueCount, long maxSize) {
  if (maxSize <= 0) {
    throw new IllegalArgumentException("maxSize <= 0");
  }
  if (valueCount <= 0) {
    throw new IllegalArgumentException("valueCount <= 0");
  }

  // Use a single background thread to evict entries. Core size 0 + 60s keep-alive lets the
  // thread die when the cache is idle; the unbounded queue serializes eviction work.
  Executor executor = new ThreadPoolExecutor(0, 1, 60L, TimeUnit.SECONDS,
      new LinkedBlockingQueue<>(), Util.threadFactory("OkHttp DiskLruCache", true));

  return new DiskLruCache(fileSystem, directory, appVersion, valueCount, maxSize, executor);
}
public void fetch(HttpUrl url) throws IOException { AtomicInteger hostnameCount = new AtomicInteger(); AtomicInteger previous = hostnames.putIfAbsent(url.host(), hostnameCount); if (previous != null) hostnameCount = previous; if (hostnameCount.incrementAndGet() > 100) return; HttpUrl link = response.request().url().resolve(href); if (link == null) continue; // URL is either invalid or its scheme isn't http/https. queue.add(link.newBuilder().fragment(null).build());
/**
 * Hands out one pooled engine: records the checkout in the availability counter, then
 * takes the next engine off the pool queue.
 *
 * NOTE(review): the counter is decremented before the poll, and {@code poll()} returns
 * {@code null} when the pool is empty — confirm callers tolerate a null engine and that
 * the counter cannot go negative under contention.
 */
public ScriptEngine getEngine() {
    availableCount.decrementAndGet();
    final ScriptEngine engine = scriptEngines.poll();
    return engine;
}
long maxId = 0L; final BlockingQueue<Tuple<StandardProvenanceEventRecord, Integer>> eventQueue = new LinkedBlockingQueue<>(100); final AtomicBoolean finishedAdding = new AtomicBoolean(false); final List<Future<?>> futures = new ArrayList<>(); final ExecutorService exec = Executors.newFixedThreadPool(configuration.getIndexThreadPoolSize(), new ThreadFactory() { @Override public Thread newThread(final Runnable r) { final AtomicInteger indexingFailureCount = new AtomicInteger(0); try { for (int i = 0; i < configuration.getIndexThreadPoolSize(); i++) { final Future<?> future = exec.submit(callable); futures.add(future); if (!accepted && indexingFailureCount.get() >= MAX_INDEXING_FAILURE_COUNT) { indexEvents = false; // don't add anything else to the queue. eventQueue.clear(); + "so no more Provenance Events will be indexed for this Provenance file.", writerFile, indexingFailureCount.get(), MAX_INDEXING_FAILURE_COUNT); logger.warn(warning); if (eventReporter != null) { exec.shutdown();
@Override protected int runCmd() throws Exception { ExecutorService executorService = Executors.newFixedThreadPool(numThreads); try { final AtomicInteger numLedgers = new AtomicInteger(0); final CountDownLatch doneLatch = new CountDownLatch(numThreads); final AtomicInteger numFailures = new AtomicInteger(0); final LinkedBlockingQueue<Long> ledgerQueue = new LinkedBlockingQueue<Long>(); ledgerQueue.addAll(ledgers); for (int i = 0; i < numThreads; i++) { final int tid = i; executorService.submit(new Runnable() { @Override public void run() { if (numFailures.get() > 0) { throw new IOException("Encounter " + numFailures.get() + " failures during deleting ledgers"); executorService.shutdown();
static ExecutorService e = Executors.newFixedThreadPool(2); static int N = 1000000; int length = (i == 0) ? 1 : i * 5; System.out.print(length + "\t"); System.out.print(doTest(new LinkedBlockingQueue<Integer>(length), N) + "\t"); System.out.print(doTest(new ArrayBlockingQueue<Integer>(length), N) + "\t"); System.out.print(doTest(new SynchronousQueue<Integer>(), N)); e.shutdown(); long t = System.nanoTime(); e.submit(new Runnable() { public void run() { for (int i = 0; i < n; i++) try { q.put(i); } catch (InterruptedException ex) {} Long r = e.submit(new Callable<Long>() { public Long call() { long sum = 0; for (int i = 0; i < n; i++) try { sum += q.take(); } catch (InterruptedException ex) {} return sum; }).get(); t = System.nanoTime() - t;
this.minActiveRequestPerUser = minActiveRequestPerUser; this.maxActiveRequestsPerUser = maxActiveRequestsPerUser; this.currentLimitPerUser = new AtomicInteger(0); this.currentTotalActiveRequests = new AtomicInteger(0); this.currentActiveRequestsPerUser = new HashMap<Integer, AtomicInteger>(); this.waitingQueue = new PriorityBlockingQueue<UserRequest>(maxWaitingRequests, new UserRequestComparator()); this.lock = new ReentrantLock(); this.threadPoolExecutor = new LocalThreadPoolExecutor(0, maxActiveRequests, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()); this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(); this.scheduledExecutorService.scheduleWithFixedDelay(new FairnessManager(), 1L, 1L, TimeUnit.SECONDS); throw new RuntimeException("Max limit reached"); if (currentTotalActiveRequests.get() < maxActiveRequests) { lock.lock(); try {
final int jobCount = 4; BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(); ExecutorService service = Executors.newCachedThreadPool(); try { PartitionedUnorderedExecutor executor = new PartitionedUnorderedExecutor(queue, service, jobCount); final AtomicInteger interrupted = new AtomicInteger(); jobSemaphore.acquire(); } catch (InterruptedException e) { interrupted.incrementAndGet(); assertThat(interrupted.get(), is(jobCount)); } finally { service.shutdown();
/**
 * Folds any already-finished partial results into the aggregate and reports whether this
 * RPC has reached a terminal state. Never blocks: only futures whose {@code isDone()} is
 * already true are consumed.
 */
public synchronized boolean isDone() {
  if (status.get() == RPC_PENDING) {
    try {
      // Drain completed futures from the head of the queue without blocking.
      Future<R> dataFuture = pendingDataOps.peek();
      while (dataFuture != null && dataFuture.isDone()) {
        dataFuture = pendingDataOps.poll();
        // get() returns immediately — the future was observed done above.
        R result = dataFuture.get();
        this.aggregate(result);
        dataFuture = pendingDataOps.peek();
      }
      // Every partial result consumed and no failure recorded: finish the RPC.
      if (pendingDataOps.isEmpty() && status.get() == RPC_PENDING) {
        completeOperation();
      }
    } catch (Exception e) {
      // Any failure (including InterruptedException from get()) marks the RPC as errored.
      // NOTE(review): the interrupt flag is not restored here — confirm that is intended.
      status.set(RPC_ERROR);
      this.exception = e;
    }
  }
  // Done is encoded as a positive status — presumably RPC_DONE/RPC_ERROR are > 0 and
  // RPC_PENDING is not; verify against the status constants.
  return status.get() > 0;
}
queue = new LinkedBlockingQueue<>(10000); Executors.newFixedThreadPool(ASYNCH_RESOURCE_THREADS); BlockingQueue<Object> sourceQueue = new LinkedBlockingQueue<>(resources); for (int i = 0; i < ASYNCH_RESOURCE_THREADS; i++) { sourceQueue.add(TERMINATOR); executor.submit( () -> { try { Object o; while (!completed && (o = sourceQueue.take()) != TERMINATOR) { Resource r = (Resource) o; T mapped = mapper.apply(r); if (mapped != null) { queue.put(mapped); executor.shutdown(); executor.awaitTermination( Long.MAX_VALUE, TimeUnit.MILLISECONDS);
@Test(timeout = 11000) public void fail_fast() throws InterruptedException, ExecutionException { ExecutorService executorService = Executors.newFixedThreadPool(2); CompletionService<Long> completionService = new ExecutorCompletionService<Long>(executorService); BlockingQueue<OWLCompositeObject> queue = new LinkedBlockingQueue<OWLCompositeObject>(); BlockingQueue<OntologySetup> ontologyQueue = new LinkedBlockingQueue<OntologySetup>(); OwlOntologyProducer producer = new OwlOntologyProducer(queue, ontologyQueue, new AtomicInteger(), graph); OntologySetup ontologyConfig = new OntologySetup(); futures.add(completionService.submit(producer)); Thread.sleep(1000); ontologyQueue.put(ontologyConfig); Future<?> completedFuture = completionService.take(); futures.remove(completedFuture); completedFuture.get(); executorService.shutdown(); executorService.awaitTermination(10, TimeUnit.SECONDS);
/**
 * Blocks until every pending partial result is available, aggregates them, and returns the
 * final aggregate.
 *
 * @return the aggregated result once the RPC reaches {@code RPC_DONE}
 * @throws ExecutionException if a previously recorded or newly raised failure exists
 * @throws InterruptedException if the RPC is still pending after draining ("RPC timeout")
 *     or ended in an error state with no recorded exception ("RPC error")
 */
public synchronized T get() throws InterruptedException, ExecutionException {
  // A failure recorded earlier (e.g. by isDone()) is re-thrown immediately.
  if (this.exception != null){
    throw new ExecutionException(exception);
  }
  if (status.get() == RPC_PENDING){
    try {
      // Unlike isDone(), drain every pending future, blocking on each get().
      for (Future<R> dataFuture = pendingDataOps.poll(); dataFuture != null;
          dataFuture = pendingDataOps.poll()){
        R result = dataFuture.get();
        this.aggregate(result);
      }
      completeOperation();
    } catch (Exception e) {
      // NOTE(review): InterruptedException is folded into RPC_ERROR without restoring
      // the interrupt flag — confirm that is intended.
      status.set(RPC_ERROR);
      this.exception = e;
    }
  }
  if (status.get() == RPC_DONE){
    return this.getAggregate();
  } else if (status.get() == RPC_PENDING){
    // Still pending after the drain is surfaced to callers as a timeout.
    throw new InterruptedException("RPC timeout");
  } else if (exception != null) {
    // Error state with a cause: wrap and rethrow.
    throw new ExecutionException(exception);
  } else {
    // Error state with no recorded cause.
    throw new InterruptedException("RPC error");
  }
}
final BlockingQueue<Long> dirsToCheck = new LinkedBlockingQueue<>(); dirsToCheck.add(mInodeTree.getRoot().getId()); List<Future<List<AlluxioURI>>> results = new ArrayList<>(); Long fileId = dirsToCheck.take(); if (fileId == completionMarker) { // A thread signaled completion. completed++; } else { // A new directory needs to be checked. StartupConsistencyChecker checker = new StartupConsistencyChecker(fileId); results.add(service.submit(checker)); started++; for (Future<List<AlluxioURI>> result : results) { try { inconsistentUris.addAll(result.get()); } catch (Exception e) { service.shutdown(); return inconsistentUris;
/**
 * Verifies that PartitionedOrderedExecutor runs submitted jobs strictly in submission
 * order: each task asserts that the previously observed index is exactly one less than
 * its own, even though the backing pool has two threads.
 */
@Test
public void testJobsAreExecutedInOrder() throws InterruptedException, ExecutionException {
  BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  ExecutorService service = Executors.newFixedThreadPool(2);
  try {
    PartitionedOrderedExecutor executor = new PartitionedOrderedExecutor(queue, service);
    // Starts at -1 so the first task (index 0) sees its expected predecessor.
    final AtomicInteger sequence = new AtomicInteger(-1);
    List<Future<?>> tasks = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
      final int index = i;
      tasks.add(executor.submit(() -> {
        // Ordering check: the task that ran before us must have carried index - 1.
        assertThat(sequence.getAndSet(index), is(index -1));
        return null;
      }));
    }
    // get() propagates any assertion failure raised inside a task.
    for (Future<?> task : tasks) {
      task.get();
    }
  } finally {
    service.shutdown();
  }
}
} // NOTE(review): closing brace of the enclosing class (class header not visible here).
this.httpClient = client; final AtomicInteger threadId = new AtomicInteger(0); final ThreadFactory threadFactory = r -> { final Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); t.setName("Replicate Request Thread-" + threadId.incrementAndGet()); return t; }; executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 5, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory); maintenanceExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() { @Override public Thread newThread(final Runnable r) {
final AtomicInteger count = new AtomicInteger(); final BlockingQueue<Object> ref = new LinkedBlockingQueue<>(); for (final Invoker<T> invoker : selected) { executor.execute(new Runnable() { @Override public void run() { Object ret = ref.poll(timeout, TimeUnit.MILLISECONDS); if (ret instanceof Throwable) { Throwable e = (Throwable) ret;
int tilingThreadCount = Integer.parseInt(MCRIView2Tools.getIView2Property("TilingThreads")); ThreadFactory slaveFactory = new ThreadFactory() { AtomicInteger tNum = new AtomicInteger(); final AtomicInteger activeThreads = new AtomicInteger(); final LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(); ThreadPoolExecutor baseExecutor = new ThreadPoolExecutor(tilingThreadCount, tilingThreadCount, 1, TimeUnit.DAYS, workQueue, slaveFactory) { try { while (activeThreads.get() < tilingThreadCount) { runLock.lock(); try { if (!running) { if (job != null && !tilingServe.getExecutor().isShutdown()) { LOGGER.info("Creating:{}", job.getPath()); tilingServe.submit(getTilingAction(job)); runLock.unlock();
return 0; ExecutorService executorService = Executors.newCachedThreadPool(); final RateLimiter rateLimiter = RateLimiter.create(fenceRate); final byte[] passwd = getConf().getBKDigestPW().getBytes(UTF_8); final CountDownLatch latch = new CountDownLatch(ledgers.size()); final AtomicInteger numPendings = new AtomicInteger(ledgers.size()); final LinkedBlockingQueue<Long> ledgersQueue = new LinkedBlockingQueue<Long>(); ledgersQueue.addAll(ledgers); executorService.submit(new Runnable() { @Override public void run() {