/** * This attempts to cancel out all pending and in progress tasks. Does not interrupt the running * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877). * @throws InterruptedException */ void cancelTasks() throws InterruptedException { Collection<Future<Void>> tasks = futures; LOG.debug("cancelling " + tasks.size() + " flush region tasks " + name); for (Future<Void> f: tasks) { f.cancel(false); } // evict remaining tasks and futures from taskPool. futures.clear(); while (taskPool.poll() != null) {} stop(); }
/** * This attempts to cancel out all pending and in progress tasks (interruptions issues) * @throws InterruptedException */ void cancelTasks() throws InterruptedException { Collection<Future<Void>> tasks = futures; LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name); for (Future<Void> f: tasks) { // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there // are places in the HBase code where row/region locks are taken and not released in a // finally block. Thus we cancel without interrupting. Cancellations will be slower to // complete but we won't suffer from unreleased locks due to poor code discipline. f.cancel(false); } // evict remaining tasks and futures from taskPool. futures.clear(); while (taskPool.poll() != null) {} stop(); }
/**
 * Post-test teardown: drains every already-completed future from the
 * completion service, re-throwing any exception a background task produced.
 */
@After
public void testTearDown() throws Exception {
  for (Future<Object> done = executorService.poll();
      done != null;
      done = executorService.poll()) {
    // get() rethrows any exception thrown inside the task.
    done.get();
  }
}
private void _testAll(Parser parser, Path[] testFiles, ParseContext[] parseContext, int numThreads, int numIterations, Map<Path, Extract> truth, ExecutorService ex) { ExecutorCompletionService<Integer> executorCompletionService = new ExecutorCompletionService<>(ex); //use the same parser in all threads for (int i = 0; i < numThreads; i++) { executorCompletionService.submit(new TikaRunner(parser, parseContext[i], numIterations, testFiles, truth)); } int completed = 0; while (completed < numThreads) { //TODO: add a maximum timeout threshold Future<Integer> future = null; try { future = executorCompletionService.poll(1000, TimeUnit.MILLISECONDS); if (future != null) { future.get();//trigger exceptions from thread completed++; } } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } }
private void _testDetectorOnAll(Detector detector, Path[] testFiles, int numThreads, int numIterations, Map<Path, MediaType> truth, ExecutorService ex, int randomlyResizeSAXPool) { ExecutorCompletionService<Integer> executorCompletionService = new ExecutorCompletionService<>(ex); executorCompletionService.submit(new SAXPoolResizer(randomlyResizeSAXPool)); for (int i = 0; i < numThreads; i++) { executorCompletionService.submit(new TikaDetectorRunner(detector, numIterations, testFiles, truth)); } int completed = 0; while (completed < numThreads) { //TODO: add a maximum timeout threshold Future<Integer> future = null; try { future = executorCompletionService.poll(1000, TimeUnit.MILLISECONDS); if (future != null) { future.get();//trigger exceptions from thread completed++; } } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } ex.shutdown(); ex.shutdownNow(); }
/**
 * Retrieves and removes the Future of the next completed task, waiting up to
 * the given timeout if none is complete yet.
 *
 * @param timeout how long to wait before giving up
 * @param unit    time unit of {@code timeout}
 * @return the Future of the next completed task, or {@code null} on timeout
 * @throws InterruptedException if interrupted while waiting
 */
public Future<V> poll(long timeout, TimeUnit unit) throws InterruptedException {
  // Straight delegation to the wrapped completion service.
  return execComplServ.poll(timeout, unit);
}
// Closes an enclosing class whose header is outside this chunk.
}
/**
 * Retrieves and removes the Future of the next completed task, or returns
 * {@code null} if none has completed yet (non-blocking).
 */
public Future<V> poll() {
  // Straight delegation to the wrapped completion service.
  return execComplServ.poll();
}
// NOTE(review): raw ExecutorCompletionService — the element type was omitted;
// a parameterized type would avoid unchecked warnings.
ExecutorCompletionService completionService = new ExecutorCompletionService(executor);
// NOTE(review): poll() is non-blocking and returns null when no *completed*
// task is queued, so if nothing has been submitted (or finished) this loop
// exits immediately and "Still working..." is never printed — confirm the
// intent; take() or tracking a submitted-task count may have been meant.
while (completionService.poll() != null) {
  System.out.println("Still working...");
}
// Stop accepting new tasks; already-submitted tasks still run to completion.
executor.shutdown();
@Override
public Future<R> poll() {
  // Non-blocking: result is null if no task has completed yet.
  Future<R> result = super.poll();
  // Each retrieved completion is passed through submitNewOnCompletion, which
  // presumably submits a queued follow-up task (self-feeding completion
  // service) — confirm against the enclosing class.
  return submitNewOnCompletion(result);
}
@Override
public Future<R> poll(long timeout, TimeUnit unit) throws InterruptedException {
  // Blocks up to the timeout; result is null if nothing completed in time.
  Future<R> result = super.poll(timeout, unit);
  // Each retrieved completion is passed through submitNewOnCompletion, which
  // presumably submits a queued follow-up task (self-feeding completion
  // service) — confirm against the enclosing class.
  return submitNewOnCompletion(result);
}
//core 5 max 10 with 60 second idle time ThreadPoolExecutor executor = new ThreadPoolExecutor(5,10,60,TimeUnit.SECONDS,new LinkedBlockingQueue<Runnable>()); ExecutorCompletionService completionService = new ExecutorCompletionService(executor); private final static int MAX_IN_QUEUE = 1000; public void doSubmit(Runnable r){ while(executor.getQueue().size() >= MAX_IN_QUEUE) completionService.poll(100,TimeUnit.MILLISECONDS); completionService.submit(r); }
private void waitForTaskToComplete() throws IOException { boolean completed; for (completed = false; completionService.poll() != null; completed = true) { // keep polling until there is no data } if (!completed) { try { completionService.take(); } catch (InterruptedException e) { lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e); throw lastError; } } }
/** * This attempts to cancel out all pending and in progress tasks. Does not interrupt the running
 * tasks itself. An ongoing HRegion.flush() should not be interrupted (see HBASE-13877).
 * @throws InterruptedException
 */
void cancelTasks() throws InterruptedException {
  Collection<Future<Void>> tasks = futures;
  LOG.debug("cancelling " + tasks.size() + " flush region tasks " + name);
  for (Future<Void> f: tasks) {
    // cancel(false): do not interrupt a task that is already running.
    f.cancel(false);
  }
  // evict remaining tasks and futures from taskPool.
  futures.clear();
  // Drain completed entries still queued in the completion service.
  while (taskPool.poll() != null) {}
  stop();
}
/** * This attempts to cancel out all pending and in progress tasks (interruptions issues)
 * @throws InterruptedException
 */
void cancelTasks() throws InterruptedException {
  Collection<Future<Void>> tasks = futures;
  LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name);
  for (Future<Void> f: tasks) {
    // TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there
    // are places in the HBase code where row/region locks are taken and not released in a
    // finally block. Thus we cancel without interrupting. Cancellations will be slower to
    // complete but we won't suffer from unreleased locks due to poor code discipline.
    f.cancel(false);
  }
  // evict remaining tasks and futures from taskPool.
  futures.clear();
  // Drain completed entries still queued in the completion service.
  while (taskPool.poll() != null) {}
  stop();
}
/**
 * Waits for at least one task to complete.
 *
 * @throws IOException
 *         If an I/O error occurs. In particular, an IOException may be thrown if the output
 *         stream has been closed.
 */
private void waitForTaskToComplete() throws IOException {
  // First drain, without blocking, every future that has already finished.
  boolean drainedAny = false;
  for (Future<?> done = this.completionService.poll();
      done != null;
      done = this.completionService.poll()) {
    drainedAny = true;
  }
  if (drainedAny) {
    return;
  }
  // Nothing had finished yet: block until one task completes.
  try {
    this.completionService.take();
  } catch (final InterruptedException e) {
    throw Utility.initIOException(e);
  }
}
/** * Waits for at least one task to complete.
 *
 * @throws IOException
 *         If an I/O error occurs. In particular, an IOException may be thrown if the output stream has been
 *         closed.
 */
private void waitForTaskToComplete() throws IOException {
  boolean completed = false;
  // Drain, without blocking, every future that has already finished.
  while (this.completionService.poll() != null) {
    completed = true;
  }
  if (!completed) {
    try {
      // Nothing had finished yet: block until one task completes.
      this.completionService.take();
    } catch (final InterruptedException e) {
      throw Utility.initIOException(e);
    }
  }
}
/**
 * Post-test teardown: drains every completed future from the completion
 * service, re-throwing any exception a background task produced.
 */
@After
public void testTearDown() throws Exception {
  while (true) {
    // Non-blocking poll: null means no completed task remains.
    Future<Object> f = executorService.poll();
    if (f == null) break;
    // get() rethrows any exception thrown inside the task.
    f.get();
  }
}
/**
 * Runs every task in shuffled order, keeping at most maxConcurrent(elapsed)
 * tasks in flight at a time (the cap may change as time passes), and returns
 * once all tasks have been retired from the completion service.
 *
 * @param tasks tasks to run; the given list is not modified
 * @throws InterruptedException if interrupted while sleeping between passes
 */
public void execute(List<Runnable> tasks) throws InterruptedException {
  LinkedList<Runnable> queue = Lists.newLinkedList(tasks);
  Collections.shuffle(queue);
  Stopwatch elapsed = Stopwatch.createStarted();
  int inFlight = 0;
  while (!(queue.isEmpty() && inFlight == 0)) {
    // Re-read the cap each pass: it may vary with elapsed time.
    int cap = maxConcurrent(elapsed);
    while (!queue.isEmpty() && inFlight < cap) {
      executorService.submit(queue.pop(), null);
      inFlight++;
    }
    // Retire every task that has finished since the last pass.
    for (; executorService.poll() != null; inFlight--) {
      // nothing to do with the future itself
    }
    Thread.sleep(100);
  }
}
/**
 * Books the first flight any travel agency returns, waiting at most five
 * seconds for a result.
 *
 * Fixes over the original: poll() returns null on timeout (the original
 * dereferenced it blindly), and the executor pool was never shut down.
 */
@Test
public void sample_21() throws Exception {
  long id = 42;
  ExecutorService pool = Executors.newFixedThreadPool(10);
  try {
    List<TravelAgency> agencies = Collections.singletonList(new SomeTravelAgency());
    User user = findById(id);
    GeoLocation location = locate();
    ExecutorCompletionService<Flight> ecs = new ExecutorCompletionService<>(pool);
    agencies.forEach(agency -> ecs.submit(() -> agency.search(user, location)));
    Future<Flight> firstFlight = ecs.poll(5, SECONDS);
    if (firstFlight == null) {
      // No agency answered within the timeout; fail explicitly instead of NPE.
      throw new IllegalStateException("no agency returned a flight within 5 seconds");
    }
    Flight flight = firstFlight.get();
    book(flight);
  } finally {
    // Stop the worker threads; any still-searching agencies are interrupted.
    pool.shutdownNow();
  }
}