// Listener callback (tail of an anonymous class whose declaration is outside this view):
// records the interruption on the shared DeferredException instead of propagating it,
// so the caller can surface all accumulated failures at once.
@Override public void interrupted(final InterruptedException e) { exception.addException(e); } }
/**
 * Deletes the contents of every cached Lucene index except those whose names appear in
 * {@code skipNames}. Per-index failures are deferred so that every index is attempted
 * before any error is reported.
 *
 * @param skipNames names of indexes to leave untouched
 * @throws IOException if deleting one or more indexes failed (first failure is the cause,
 *         the rest are suppressed on it by DeferredException)
 */
void deleteEverything(Set<String> skipNames) throws IOException {
  final DeferredException deleteException = new DeferredException();
  for (Entry<String, LuceneSearchIndex> index : indexes.asMap().entrySet()) {
    if (!skipNames.contains(index.getKey())) {
      try {
        index.getValue().deleteEverything();
      } catch (IOException e) {
        deleteException.addException(e);
      }
    }
  }
  try {
    deleteException.close();
  } catch (IOException ex) {
    // Already the declared type; rethrow as-is.
    throw ex;
  } catch (Exception ex) {
    // Fix: message previously misspelled as "indeices".
    throw new IOException("Failure deleting indices.", ex);
  }
}
// Completion callback (fragment of an anonymous Runnable; its declaration and the trailing
// `} });` belong to enclosing code outside this view). Folds any deferred failures into the
// final result before notifying the observer, and always releases the countdown latch in a
// finally block so waiters are never blocked by an observer failure.
@Override public void run() { try { UserResult finalResult = result; if(deferred.hasException()){ finalResult = finalResult.withException(deferred.getAndClear()); } innerObserver.attemptCompletion(finalResult); } finally { cd.countDown(); } } });
// Shuts this store down: removes metrics registered under our prefix (when metrics
// collection is enabled), invalidates the cached maps, then closes the default handle and
// the database. suppressingClose() defers each resource's failure so both closes are
// attempted; the final close() rethrows the first collected failure, if any.
// NOTE(review): close order (handle before db) looks deliberate — do not reorder.
@Override public void close() throws Exception { if (COLLECT_METRICS) { MetricUtils.removeAllMetricsThatStartWith(METRICS_PREFIX); } maps.invalidateAll(); closeException.suppressingClose(defaultHandle); closeException.suppressingClose(db); closeException.close(); }
/**
 * Blocks until the completion latch is released. An interruption is recorded on the
 * deferred exception (with the thread's interrupt flag restored) rather than thrown
 * directly; any accumulated failure is then surfaced as an unchecked exception.
 */
@Override
public void waitForCompletion() {
  try {
    completionLatch.await();
  } catch (InterruptedException ie) {
    // Preserve the interrupt status and defer the failure instead of propagating here.
    Thread.currentThread().interrupt();
    exception.addException(ie);
  }
  // Rethrows (without clearing) anything accumulated so far, wrapped unchecked.
  exception.throwNoClearRuntime();
}
/**
 * Starts execution of the plan's fragments. A startup failure is deferred so that
 * {@code exception.close()} (always run) can surface it together with any previously
 * recorded errors.
 *
 * @param plan     the execution plan whose fragments should be started
 * @param observer receives attempt-level progress notifications
 * @throws Exception the first deferred failure, if any was recorded
 */
public void start(ExecutionPlan plan, AttemptObserver observer) throws Exception {
  try {
    startFragments(plan, observer);
  } catch (ForemanException fe) {
    exception.addException(fe);
  } finally {
    exception.close();
  }
}
// Fragment: retry loop over seed hosts (the enclosing method begins and ends outside this
// view — the for-loop and both branches are cut). On eventual success, earlier failures are
// logged and cleared; otherwise the collected failures are rethrown.
// NOTE(review): the format string "Failure getting server list from seed host " contains no
// %s placeholder, so host1.toCompound() is silently dropped from the message — confirm and
// fix in the full method.
// NOTE(review): `e` is not declared within this fragment; presumably the catch variable of
// an enclosing try — verify against the full source. Also "succesful" is misspelled in the
// log message (runtime string; left untouched here).
final DeferredException ex = new DeferredException(); for(Host host1 : hosts){ try { if(ex.hasException()){ StringBuilder sb = new StringBuilder(); sb.append("One or more failures trying to get host list from a defined seed host. Update was ultimately succesful connecting to "); sb.append(":\n\n"); sb.append(getAvailableHosts()); logger.info(sb.toString(), ex.getAndClear()); } else { StringBuilder sb = new StringBuilder(); ex.addException(new RuntimeException(String.format("Failure getting server list from seed host ", host1.toCompound()), e)); throw new RuntimeException("Unable to get host lists from any host seed.", ex.getAndClear());
private void exclusively(ExclusiveOperation operation) throws IOException { // Attempt to acquire all exclusive locks to limit concurrent writes occurring. ArrayList<AutoCloseableLock> acquiredLocks = new ArrayList<>(exclusiveLocks.length); for (int i = 0; i < exclusiveLocks.length; i++) { try { // We cannot ensure that all write locks can be acquired, so a best attempt must be made. // If lock is still held after waiting 3 seconds, continue with the lock acquisition and close. // Note: The data from the concurrent write cannot be guaranteed to be persisted on restart. if (exclusiveLocks[i].tryOpen(3L, TimeUnit.SECONDS) != null) { acquiredLocks.add(exclusiveLocks[i]); } } catch (InterruptedException e) { // Do nothing. } } try(DeferredException deferred = new DeferredException()) { try { operation.execute(deferred); } catch(RocksDBException e) { deferred.addException(e); } deferred.suppressingClose(AutoCloseables.all(acquiredLocks)); } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException(e); } }
// Wipes all values by dropping and recreating the RocksDB column family, under the
// exclusive-lock protocol of exclusively() and synchronized on this instance. Open
// iterators are closed first (they would pin the dropped family), and each step's failure
// is deferred so the remaining cleanup still runs.
// NOTE(review): the step order (close iterators -> drop -> close handle -> recreate) looks
// deliberate and order-critical — do not reorder.
@Override @VisibleForTesting public void deleteAllValues() throws IOException { exclusively((deferred) -> { synchronized(this) { deleteAllIterators(deferred); try { db.dropColumnFamily(handle); } catch(RocksDBException ex) { deferred.addException(ex); } deferred.suppressingClose(handle); try { this.handle = db.createColumnFamily(family); } catch (Exception ex) { deferred.addException(ex); } } }); }
// Shuts down the catalog service: closes the plugin registry while briefly holding its
// write lock (so no concurrent writer races the close), cancels the background refresher,
// then closes the protocol and allocator. suppressingClose() defers each failure so every
// resource is attempted; the final close() rethrows the first collected failure.
@Override public void close() throws Exception { DeferredException ex = new DeferredException(); if(plugins != null) { try (AutoCloseableLock l = plugins.writeLock()) { ex.suppressingClose(plugins); } } if(refresher != null) { refresher.cancel(false); } ex.suppressingClose(protocol); ex.suppressingClose(allocator); ex.close(); }
// Parameterized test driver (trailing `}` closes the enclosing class, which starts outside
// this view). Runs every datum; in forgiving mode failures are deferred so all data rows
// run before throwAndClear() reports the first failure, otherwise the first failure is
// thrown immediately. Both Exception and Error are caught so assertion failures are
// wrapped with the offending row's data.
@Override public void test() throws Exception { for (final String[] data: datum) { final String query = buildQuery(data); final E expected = buildResult(data); try { doTest(query, expected); } catch (final Exception|Error ex) { final ParameterizedTestFailure failure = new ParameterizedTestFailure(Joiner.on(" -- ").join(data), ex); if (!forgiving) { throw failure; } exception.addException(failure); } } exception.throwAndClear(); } }
// Fragment of a test method: submits an aggregation query, waits (via latch and deferred
// exception) for the acceleration job, then rewrites the captured logical plan with the
// incremental-update shuttle.
// NOTE(review): this snippet is truncated mid-expression (the submitJob builder chain and
// the latch wait are cut), so it is not syntactically complete as shown — consult the full
// source before editing.
final AtomicReference<RelNode> logicalPlan = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); final DeferredException ex = new DeferredException(); Job job = jobsService.submitJob(JobRequest.newBuilder() .setSqlQuery(new SqlQuery("select n_regionkey, max(n_nationkey) as max_nation from cp.\"tpch/nation.parquet\" group by n_regionkey", SYSTEM_USERNAME)) Assert.fail("Acceleration job was not completed within allowed timeout."); ex.close(); long currentTime = System.currentTimeMillis(); RelNode newLogicalPlan = logicalPlan.get().accept(new MaterializationShuttle(IncrementalUpdateUtils.UPDATE_COLUMN, currentTime));
// Fragment: fragment-executor teardown (enclosing method begins and the UserException
// builder chain continues outside this view). Closes each resource in turn, deferring
// failures so all closes are attempted; if anything failed, the executor transitions to
// FAILED and the collected failures are wrapped in a system-error UserException annotated
// with the fragment's assignment and major:minor id.
deferredException.suppressingClose(pipeline); deferredException.suppressingClose(buffers); deferredException.suppressingClose(contextCreator); deferredException.suppressingClose(outputAllocator); deferredException.suppressingClose(allocator); deferredException.suppressingClose(ticket); if(deferredException.hasException()){ transitionToFailed(null); final UserException uex = UserException.systemError(deferredException.getAndClear()) .addIdentity(fragment.getAssignment()) .addContext("Fragment", handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId())
// Accumulates failures from the close() path so every resource is attempted before the
// first recorded error is rethrown.
private final DeferredException closeException = new DeferredException();
// Shuts the index cache down (trailing `}` closes the enclosing class, which starts outside
// this view): invalidates and cleans up the cache — removal listeners presumably close the
// evicted indexes into closeException (TODO confirm against the cache's configuration) —
// then rethrows the first failure recorded during closes, if any.
@Override public void close() throws Exception { indexes.invalidateAll(); indexes.cleanUp(); closeException.close(); } }
/**
 * If an exception exists, throws it and then clears it. This ensures that, in cases where we
 * want to reuse this DeferredException, we don't double-report the same exception.
 *
 * @throws Exception the deferred exception, if one was recorded
 */ public synchronized void throwAndClear() throws Exception { final Exception e = getAndClear(); if (e != null) { throw e; } }
// Delegate accessor: returns the currently deferred exception (null if none recorded).
Exception getException() { return exception.getException(); }
private void deleteAllIterators(DeferredException ex) { // It is "safe" to iterate while adding/removing entries (also changes might not be visible) // It is not safe to have two threads iterating at the same time synchronized(iteratorSet) { for(IteratorReference ref : iteratorSet){ ex.suppressingClose(ref); } } }
// Fragment: failure branch cut mid-block (enclosing method continues outside this view).
// A deferred failure moves the fragment to the FAILED state and aborts further processing.
if(deferredException.hasException()){ transitionToFailed(null); return;
/**
 * Waits for the job to finish, then loads one page of its results.
 *
 * @param offset index of the first record to load
 * @param limit  maximum number of records to load
 * @return the requested slice of the job's result data
 */
@Override
public RecordBatches load(int offset, int limit) {
  try {
    completionLatch.await();
  } catch (InterruptedException ie) {
    // Restore the interrupt flag and defer the failure rather than throwing here.
    Thread.currentThread().interrupt();
    exception.addException(ie);
  }
  // Surfaces (without clearing) any accumulated failure as an unchecked exception.
  exception.throwNoClearRuntime();
  return jobResultsStore.loadJobData(id, store.get(id), offset, limit);
}