@Override
public void interrupted(final InterruptedException ex) {
  if (!isTerminal.compareAndSet(false, true)) {
    logger.warn("Received multiple responses to run query request.");
    return;
  }

  // TODO(DRILL-4586)
  resultsListener.submissionFailed(UserException.systemError(ex)
      .addContext("The client had been asked to wait as the SabotNode is potentially being over-utilized."
          + " But the client was interrupted while waiting.")
      .build(logger));
}
@Override
public void failed(RpcException ex) {
  if (!isTerminal.compareAndSet(false, true)) {
    logger.warn("Received multiple responses to run query request.");
    return;
  }

  // Although query submission failed, results might have arrived for this query.
  // However, the results could not be transferred to this resultListener because
  // there is no query id mapped to this resultListener. Look out for the warning
  // message from ChannelClosedHandler in the client logs.
  // TODO(DRILL-4586)
  resultsListener.submissionFailed(UserException.systemError(ex)
      .addContext("Query submission to SabotNode failed.")
      .build(logger));
}
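Both callbacks above rely on the same guard: an AtomicBoolean flipped with compareAndSet so that only the first terminal event (failure or interruption) reaches the listener, while later duplicates are merely logged. A minimal, self-contained sketch of that pattern, using only the JDK; the Listener interface and all names here are hypothetical stand-ins, not the Dremio API:

import java.util.concurrent.atomic.AtomicBoolean;

public class TerminalGuardSketch {
  // Hypothetical stand-in for the real results listener.
  interface Listener {
    void failed(Exception e);
  }

  private final AtomicBoolean isTerminal = new AtomicBoolean(false);
  private final Listener listener;

  TerminalGuardSketch(Listener listener) {
    this.listener = listener;
  }

  void onFailure(Exception e) {
    // First terminal event wins; later ones are ignored (the real code logs a warning instead).
    if (!isTerminal.compareAndSet(false, true)) {
      return;
    }
    listener.failed(e);
  }

  public static void main(String[] args) {
    TerminalGuardSketch guard =
        new TerminalGuardSketch(e -> System.out.println("delivered once: " + e.getMessage()));
    guard.onFailure(new Exception("first"));   // delivered
    guard.onFailure(new Exception("second"));  // suppressed by the guard
  }
}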
public UserResult withException(Exception ex) {
  UserException exception = this.exception;
  if (exception != null) {
    exception.addSuppressed(ex);
    exception = UserException.systemError(exception)
        .build(logger);
  } else {
    exception = UserException.systemError(ex)
        .build(logger);
  }

  QueryProfile profile = this.profile;
  if (profile != null) {
    QueryProfile.Builder builder = profile.toBuilder();
    builder.setState(QueryState.FAILED);
    profile = addError(exception, builder).build();
  }

  return new UserResult(extraValue, queryId, QueryState.FAILED, profile, exception, cancelReason,
      clientCancelled);
}
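withException keeps the original failure as the primary exception and attaches the later one via Throwable.addSuppressed, so neither stack trace is lost. A tiny, runnable JDK-only illustration of that choice:

public class SuppressedSketch {
  public static void main(String[] args) {
    Exception primary = new IllegalStateException("query already failed");
    Exception secondary = new RuntimeException("cleanup also failed");

    // Keep the first failure as the main exception; attach the later one.
    primary.addSuppressed(secondary);

    // Printing the primary shows a "Suppressed:" entry for the secondary.
    primary.printStackTrace();
  }
}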
@Test
public void testNumericTypes() throws Exception {
  final File table_dir = tempFolder.newFolder("numericTypes");
  final int record_count = 10000;

  BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(new File(table_dir, "a.json")));
  String format = "{ a : %d }%n";
  for (int i = 0; i <= record_count; i += 2) {
    os.write(String.format(format, i).getBytes());
  }
  os.close();

  os = new BufferedOutputStream(new FileOutputStream(new File(table_dir, "b.json")));
  format = "{ a : %.2f }%n";
  for (int i = 1; i <= record_count; i += 2) {
    os.write(String.format(format, (float) i).getBytes());
  }
  os.close();

  String query = String.format("select * from dfs_root.\"%s\" order by a desc", table_dir.toPath().toString());

  // First attempt will fail with a schema change error
  try {
    attemptTestNumericTypes(query, record_count);
  } catch (Exception e) {
    UserExceptionMatcher m = new UserExceptionMatcher(UserBitShared.DremioPBError.ErrorType.DATA_READ,
        "SCHEMA_CHANGE ERROR");
    @SuppressWarnings("deprecation") // deprecated methods used below: usage matches usage in AttemptManager
    final UserException expectedException = UserException.systemError(e).build();
    assertTrue(m.matches(expectedException));
  }

  // Second attempt should work, as we'd have learned the schema
  attemptTestNumericTypes(query, record_count);
}
@Test
public void testNumericTypes() throws Exception {
  final File data_dir = tempFolder.newFolder("topn-schemachanges");

  // even rows as ints, odd rows as floats
  BufferedWriter writer = new BufferedWriter(new FileWriter(new File(data_dir, "d1.json")));
  for (int i = 0; i < 10000; i += 2) {
    writer.write(String.format("{ \"kl\" : %d , \"vl\": %d }\n", i, i));
  }
  writer.close();

  writer = new BufferedWriter(new FileWriter(new File(data_dir, "d2.json")));
  for (int i = 1; i < 10000; i += 2) {
    writer.write(String.format("{ \"kl\" : %f , \"vl\": %f }\n", (float) i, (float) i));
  }
  writer.close();

  String query = String.format("select * from dfs_root.\"%s\" order by kl limit 12", data_dir.toPath().toString());

  // First query will get a schema change error
  try {
    attemptTestNumericTypes(query);
  } catch (Exception e) {
    UserExceptionMatcher m = new UserExceptionMatcher(UserBitShared.DremioPBError.ErrorType.DATA_READ,
        "SCHEMA_CHANGE ERROR");
    @SuppressWarnings("deprecation") // deprecated methods used below: usage matches usage in AttemptManager
    final UserException expectedException = UserException.systemError(e).build();
    assertTrue(m.matches(expectedException));
  }

  // Second attempt should work, as we'd have learned the schema
  attemptTestNumericTypes(query);
}
private void checkLastChunk() {
  int recordsInBatch = -1;

  // Ensure the right number of columns was returned; especially important to ensure
  // selective column read is working.
  if (testValues) {
    assertEquals("Unexpected number of output columns from parquet scan.",
        props.fields.keySet().size(), valuesChecked.keySet().size());
  }

  for (final String s : valuesChecked.keySet()) {
    try {
      if (recordsInBatch == -1) {
        recordsInBatch = valuesChecked.get(s);
      } else {
        assertEquals("Mismatched record counts in vectors.", recordsInBatch, valuesChecked.get(s).intValue());
      }
      assertEquals("Record count incorrect for column: " + s, totalRecords, (long) valuesChecked.get(s));
    } catch (AssertionError e) {
      submissionFailed(UserException.systemError(e).build(logger));
    }
  }

  assertTrue(valuesChecked.keySet().size() > 0);
  future.set(null);
}
@Test
public void testNumericAndStringTypes() throws Exception {
  final File data_dir = tempFolder.newFolder("topn-schemachanges");

  // even rows as ints, odd rows as strings
  BufferedWriter writer = new BufferedWriter(new FileWriter(new File(data_dir, "d1.json")));
  for (int i = 0; i < 1000; i += 2) {
    writer.write(String.format("{ \"kl\" : %d , \"vl\": %d }\n", i, i));
  }
  writer.close();

  writer = new BufferedWriter(new FileWriter(new File(data_dir, "d2.json")));
  for (int i = 1; i < 1000; i += 2) {
    writer.write(String.format("{ \"kl\" : \"%s\" , \"vl\": \"%s\" }\n", i, i));
  }
  writer.close();

  String query = String.format("select * from dfs_root.\"%s\" order by kl desc limit 12", data_dir.toPath().toString());

  // First query will get a schema change error
  try {
    attemptTestNumericAndStringTypes(query);
  } catch (Exception e) {
    UserExceptionMatcher m = new UserExceptionMatcher(UserBitShared.DremioPBError.ErrorType.DATA_READ,
        "SCHEMA_CHANGE ERROR");
    @SuppressWarnings("deprecation") // deprecated methods used below: usage matches usage in AttemptManager
    final UserException expectedException = UserException.systemError(e).build();
    assertTrue(m.matches(expectedException));
  }

  // Second attempt should work, as we'd have learned the schema
  attemptTestNumericAndStringTypes(query);
}
/**
 * Maps the internal low-level API protocol to the {@link UserResultsListener}-level API protocol.
 * Handles query data messages.
 */
public void batchArrived(ConnectionThrottle throttle, byte[] pBody, ByteBuf dBody) throws RpcException {
  final QueryData queryData = RpcBus.get(pBody, QueryData.PARSER);
  // Current batch coming in.
  final ArrowBuf arrowBuf = (ArrowBuf) dBody;
  final QueryDataBatch batch = new QueryDataBatch(queryData, arrowBuf);

  final QueryId queryId = queryData.getQueryId();
  if (logger.isDebugEnabled()) {
    logger.debug("batchArrived: queryId = {}", QueryIdHelper.getQueryId(queryId));
  }
  logger.trace("batchArrived: batch = {}", batch);
  final UserResultsListener resultsListener = newUserResultsListener(queryId);

  // A data case -- pass on via dataArrived.
  try {
    resultsListener.dataArrived(batch, throttle); // That releases batch if successful.
  } catch (Exception e) {
    batch.release();
    resultsListener.submissionFailed(UserException.systemError(e).build(logger));
  }
}
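The try/catch above encodes a buffer-ownership rule: a successful dataArrived call takes ownership of the batch (and releases it later), while on failure the caller must release it before reporting the error through submissionFailed. A self-contained sketch of that consume-or-release contract; the Releasable and Consumer types here are hypothetical stand-ins, not the Dremio or Arrow classes:

public class ConsumeOrReleaseSketch {
  // Hypothetical stand-in for a reference-counted buffer such as ArrowBuf.
  interface Releasable {
    void release();
  }

  // Hypothetical consumer; on success it owns the resource and releases it later.
  interface Consumer {
    void consume(Releasable r) throws Exception;
  }

  static void deliver(Releasable batch, Consumer consumer) {
    try {
      consumer.consume(batch); // Ownership transfers on success.
    } catch (Exception e) {
      batch.release();         // On failure, the caller must release...
      // ...and then report the failure, e.g. via submissionFailed(...).
    }
  }

  public static void main(String[] args) {
    Releasable batch = () -> System.out.println("released by caller after failure");
    deliver(batch, r -> { throw new Exception("downstream failed"); });
  }
}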
@Test
public void testNumericAndStringTypes() throws Exception {
  final File table_dir = tempFolder.newFolder("numericAndStringTypes");
  final int record_count = 10000;

  BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(new File(table_dir, "a.json")));
  String format = "{ a : %d }%n";
  for (int i = 0; i <= record_count; i += 2) {
    os.write(String.format(format, i).getBytes());
  }
  os.close();

  os = new BufferedOutputStream(new FileOutputStream(new File(table_dir, "b.json")));
  format = "{ a : \"%05d\" }%n";
  for (int i = 1; i <= record_count; i += 2) {
    os.write(String.format(format, i).getBytes());
  }
  os.close();

  String query = String.format("select * from dfs_root.\"%s\" order by a desc", table_dir.toPath().toString());

  // First attempt will fail with a schema change error
  try {
    attemptTestNumericAndStringTypes(query, record_count);
  } catch (Exception e) {
    UserExceptionMatcher m = new UserExceptionMatcher(UserBitShared.DremioPBError.ErrorType.DATA_READ,
        "SCHEMA_CHANGE ERROR");
    @SuppressWarnings("deprecation") // deprecated methods used below: usage matches usage in AttemptManager
    final UserException expectedException = UserException.systemError(e).build();
    assertTrue(m.matches(expectedException));
  }

  // Second attempt should work, as we'd have learned the schema
  attemptTestNumericAndStringTypes(query, record_count);
}
  resultsListener.submissionFailed(UserException.systemError(t).build(logger));
  Throwables.propagateIfPossible(t);
} finally {
@Test
public void testBuildSystemException() {
  String message = "This is an exception";
  UserException uex = UserException.systemError(new Exception(new RuntimeException(message))).build(logger);
  Assert.assertTrue(uex.getOriginalMessage().contains(message));
  Assert.assertTrue(uex.getOriginalMessage().contains("RuntimeException"));
  DremioPBError error = uex.getOrCreatePBError(true);
  Assert.assertEquals(ErrorType.SYSTEM, error.getErrorType());
}
protected RuntimeException contextualize(Throwable e) {
  String operatorName = "Unknown";
  int operatorId = -1;
  try {
    operatorName = getOperatorName(context.getStats().getOperatorType());
  } catch (Exception ex) {
    e.addSuppressed(ex);
  }
  try {
    operatorId = context.getStats().getOperatorId();
  } catch (Exception ex) {
    e.addSuppressed(ex);
  }

  final FragmentHandle h = context.getFragmentHandle();
  UserException.Builder builder = UserException.systemError(e)
      .message("General execution failure.")
      .addContext("SqlOperatorImpl", operatorName)
      .addContext("Location",
          String.format("%d:%d:%d", h.getMajorFragmentId(), h.getMinorFragmentId(), operatorId));

  if (e instanceof OutOfMemoryException || e instanceof OutOfDirectMemoryError || e instanceof OutOfMemoryError) {
    context.getNodeDebugContextProvider().addMemoryContext(builder);
  }
  return builder.build(logger);
}
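Taken together, the snippets show the recurring builder shape: wrap the root cause with UserException.systemError(...), optionally override the message and attach key/value context, then build(logger) to log and construct in one step. A minimal hedged sketch of that shape; the import path and surrounding names are assumptions inferred from these snippets, not verified against a specific Dremio version:

// Assumed import path; adjust to the UserException class actually on your classpath.
import com.dremio.common.exceptions.UserException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SystemErrorSketch {
  private static final Logger logger = LoggerFactory.getLogger(SystemErrorSketch.class);

  static UserException wrap(Throwable cause) {
    // All builder methods below appear in the snippets above:
    // systemError(...), message(...), addContext(key, value), build(logger).
    return UserException.systemError(cause)
        .message("General execution failure.")
        .addContext("SqlOperatorImpl", "Unknown")
        .build(logger); // logs the error and returns the built UserException
  }
}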
  uex = UserException.systemError(resultException)
      .addIdentity(queryContext.getCurrentEndpoint())
      .build(logger);
} else {
  uex = null;
@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  final QueryData header = result.getHeader();
  final ArrowBuf data = result.getData();

  if (data != null) {
    count.addAndGet(header.getRowCount());
    try {
      loader.load(header.getDef(), data);
      // TODO: Clean: DRILL-2933: That load(...) no longer throws
      // SchemaChangeException, so check/clean catch clause below.
    } catch (SchemaChangeException e) {
      submissionFailed(UserException.systemError(e).build(logger));
    }

    switch (format) {
    case TABLE:
      VectorUtil.showVectorAccessibleContent(loader, columnWidth);
      break;
    case TSV:
      VectorUtil.showVectorAccessibleContent(loader, "\t");
      break;
    case CSV:
      VectorUtil.showVectorAccessibleContent(loader, ",");
      break;
    }
    loader.clear();
  }
  result.release();
}
@Test
public void testWrapUserException() {
  UserException uex = UserException.dataReadError().message("this is a data read exception").build(logger);
  Exception wrapped = wrap(uex, 3);
  Assert.assertEquals(uex, UserException.systemError(wrapped).build(logger));
}
final UserException uex = UserException.systemError(deferredException.getAndClear())
    .addIdentity(fragment.getAssignment())
    .addContext("Fragment", handle.getMajorFragmentId() + ":" + handle.getMinorFragmentId())