/**
 * Creates a new user exception builder for data read errors with no underlying cause.
 *
 * <p>Delegates to {@code dataReadError(Throwable)} passing {@code null} as the cause.
 *
 * @see com.dremio.exec.proto.UserBitShared.DremioPBError.ErrorType#DATA_READ
 * @return user exception builder
 */
public static Builder dataReadError() {
  return dataReadError(null);
}
/**
 * Wraps the given throwable in a DATA_READ user exception builder and decorates it
 * with the supplied field/message context.
 *
 * @param e     underlying cause of the read failure
 * @param field name of the field being processed when the failure occurred
 * @param msg   format string for additional context
 * @param args  arguments for the format string
 * @return a context-decorated DATA_READ exception builder
 */
@Override
public UserException.Builder getExceptionWithContext(Throwable e, String field, String msg, Object... args) {
  // Create the DATA_READ-typed builder, then delegate to the builder-based overload.
  return getExceptionWithContext(UserException.dataReadError(e), field, msg, args);
}
/** * Validates the list type as expected. * @param arrowSchema current arrow schema * @return child schema */ private List<Field> handleRepeatedField(List<Field> arrowSchema, GroupType groupType) { // validating that the list type is as expected if (arrowSchema.size() != 1 && !arrowSchema.get(0).getName().equals("$data$")) { UserException.dataReadError() .message("invalid children. Expected a single child named $data$, was actually %s for repeated type %s. ", arrowSchema, groupType); } // in the case of list, we skip over the inner type (struct = list(nameForChild).struct() bellow) return arrowSchema.get(0).getChildren(); }
/**
 * Maps a hash-agg error type to the matching UserException builder, chaining the
 * supplied cause when one is available.
 *
 * @param ex        underlying cause, may be null
 * @param errorType which phase of hash aggregation failed
 * @return builder of the matching error category, or null for unrecognized types
 */
private UserException.Builder getExceptionBuilder(final Exception ex, final HashAggErrorType errorType) {
  switch (errorType) {
    case SPILL_READ:
      if (ex == null) {
        return UserException.dataReadError();
      }
      return UserException.dataReadError(ex);
    case SPILL_WRITE:
      if (ex == null) {
        return UserException.dataWriteError();
      }
      return UserException.dataWriteError(ex);
    case OOM:
      if (ex == null) {
        return UserException.memoryError();
      }
      return UserException.memoryError(ex);
    default:
      /* should never be hit since VectorizedHashAggOperator controls the operator type */
      return null;
  }
}
/**
 * Privileged action that opens an HBase connection from {@code config} and stores it
 * in the enclosing {@code connection} field.
 *
 * @return always null; the result is communicated via the {@code connection} field
 * @throws Exception IOExceptions are rethrown as DATA_READ UserExceptions
 */
@Override
public Void run() throws Exception {
  try {
    // Side effect: assigns the enclosing instance's connection field.
    connection = ConnectionFactory.createConnection(config);
    logger.info("Connection created: {}.", connection);
  } catch (IOException ex) {
    throw UserException.dataReadError(ex).message("Failure while connecting to HBase.").build(logger);
  }
  return null;
}
},
/**
 * Builds a DATA_READ user exception for a partition-column population failure,
 * annotated with diagnostic context about the populators and row count.
 *
 * @param t     underlying cause
 * @param count number of rows being populated when the failure occurred
 * @return the built exception (already logged)
 */
private UserException userExceptionWithDiagnosticInfo(final Throwable t, final int count) {
  final String populatorSummary = populators != null ? Joiner.on(",").join(populators) : "null";
  return UserException.dataReadError(t)
      .message("Failed to populate partition column values")
      .addContext("Partition value characteristics", populatorSummary)
      .addContext("Number of rows trying to populate", count)
      .build(logger);
}
/**
 * Verifies that the parser has no trailing content after a top-level list; throws a
 * DATA_READ user exception otherwise.
 *
 * @throws IOException if advancing the parser fails
 */
private void confirmLast() throws IOException {
  parser.nextToken();
  // Guard clause: a closed parser means the top-level list was the whole document.
  if (parser.isClosed()) {
    return;
  }
  throw getExceptionWithContext(UserException.dataReadError(), currentFieldName, null)
      .message("Dremio attempted to unwrap a toplevel list "
          + "in your document. However, it appears that there is trailing content after this top level list. Dremio only "
          + "supports querying a set of distinct maps or a single json array with multiple inner maps.")
      .build(logger);
}
/**
 * Attempts each formatter in order until one successfully parses the value; if none
 * succeed, throws a DATA_READ user exception naming the field.
 *
 * @param path       field being parsed (used in the error message)
 * @param value      raw date-time text to parse
 * @param formatters candidate formatters, tried in array order
 * @return the parsed value as a long
 */
private static long getMillis(SchemaPath path, String value, FormatterAndType[] formatters) {
  for (final FormatterAndType candidate : formatters) {
    try {
      return candidate.parseToLong(value);
    } catch (IllegalArgumentException e) {
      // Expected for non-matching formats; keep trying the remaining candidates.
      logger.debug("Failed to parse date time value {} with format {}", value, candidate, e);
    }
  }
  throw UserException.dataReadError()
      .message("Failed to parse date time value %s in field %s.", value, path.getAsUnescapedPath())
      .build(logger);
}
/**
 * Computes the output schema of a union by validating that both inputs have
 * type-and-position-compatible schemas; the left schema is used as the result.
 *
 * @param context function lookup context used to resolve child schemas
 * @return the left child's schema
 */
@Override
protected BatchSchema constructSchema(FunctionLookupContext context) {
  final List<PhysicalOperator> children = getChildren();
  final BatchSchema left = children.get(0).getSchema(context);
  final BatchSchema right = children.get(1).getSchema(context);
  if (right.equalsTypesAndPositions(left)) {
    return left;
  }
  throw UserException.dataReadError()
      .message("Unable to complete query, attempting to union two datasets that have different underlying schemas. Left: %s, Right: %s", left, right)
      .build(logger);
}
/**
 * Delegates split retrieval, converting any failure into a DATA_READ user exception
 * that names the table.
 *
 * @return the delegate's dataset splits
 */
public List<DatasetSplit> getSplits() {
  try {
    return delegate.getSplits();
  } catch (Exception failure) {
    throw UserException.dataReadError(failure)
        .message("Failure while attempting to retrieve parallelization information for table %s.", getName())
        .build(logger);
  }
}
/**
 * Delegates dataset-config retrieval, converting any failure into a DATA_READ user
 * exception that names the table.
 *
 * @return the delegate's dataset configuration
 */
public DatasetConfig getDataset() {
  try {
    return delegate.getDataset();
  } catch (Exception failure) {
    throw UserException.dataReadError(failure)
        .message("Failure while attempting to retrieve metadata information for table %s.", getName())
        .build(logger);
  }
}
/**
 * Attempt to consolidate disk runs if necessary. If the diskRunManager indicates
 * consolidation is complete, create the copier and update the sort state to
 * COPY_FROM_DISK.
 */
private void consolidateIfNecessary() {
  try {
    // Nothing to do until the disk-run manager reports consolidation is complete.
    if (!diskRuns.consolidateAsNecessary()) {
      return;
    }
    copier = diskRuns.createCopier();
    sortState = SortState.COPY_FROM_DISK;
  } catch (Exception ex) {
    throw UserException.dataReadError(ex)
        .message("Failure while attempting to read spill data from disk.")
        .build(logger);
  }
}
/**
 * Privileged action that probes HBase connectivity by listing namespace descriptors
 * with a short-lived connection and admin client.
 *
 * @return always null
 * @throws Exception IOExceptions are rethrown as DATA_READ UserExceptions
 */
@Override
public Void run() throws Exception {
  try (Connection probeConnection = ConnectionFactory.createConnection(testConfig);
       Admin probeAdmin = probeConnection.getAdmin()) {
    probeAdmin.listNamespaceDescriptors();
  } catch (IOException ex) {
    throw UserException.dataReadError(ex).message("Failure while connecting to HBase.").build(logger);
  }
  return null;
}
},
/**
 * Consumes an optional UTF-8 byte-order mark at the current buffer position, and
 * rejects UTF-16 input (either endianness) with a DATA_READ user exception.
 *
 * @throws IOException if reading the buffer fails
 */
private final void skipOptionalBOM() throws IOException {
  // A UTF-8 BOM is silently skipped by advancing the buffer pointer.
  if (checkBom(ByteOrderMark.UTF_8)) {
    bufferPtr += ByteOrderMark.UTF_8.length();
    return;
  }
  if (checkBom(ByteOrderMark.UTF_16LE) || checkBom(ByteOrderMark.UTF_16BE)) {
    throw UserException.dataReadError()
        .message("UTF-16 files not supported")
        .build(logger);
  }
}
private void write(BitWriter writer, JsonToken token, JsonParser parser) throws IOException { // TODO JASON - finish other cases if (token == JsonToken.VALUE_NUMBER_INT) { writer.writeBit(parser.getIntValue() == 1 ? 1 : 0); } else if (token == JsonToken.VALUE_TRUE || token == JsonToken.VALUE_FALSE){ writer.writeBit(parser.getBooleanValue() ? 1 : 0); } else if (token == JsonToken.VALUE_STRING) { writer.writeBit(ElasticsearchJsonReader.parseElasticBoolean(parser.getValueAsString()) ? 1 : 0); } else { throw UserException.dataReadError() .message("While reading from elasticsearch, unexpected data type in a boolean column: " + token).build(logger); } }
/**
 * Converts this schema to a Calcite record type, omitting blacklisted fields, and
 * fails with a DATA_READ user exception when no columns remain.
 *
 * @param factory        Calcite type factory
 * @param fieldBlacklist field names to exclude from the record type
 * @return the resulting Calcite row type (guaranteed non-empty)
 */
public RelDataType toCalciteRecordType(RelDataTypeFactory factory, Set<String> fieldBlacklist) {
  final FieldInfoBuilder rowBuilder = new FieldInfoBuilder(factory);
  for (final Field field : this) {
    if (fieldBlacklist.contains(field.getName())) {
      continue;
    }
    rowBuilder.add(field.getName(), CompleteType.toCalciteType(field, factory));
  }
  final RelDataType rowType = rowBuilder.build();
  if (rowType.getFieldCount() == 0) {
    throw UserException.dataReadError().message("Selected table has no columns.").build(logger);
  }
  return rowType;
}
@Override public SourceTableDefinition getDataset() { final HiveClient client = getClient(SystemUser.SYSTEM_USERNAME); try { // checkReadSignature() is only called with a datasetConfig coming from the namespace so we can safely // assume it has a canonized path // it may not be safe to always assume so and we should probably figure out a better way to ensure // this assumption in the future return DatasetBuilder.getDatasetBuilder( client, getStorageUser(SystemUser.SYSTEM_USERNAME), new NamespaceKey(datasetConfig.getFullPathList()), true, false, getStatsParams(), hiveConf, datasetConfig); } catch (TException e) { throw UserException.dataReadError(e).message("Failure while retrieving dataset definition.").build(logger); } } };
/**
 * Builds a DATA_READ user exception describing a schema-mapping type conflict: the
 * declared type for a field does not match the type observed in the data.
 *
 * @param path          parent path of the conflicting field
 * @param declaredField field as declared in the mapping
 * @param observedType  type actually observed while reading
 * @return the built exception (already logged)
 */
private UserException failure(SchemaPath path, ElasticField declaredField, CompleteType observedType) {
  final String fieldPath = child(path, declaredField.getName()).getAsUnescapedPath();
  final String declaredDescription = Describer.describe(declaredField.toArrowField());
  final String observedDescription = Describer.describe(observedType.toField(declaredField.getName()));
  return UserException.dataReadError()
      .message(
          "Failure handling type. Dremio only supports a path to type mapping across all schema mappings. \n"
              + "\tDataset path %s.\n"
              + "\tPath to field %s.\n"
              + "\tDeclared Type %s.\n"
              + "\tObserved Type %s.\n",
          datasetPath,
          fieldPath,
          declaredDescription,
          observedDescription)
      .build(logger);
}
/**
 * Creates (saves) a dataset for the given key, optionally passing its definition
 * through a mutator before saving. Fails if the table already exists or the source
 * cannot provide a definition.
 *
 * @param key            namespace key of the table to create
 * @param plugin         storage plugin that owns the table
 * @param datasetMutator optional transform applied to the definition before saving; may be null
 */
public void createDataset(NamespaceKey key, ManagedStoragePlugin plugin, Function<DatasetConfig, DatasetConfig> datasetMutator) {
  DatasetConfig existingConfig = null;
  try {
    existingConfig = userNamespaceService.getDataset(key);
    if (existingConfig != null) {
      throw UserException.validationError().message("Table already exists %s", key.getRoot()).build(logger);
    }
  } catch (NamespaceException ex) {
    // Absence of the dataset is the expected path for creation.
    logger.debug("Failure while trying to retrieve dataset for key {}.", key, ex);
  }

  SourceTableDefinition definition = null;
  try {
    definition = plugin.getTable(key, null, false);
  } catch (Exception ex) {
    throw UserException.dataReadError(ex).message("Failure while attempting to read metadata for table %s from source.", key).build(logger);
  }
  if (definition == null) {
    throw UserException.validationError().message("Unable to find requested table %s.", key).build(logger);
  }

  final SourceTableDefinition toSave =
      datasetMutator == null ? definition : new MutatedSourceTableDefinition(definition, datasetMutator);
  plugin.getSaver().completeSave(toSave, existingConfig);
}
/**
 * Verifies that a UserException survives being wrapped: rebuilding a systemError
 * from the wrapped chain should yield the original exception.
 * NOTE(review): presumably systemError(Throwable) unwraps a nested UserException —
 * confirm against UserException's implementation.
 */
@Test
public void testWrapUserException() {
  final UserException original = UserException.dataReadError().message("this is a data read exception").build(logger);
  final Exception rethrown = wrap(original, 3);
  Assert.assertEquals(original, UserException.systemError(rethrown).build(logger));
}