@Override
public boolean exists(String namespace, String name) {
  Preconditions.checkArgument(DEFAULT_NAMESPACE.equals(namespace),
      "Non-default namespaces are not supported");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  return metadataProvider.exists(namespace, name);
}
@Override
public boolean hasNext() {
  Preconditions.checkState(state.equals(ReaderWriterState.OPEN),
      "Attempt to read from a file in state: %s", state);
  return next != null;
}
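// A minimal self-contained sketch (not from the source) of the Guava
// convention the snippets in this section follow: checkArgument throws
// IllegalArgumentException (bad input), checkState throws
// IllegalStateException (wrong lifecycle), and checkNotNull throws
// NullPointerException. The %s placeholders are rendered only on failure.
import com.google.common.base.Preconditions;

class PreconditionsDemo {
  public static void main(String[] args) {
    int size = 10;
    boolean isOpen = true;
    String name = "events";

    Preconditions.checkArgument(size > 0, "Size is not positive: %s", size);
    Preconditions.checkState(isOpen, "Attempt to read while not open");
    Preconditions.checkNotNull(name, "Dataset name cannot be null");
  }
}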
public Boundary(MarkerComparator comparator, Marker bound, boolean isInclusive) {
  Preconditions.checkArgument(comparator != null, "Comparator cannot be null");
  Preconditions.checkArgument(bound != null, "Bound cannot be null");
  this.comparator = comparator;
  this.bound = bound;
  this.isInclusive = isInclusive;
}
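// Hypothetical usage of the constructor above; the comparator and bound
// variables are assumed to exist in scope and are not defined in this
// excerpt. An inclusive upper boundary:
Boundary inclusiveEnd = new Boundary(comparator, bound, true);
// new Boundary(null, bound, true) would fail fast with
// IllegalArgumentException("Comparator cannot be null").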
public Builder<E> type(Class<E> type) {
  Preconditions.checkNotNull(type, "Type cannot be null");
  this.type = type;
  return this;
}
@Override
@SuppressWarnings("deprecation")
public void dropPartition(PartitionKey key) {
  Preconditions.checkState(descriptor.isPartitioned(),
      "Attempt to drop a partition on a non-partitioned dataset (name: %s)", name);
  Preconditions.checkNotNull(key, "Partition key cannot be null");

  LOG.debug("Dropping partition with key: {} dataset: {}", key, name);

  Path partitionDirectory = toDirectoryName(directory, key);
  try {
    if (!fileSystem.delete(partitionDirectory, true)) {
      throw new IOException("Partition directory " + partitionDirectory
          + " for key " + key + " does not exist");
    }
  } catch (IOException e) {
    throw new DatasetIOException(
        "Unable to locate or drop dataset partition directory " + partitionDirectory, e);
  }
}
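// Hedged sketch of the delete-returns-false convention used above, against
// Hadoop's local filesystem; the path is illustrative. FileSystem.delete
// returns false when there was nothing to delete, which dropPartition
// converts into an exception.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DeleteDemo {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/demo-partition");
    if (!fs.delete(dir, true)) {
      throw new IOException("Directory " + dir + " does not exist");
    }
  }
}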
public FileSystemDatasetReader(FileSystem fileSystem, Path path, Schema schema, Class<E> type) {
  Preconditions.checkArgument(fileSystem != null, "FileSystem cannot be null");
  Preconditions.checkArgument(path != null, "Path cannot be null");
  Preconditions.checkArgument(schema != null, "Schema cannot be null");

  this.fileSystem = fileSystem;
  this.path = path;
  this.schema = schema;
  this.type = type;
  this.state = ReaderWriterState.NEW;
}
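// Hypothetical lifecycle sketch for the reader above. open() and close()
// are assumed from the ReaderWriterState transitions (NEW -> OPEN -> CLOSED);
// they are not shown in this excerpt, and fs, path, and schema are assumed
// to exist in scope.
FileSystemDatasetReader<GenericRecord> reader =
    new FileSystemDatasetReader<GenericRecord>(fs, path, schema, GenericRecord.class);
reader.open();                      // NEW -> OPEN (assumed)
while (reader.hasNext()) {          // guarded by the checkState shown below
  GenericRecord record = reader.next();
  // process record
}
reader.close();                     // OPEN -> CLOSED (assumed)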
/**
 * Configure the dataset's schema. A schema is required and can be set
 * using any one of these methods: {@code schema}, {@code schemaLiteral},
 * {@code schemaUri}, or {@code schemaFromAvroDataFile}.
 *
 * @return this builder, for method chaining
 */
public Builder schema(Schema schema) {
  Preconditions.checkNotNull(schema, "Schema cannot be null");
  this.schema = schema;
  return this;
}
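// Hedged chaining sketch for the builder above, using Avro's SchemaBuilder
// to produce the required Schema; the record and field names are
// illustrative.
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

Schema schema = SchemaBuilder.record("User").fields()
    .requiredString("username")
    .endRecord();
DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
    .schema(schema)   // any one of the four schema methods suffices
    .build();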
@Override
public boolean delete(String namespace, String name) {
  Preconditions.checkArgument(DEFAULT_NAMESPACE.equals(namespace),
      "Non-default namespaces are not supported");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  return metadataProvider.delete(namespace, name);
}
private void setOverwrite() {
  String mode = conf.get(KITE_WRITE_MODE);
  Preconditions.checkState(mode == null,
      "Cannot replace existing write mode: %s", mode);
  conf.setEnum(KITE_WRITE_MODE, WriteMode.OVERWRITE);
}
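// Sketch contrasting the two message styles Preconditions accepts: with
// concatenation the message string is built on every call, even when the
// check passes; with a template plus args it is rendered only on failure.
Preconditions.checkState(mode == null, "Cannot replace existing write mode: " + mode); // eager
Preconditions.checkState(mode == null, "Cannot replace existing write mode: %s", mode); // lazy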
public MarkerRange(MarkerComparator comparator) {
  Preconditions.checkArgument(comparator != null, "Comparator cannot be null");
  this.comparator = comparator;
  this.start = Boundary.NEGATIVE_INFINITY;
  this.end = Boundary.POSITIVE_INFINITY;
}
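// Usage sketch (assumed, not shown in this excerpt): with only a comparator,
// the range above spans everything, since it is initialized to the
// NEGATIVE_INFINITY and POSITIVE_INFINITY sentinel boundaries.
MarkerRange unbounded = new MarkerRange(comparator);  // effectively (-inf, +inf)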
HiveAbstractMetadataProvider(Configuration conf) {
  Preconditions.checkNotNull(conf, "Configuration cannot be null");
  this.conf = conf;
}
@Override
public E next() {
  Preconditions.checkState(state.equals(ReaderWriterState.OPEN),
      "Attempt to read from a file in state: %s", state);
  return iterator.next();
}
public LongFixedSizeRangeFieldPartitioner(String sourceName, @Nullable String name, long size) {
  super(sourceName, (name == null ? sourceName + "_range" : name), Long.class, Long.class);
  Preconditions.checkArgument(size > 0, "Size of range buckets is not positive: %s", size);
  this.size = size;
}
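// Hedged sketch of what a fixed-size range partitioner plausibly computes;
// the real apply() is not part of this excerpt, so floorDiv here is an
// assumption, not Kite's implementation. Values map to the lower bound of
// their bucket, including negatives.
class RangeBucketDemo {
  public static void main(String[] args) {
    long size = 10L;
    long[] values = {0L, 9L, 10L, 42L, -1L};
    for (long value : values) {
      long bucket = Math.floorDiv(value, size) * size;
      System.out.println(value + " -> " + bucket);  // e.g. 42 -> 40, -1 -> -10
    }
  }
}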
@Override
public boolean hasNext() {
  Preconditions.checkState(state.equals(ReaderWriterState.OPEN),
      "Attempt to read from a scanner in state: %s", state);
  return iterator.hasNext();
}
public HashFieldPartitioner(String sourceName, @Nullable String name, int buckets) {
  super(sourceName, (name == null ? sourceName + "_hash" : name), Object.class, Integer.class, buckets);
  Preconditions.checkArgument(buckets > 0, "Number of hash buckets is not positive: %s", buckets);
}
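// Hedged sketch of a common hash-to-bucket mapping; the real apply() is not
// shown in this excerpt, so this illustrates the idea rather than Kite's
// implementation. Masking with Integer.MAX_VALUE keeps the hash non-negative
// before the modulo.
class HashBucketDemo {
  public static void main(String[] args) {
    int buckets = 16;
    Object source = "some-key";
    int bucket = (source.hashCode() & Integer.MAX_VALUE) % buckets;
    System.out.println(source + " -> bucket " + bucket);
  }
}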
@Override
public boolean exists(String namespace, String name) {
  Preconditions.checkNotNull(namespace, "Namespace cannot be null");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  try {
    find(namespace, name);
    return true;
  } catch (DatasetNotFoundException e) {
    return false;
  }
}
@Override
public <E> RandomAccessDataset<E> create(String namespace, String name,
    DatasetDescriptor descriptor, Class<E> type) {
  Preconditions.checkArgument(DEFAULT_NAMESPACE.equals(namespace),
      "Non-default namespaces are not supported");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  Preconditions.checkNotNull(descriptor, "Descriptor cannot be null");

  DatasetDescriptor newDescriptor = metadataProvider.create(namespace, name, descriptor);
  return newDataset(namespace, name, newDescriptor, type);
}
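// Hedged end-to-end sketch tying create() to the descriptor builder shown
// earlier; the repository variable and the "default"/"events" names are
// illustrative ("default" is assumed to match DEFAULT_NAMESPACE), and
// GenericRecord comes from Avro.
DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
    .schema(schema)
    .build();
RandomAccessDataset<GenericRecord> events =
    repository.create("default", "events", descriptor, GenericRecord.class);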