/**
 * Create a {@link TimestampedHiveDatasetVersion} from a {@link Partition} based on the
 * modification time of the underlying HDFS data location.
 *
 * @throws IllegalArgumentException when the partition argument is null
 * @throws IllegalArgumentException when the data location of the partition is null
 * @throws IllegalArgumentException when the data location of the partition doesn't exist
 * {@inheritDoc}
 */
@Override
protected TimestampedHiveDatasetVersion getDatasetVersion(Partition partition) {
  try {
    // Original message was the truncated "Argument to method "; make it descriptive.
    Preconditions.checkArgument(partition != null, "Partition must not be null");
    Path dataLocation = partition.getDataLocation();
    Preconditions.checkArgument(dataLocation != null,
        "Data location is null for partition " + partition.getCompleteName());
    boolean exists = this.fs.exists(dataLocation);
    Preconditions.checkArgument(exists,
        "Data location doesn't exist for partition " + partition.getCompleteName());
    long modificationTS = this.fs.getFileStatus(dataLocation).getModificationTime();
    return new TimestampedHiveDatasetVersion(new DateTime(modificationTS), partition);
  } catch (IOException e) {
    // fs.exists / fs.getFileStatus can fail; surface the cause as an unchecked exception.
    throw new RuntimeException(e);
  }
}
}
/**
 * Configuration for delta binary packing: a block of values is split into mini blocks.
 *
 * @param blockSizeInValues    total number of values held in one block
 * @param miniBlockNumInABlock number of mini blocks each block is divided into
 * @throws IllegalArgumentException if the resulting mini block size is not a multiple of 8
 */
public DeltaBinaryPackingConfig(int blockSizeInValues, int miniBlockNumInABlock) {
  this.blockSizeInValues = blockSizeInValues;
  this.miniBlockNumInABlock = miniBlockNumInABlock;
  // Done in floating point so a non-integral split is caught by the multiple-of-8 check.
  final double valuesPerMiniBlock = ((double) blockSizeInValues) / miniBlockNumInABlock;
  Preconditions.checkArgument(valuesPerMiniBlock % 8 == 0,
      "miniBlockSize must be multiple of 8, but it's " + valuesPerMiniBlock);
  this.miniBlockSizeInValues = (int) valuesPerMiniBlock;
}
/**
 * Returns the index of the most recently written byte; this index can later be
 * handed to {@link #setByte(long, byte)} to overwrite that value in place.
 *
 * @return index of the last value written to this stream
 * @throws IllegalArgumentException if nothing has been written yet
 */
public long getCurrentIndex() {
  // An empty stream has no "last written" position to report.
  checkArgument(bytesUsed > 0, "This is an empty stream");
  final long lastWritten = bytesUsed - 1;
  return lastWritten;
}
/**
 * Returns the index of the most recently written byte; this index can later be
 * handed to {@link #setByte(long, byte)} to overwrite that value in place.
 *
 * @return index of the last value written to this stream
 * @throws IllegalArgumentException if nothing has been written yet
 */
public long getCurrentIndex() {
  // An empty stream has no "last written" position to report.
  checkArgument(bytesUsed > 0, "This is an empty stream");
  final long lastWritten = bytesUsed - 1;
  return lastWritten;
}
/**
 * A range boundary: a bound marker together with a flag saying whether the
 * bound value itself belongs to the range.
 *
 * @param comparator  comparator used to order markers against this bound; must not be null
 * @param bound       the boundary marker; must not be null
 * @param isInclusive whether the bound value is part of the range
 */
public Boundary(MarkerComparator comparator, Marker bound, boolean isInclusive) {
  // Validate both references before touching any field.
  parquet.Preconditions.checkArgument(comparator != null, "Comparator cannot be null");
  parquet.Preconditions.checkArgument(bound != null, "Bound cannot be null");
  this.comparator = comparator;
  this.bound = bound;
  this.isInclusive = isInclusive;
}
/**
 * Look up the {@link ColumnDescriptor} registered in the schema for a column path.
 *
 * @param columnPath path of the column to resolve
 * @return the descriptor for {@code columnPath}
 * @throws IllegalArgumentException if the path is not present in the schema
 */
private ColumnDescriptor getColumnDescriptor(ColumnPath columnPath) {
  final ColumnDescriptor descriptor = columnsAccordingToSchema.get(columnPath);
  checkArgument(descriptor != null, "Column " + columnPath + " was not found in schema!");
  return descriptor;
}
}
/**
 * Construct a type builder that returns the {@link Type} that was built once the
 * builder is finished. The {@code returnClass} must be the expected {@code Type} class.
 *
 * @param returnClass a {@code Type} class to return from {@link #named(String)}
 * @throws IllegalArgumentException if {@code returnClass} does not extend {@link Type}
 */
protected Builder(Class<P> returnClass) {
  // Only Type subclasses can be produced by this builder.
  Preconditions.checkArgument(Type.class.isAssignableFrom(returnClass),
      "The requested return class must extend Type");
  this.returnClass = returnClass;
  // A root builder has no enclosing parent.
  this.parent = null;
}
/**
 * Resolve the {@link PrimitiveColumnIO} mapped to an index field path.
 *
 * @param indexFieldPath the index path identifying the column
 * @return the {@code PrimitiveColumnIO} for that path
 * @throws IllegalArgumentException if no column is mapped to the path
 */
private PrimitiveColumnIO getColumnIO(List<Integer> indexFieldPath) {
  PrimitiveColumnIO found = columnIOsByIndexFieldPath.get(indexFieldPath);
  // Add the missing space so the message reads "...field path [0, 1]" instead of "...path[0, 1]".
  checkArgument(found != null, "Did not find PrimitiveColumnIO for index field path " + indexFieldPath);
  return found;
}
/** * @param initialSlabSize the size to make the first slab * @param maxCapacityHint a hint (not guarantee) of the max amount of data written to this stream */ public CapacityByteArrayOutputStream(int initialSlabSize, int maxCapacityHint) { checkArgument(initialSlabSize > 0, "initialSlabSize must be > 0"); checkArgument(maxCapacityHint > 0, "maxCapacityHint must be > 0"); checkArgument(maxCapacityHint >= initialSlabSize, String.format("maxCapacityHint can't be less than initialSlabSize %d %d", initialSlabSize, maxCapacityHint)); this.initialSlabSize = initialSlabSize; this.maxCapacityHint = maxCapacityHint; reset(); }
/** * @param initialSlabSize the size to make the first slab * @param maxCapacityHint a hint (not guarantee) of the max amount of data written to this stream */ public CapacityByteArrayOutputStream(int initialSlabSize, int maxCapacityHint) { checkArgument(initialSlabSize > 0, "initialSlabSize must be > 0"); checkArgument(maxCapacityHint > 0, "maxCapacityHint must be > 0"); checkArgument(maxCapacityHint >= initialSlabSize, String.format("maxCapacityHint can't be less than initialSlabSize %d %d", initialSlabSize, maxCapacityHint)); this.initialSlabSize = initialSlabSize; this.maxCapacityHint = maxCapacityHint; reset(); }
/**
 * Translate {@code pred} into its incrementally-updated form via the visitor
 * ({@code pred.accept(this)}). May be invoked at most once per builder instance.
 *
 * @param pred the predicate to translate
 * @return the incrementally-updated predicate
 * @throws IllegalArgumentException if this builder has already been used
 */
public final IncrementallyUpdatedFilterPredicate build(FilterPredicate pred) {
  checkArgument(!built, "This builder has already been used");
  final IncrementallyUpdatedFilterPredicate result = pred.accept(this);
  // Mark only after a successful visit so callers see the result exactly once.
  built = true;
  return result;
}
/**
 * Validate that {@code [off, off + len)} is a legal window into {@code buffer}.
 *
 * @param buffer the backing array; must not be null
 * @param off    start offset of the window
 * @param len    length of the window
 * @throws NullPointerException     if {@code buffer} is null
 * @throws IllegalArgumentException if the offset/length pair falls outside the buffer
 */
public static void validateBuffer(byte[] buffer, int off, int len) {
  Preconditions.checkNotNull(buffer, "buffer");
  // Written as off <= buffer.length - len to avoid the int overflow of off + len.
  final boolean inBounds = off >= 0 && len >= 0 && off <= buffer.length - len;
  Preconditions.checkArgument(inBounds,
      "Invalid buffer offset or length: buffer.length=%s off=%s len=%s",
      buffer.length, off, len);
}
}
/**
 * Look up the {@link ColumnChunkMetaData} for a column path.
 *
 * @param columnPath path of the column to resolve
 * @return the column chunk metadata for {@code columnPath}
 * @throws IllegalArgumentException if the column is not part of the schema
 */
private ColumnChunkMetaData getColumnChunk(ColumnPath columnPath) {
  final ColumnChunkMetaData chunk = columns.get(columnPath);
  checkArgument(chunk != null, "Column " + columnPath.toDotString() + " not found in schema!");
  return chunk;
}
/**
 * Store {@code filterPredicate} in the configuration, both as a serialized object and
 * as a human-readable string. Mutually exclusive with an {@code UnboundRecordFilter}.
 *
 * @param configuration   configuration to write the predicate into
 * @param filterPredicate the predicate to store
 * @throws IllegalArgumentException if an UnboundRecordFilter was already configured
 * @throws RuntimeException         if serializing the predicate fails
 */
public static void setFilterPredicate(Configuration configuration, FilterPredicate filterPredicate) {
  // Only one filtering mechanism may be configured at a time.
  checkArgument(getUnboundRecordFilter(configuration) == null,
      "You cannot provide a FilterPredicate after providing an UnboundRecordFilter");
  // Human-readable copy for debugging; the serialized form below is authoritative.
  configuration.set(FILTER_PREDICATE + ".human.readable", filterPredicate.toString());
  try {
    SerializationUtil.writeObjectToConfAsBase64(FILTER_PREDICATE, filterPredicate, configuration);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Register an {@link UnboundRecordFilter} class on the job configuration.
 * Mutually exclusive with a {@code FilterPredicate}.
 *
 * @param job         the job whose configuration is updated
 * @param filterClass the filter implementation to register
 * @throws IllegalArgumentException if a FilterPredicate was already configured
 */
public static void setUnboundRecordFilter(Job job, Class<? extends UnboundRecordFilter> filterClass) {
  final Configuration configuration = ContextUtil.getConfiguration(job);
  // Only one filtering mechanism may be configured at a time.
  checkArgument(getFilterPredicate(configuration) == null,
      "You cannot provide an UnboundRecordFilter after providing a FilterPredicate");
  configuration.set(UNBOUND_RECORD_FILTER, filterClass.getName());
}
/**
 * Decode a {@code NanoTime} from its 12-byte binary form: a little-endian
 * 8-byte time-of-day in nanos followed by a 4-byte Julian day.
 *
 * @param bytes the 12-byte encoded value
 * @return the decoded {@code NanoTime}
 * @throws IllegalArgumentException if the input is not exactly 12 bytes
 */
public static NanoTime fromBinary(Binary bytes) {
  // Report the actual length so a malformed value is easy to diagnose.
  Preconditions.checkArgument(bytes.length() == 12,
      "Must be 12 bytes, but was " + bytes.length() + " bytes");
  ByteBuffer buf = bytes.toByteBuffer();
  buf.order(ByteOrder.LITTLE_ENDIAN);
  long timeOfDayNanos = buf.getLong();
  int julianDay = buf.getInt();
  return new NanoTime(julianDay, timeOfDayNanos);
}
/**
 * Decode a {@code NanoTime} from its 12-byte binary form: a little-endian
 * 8-byte time-of-day in nanos followed by a 4-byte Julian day.
 *
 * @param bytes the 12-byte encoded value
 * @return the decoded {@code NanoTime}
 * @throws IllegalArgumentException if the input is not exactly 12 bytes
 */
public static NanoTime fromBinary(Binary bytes) {
  // Report the actual length so a malformed value is easy to diagnose.
  Preconditions.checkArgument(bytes.length() == 12,
      "Must be 12 bytes, but was " + bytes.length() + " bytes");
  ByteBuffer buf = bytes.toByteBuffer();
  buf.order(ByteOrder.LITTLE_ENDIAN);
  long timeOfDayNanos = buf.getLong();
  int julianDay = buf.getInt();
  return new NanoTime(julianDay, timeOfDayNanos);
}
/**
 * Decoder for the RLE / bit-packing hybrid encoding.
 *
 * @param bitWidth width in bits of each packed value; must be in [0, 32]
 * @param in       stream positioned at the start of the encoded data
 * @throws IllegalArgumentException if {@code bitWidth} is out of range
 */
public RunLengthBitPackingHybridDecoder(int bitWidth, ByteArrayInputStream in) {
  if (DEBUG) {
    LOG.debug("decoding bitWidth " + bitWidth);
  }
  Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32");
  this.bitWidth = bitWidth;
  // Values are packed little-endian at the configured width.
  this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
  this.in = in;
}
/**
 * Set this type's repetition (required/optional/repeated). May be called at most once.
 *
 * @param repetition the repetition to apply; must not be null
 * @return this builder for chaining
 * @throws IllegalArgumentException if the repetition was already set
 * @throws NullPointerException     if {@code repetition} is null
 */
protected final T repetition(Type.Repetition repetition) {
  // Reject a second assignment before validating the new value.
  Preconditions.checkArgument(!repetitionAlreadySet, "Repetition has already been set");
  Preconditions.checkNotNull(repetition, "Repetition cannot be null");
  this.repetition = repetition;
  this.repetitionAlreadySet = true;
  return self();
}
/**
 * Encoder for the RLE / bit-packing hybrid encoding.
 *
 * @param bitWidth        width in bits of each value; must be in [0, 32]
 * @param initialCapacity initial slab size of the output buffer
 * @param pageSize        capacity hint for the output buffer
 * @throws IllegalArgumentException if {@code bitWidth} is out of range
 */
public RunLengthBitPackingHybridEncoder(int bitWidth, int initialCapacity, int pageSize) {
  if (DEBUG) {
    // Fixed typo in the log message: "bithWidth" -> "bitWidth".
    LOG.debug(String.format("Encoding: RunLengthBitPackingHybridEncoder with "
        + "bitWidth: %d initialCapacity %d", bitWidth, initialCapacity));
  }
  Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32");
  this.bitWidth = bitWidth;
  this.baos = new CapacityByteArrayOutputStream(initialCapacity, pageSize);
  // Scratch buffer for one packed group: bitWidth bytes hold 8 values of bitWidth bits each.
  this.packBuffer = new byte[bitWidth];
  this.bufferedValues = new int[8];
  this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
  reset(false);
}