/**
 * Creates the aggregating UDF, pairing each aggregation function with the
 * position of the tuple field it aggregates.
 *
 * @param aggFunctions the aggregation functions to apply; must not be null
 * @param fieldPositions the field position each function operates on; must not be
 *     null and must have the same length as {@code aggFunctions}
 */
public AggregatingUdf(AggregationFunction<Object>[] aggFunctions, int[] fieldPositions) {
	Preconditions.checkNotNull(aggFunctions);
	// Bug fix: the original checked aggFunctions twice and never null-checked
	// fieldPositions, so a null fieldPositions surfaced as an NPE on .length below.
	Preconditions.checkNotNull(fieldPositions);
	Preconditions.checkArgument(aggFunctions.length == fieldPositions.length);
	this.aggFunctions = aggFunctions;
	this.fieldPositions = fieldPositions;
}
/**
 * Stashes a single element to be re-emitted before the next regular element.
 *
 * <p>At most one element may be held back at a time; pushing a second element
 * before the first has been consumed is a programming error.
 *
 * @param element the element to hold back
 * @throws IllegalStateException if an element is already held back
 */
public void pushBack(IN element) {
	// Guard clause equivalent of Preconditions.checkState(pushBack == null, ...).
	if (pushBack != null) {
		throw new IllegalStateException(
			"Already contains an element that was pushed back. This indicates a programming error.");
	}
	pushBack = element;
}
}
/**
 * Creates a Cassandra append table sink.
 *
 * @param builder the cluster builder used to connect to Cassandra; must not be null
 * @param cql the CQL statement executed for each row; must not be null
 * @param properties sink configuration properties; must not be null
 */
public CassandraAppendTableSink(ClusterBuilder builder, String cql, Properties properties) {
	// Validate everything up front, then assign; checks run in the same order
	// and with the same messages as before.
	Preconditions.checkNotNull(builder, "ClusterBuilder must not be null.");
	Preconditions.checkNotNull(cql, "CQL query must not be null.");
	Preconditions.checkNotNull(properties, "Properties must not be null.");

	this.builder = builder;
	this.cql = cql;
	this.properties = properties;
}
/**
 * An undirected {@link Graph} connecting every distinct pair of vertices.
 *
 * @param env the Flink execution environment
 * @param vertexCount number of vertices; must be at least {@code MINIMUM_VERTEX_COUNT}
 */
public CompleteGraph(ExecutionEnvironment env, long vertexCount) {
	// Guard clause equivalent of Preconditions.checkArgument(...) — same
	// exception type and message.
	if (vertexCount < MINIMUM_VERTEX_COUNT) {
		throw new IllegalArgumentException(
			"Vertex count must be at least " + MINIMUM_VERTEX_COUNT);
	}
	this.env = env;
	this.vertexCount = vertexCount;
}
/**
 * Creates a resource with the given name, value, and aggregation type.
 *
 * @param name the resource name; must not be null
 * @param value the resource value
 * @param type how instances of this resource are aggregated; must not be null
 */
protected Resource(String name, double value, ResourceAggregateType type) {
	// Null checks and assignments for the reference fields, then the primitive.
	// (Check order for 'name' is unchanged; an exception during construction
	// discards the instance, so assignment order is unobservable.)
	this.name = checkNotNull(name);
	this.resourceAggregateType = checkNotNull(type);
	this.value = value;
}
/**
 * Sets the parallelism for the iteration.
 *
 * @param parallelism the parallelism; either a positive value or
 *     {@code ExecutionConfig.PARALLELISM_DEFAULT}
 */
public void setParallelism(int parallelism) {
	// A parallelism is valid when positive, or when it is the sentinel that
	// requests the system default.
	final boolean validParallelism =
		parallelism > 0 || parallelism == ExecutionConfig.PARALLELISM_DEFAULT;
	Preconditions.checkArgument(
		validParallelism,
		"The parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default).");
	this.parallelism = parallelism;
}
/**
 * Creates a new {@code StreamingFileSink} that writes files to the given base directory.
 *
 * @param bucketsBuilder the builder that creates the sink's buckets; must not be null
 * @param bucketCheckInterval interval (in milliseconds, presumably — confirm with
 *     callers) between bucket inspections; must be positive
 */
private StreamingFileSink(
		final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder,
		final long bucketCheckInterval) {
	// Same check order as before: interval first, then the builder.
	Preconditions.checkArgument(bucketCheckInterval > 0L);
	Preconditions.checkNotNull(bucketsBuilder);

	this.bucketsBuilder = bucketsBuilder;
	this.bucketCheckInterval = bucketCheckInterval;
}
/**
 * Creates a new data input stream from the given Hadoop input stream.
 *
 * @param fsDataInputStream the Hadoop input stream to wrap; must not be null
 */
public HadoopDataInputStream(org.apache.hadoop.fs.FSDataInputStream fsDataInputStream) {
	// Validate, then assign (kept as checkNotNull so the thrown exception is
	// identical to before).
	checkNotNull(fsDataInputStream);
	this.fsDataInputStream = fsDataInputStream;
}
/**
 * Filter out Adamic-Adar scores less than the given minimum.
 *
 * @param score minimum score; must be non-negative
 * @return this
 */
public AdamicAdar<K, VV, EV> setMinimumScore(float score) {
	// Negated form of (score >= 0) rather than (score < 0) so that NaN is
	// rejected exactly as the original checkArgument did.
	if (!(score >= 0)) {
		throw new IllegalArgumentException("Minimum score must be non-negative");
	}
	this.minimumScore = score;
	return this;
}
/**
 * Sets the part size above which a part file will have to roll.
 *
 * @param size the allowed part size; must be positive
 * @return a new builder carrying the updated part size
 * @throws IllegalArgumentException if {@code size} is not positive
 */
public DefaultRollingPolicy.PolicyBuilder withMaxPartSize(final long size) {
	// Fix: this validates a caller-supplied argument, so it should throw
	// IllegalArgumentException (checkArgument), not IllegalStateException
	// (checkState) — consistent with the DefaultRollingPolicy constructor.
	Preconditions.checkArgument(size > 0L, "The maximum part size must be positive.");
	return new PolicyBuilder(size, rolloverInterval, inactivityInterval);
}
/**
 * Creates the async-wait operator.
 *
 * @param asyncFunction the user async function, forwarded to the superclass
 * @param timeout timeout for each in-flight async operation (units defined by callers)
 * @param capacity maximum number of concurrent async operations; must be positive
 * @param outputMode ordered/unordered result emission mode; must not be null
 */
public AsyncWaitOperator(
		AsyncFunction<IN, OUT> asyncFunction,
		long timeout,
		int capacity,
		AsyncDataStream.OutputMode outputMode) {
	super(asyncFunction);
	chainingStrategy = ChainingStrategy.ALWAYS;

	// Validate first, assign afterwards; check order (capacity, then outputMode)
	// and messages are unchanged.
	Preconditions.checkArgument(capacity > 0, "The number of concurrent async operation should be greater than 0.");
	Preconditions.checkNotNull(outputMode, "outputMode");

	this.capacity = capacity;
	this.outputMode = outputMode;
	this.timeout = timeout;
}
/**
 * Creates the exception.
 *
 * @param serverName the name of the server that threw the exception
 * @param kvStateId the state id for which no state was found; must not be null
 */
public UnknownKvStateIdException(String serverName, KvStateID kvStateId) {
	// super(...) must be the first statement, so the null check is folded into
	// the message expression; "." produces the same message as the original '.'.
	super(serverName, "No registered state with ID " + Preconditions.checkNotNull(kvStateId) + ".");
}
}
/**
 * Sets the JDBC fetch size hint for the result set.
 *
 * @param fetchSize the fetch size; must be positive, or {@code Integer.MIN_VALUE}
 *     (the MySQL-style row-streaming sentinel)
 * @return this builder
 */
public JDBCInputFormatBuilder setFetchSize(int fetchSize) {
	final boolean legalFetchSize = fetchSize == Integer.MIN_VALUE || fetchSize > 0;
	Preconditions.checkArgument(
		legalFetchSize,
		"Illegal value %s for fetchSize, has to be positive or Integer.MIN_VALUE.",
		fetchSize);
	format.fetchSize = fetchSize;
	return this;
}
/**
 * Sets the interval of allowed inactivity after which a part file will have to roll.
 *
 * @param interval the allowed inactivity interval; must be positive
 * @return a new builder carrying the updated inactivity interval
 * @throws IllegalArgumentException if {@code interval} is not positive
 */
public DefaultRollingPolicy.PolicyBuilder withInactivityInterval(final long interval) {
	// Fix: this validates a caller-supplied argument, so it should throw
	// IllegalArgumentException (checkArgument), not IllegalStateException
	// (checkState) — consistent with the DefaultRollingPolicy constructor.
	Preconditions.checkArgument(interval > 0L, "The inactivity interval must be positive.");
	return new PolicyBuilder(partSize, rolloverInterval, interval);
}
/**
 * Creates a source that reads text from a socket.
 *
 * @param hostname host to connect to; must not be null
 * @param port port to connect to; must be in (0, 65536)
 * @param delimiter record delimiter (not validated here)
 * @param maxNumRetries number of reconnect attempts: 0 or more, or -1 for infinite
 * @param delayBetweenRetries delay between reconnect attempts; must be non-negative
 */
public SocketTextStreamFunction(String hostname, int port, String delimiter, long maxNumRetries, long delayBetweenRetries) {
	// Validation block — same checks, order, and messages as before.
	checkArgument(port > 0 && port < 65536, "port is out of range");
	checkArgument(maxNumRetries >= -1, "maxNumRetries must be zero or larger (num retries), or -1 (infinite retries)");
	checkArgument(delayBetweenRetries >= 0, "delayBetweenRetries must be zero or positive");
	checkNotNull(hostname, "hostname must not be null");

	// Assignment block.
	this.hostname = hostname;
	this.port = port;
	this.delimiter = delimiter;
	this.maxNumRetries = maxNumRetries;
	this.delayBetweenRetries = delayBetweenRetries;
}
/**
 * Creates an input format that maps Cassandra rows to POJOs of the given class.
 *
 * @param query the CQL query, forwarded to the superclass
 * @param builder the cluster builder, forwarded to the superclass
 * @param inputClass the POJO class to map rows to; must not be null
 * @param mapperOptions mapper options; not null-checked here — presumably an
 *     optional parameter (TODO confirm with the other constructors)
 */
public CassandraPojoInputFormat(String query, ClusterBuilder builder, Class<OUT> inputClass, MapperOptions mapperOptions) {
	super(query, builder);
	Preconditions.checkNotNull(inputClass, "InputClass cannot be null");
	this.inputClass = inputClass;
	this.mapperOptions = mapperOptions;
}
/**
 * Private constructor to avoid direct instantiation; use the policy builder instead.
 *
 * @param partSize part size above which a part file rolls; must be positive
 * @param rolloverInterval interval after which a part file rolls; must be positive
 * @param inactivityInterval inactivity interval after which a part file rolls; must be positive
 */
private DefaultRollingPolicy(long partSize, long rolloverInterval, long inactivityInterval) {
	// All three thresholds must be strictly positive; checks run in the same
	// order as before.
	Preconditions.checkArgument(partSize > 0L);
	Preconditions.checkArgument(rolloverInterval > 0L);
	Preconditions.checkArgument(inactivityInterval > 0L);

	this.partSize = partSize;
	this.rolloverInterval = rolloverInterval;
	this.inactivityInterval = inactivityInterval;
}
/**
 * Rebuilds the outer {@code NullableSerializer} around the reconfigured nested
 * serializer, reusing the configured null-padding length.
 *
 * @param nestedSerializers the nested serializers; the element serializer is at index 0
 * @return a new {@code NullableSerializer} wrapping the nested serializer
 * @throws IllegalStateException if the padding length is negative
 */
@Override
protected NullableSerializer<T> createOuterSerializerWithNestedSerializers(TypeSerializer<?>[] nestedSerializers) {
	// Fix: Preconditions message templates substitute only "%s" (Guava-style);
	// the original "%d" was emitted verbatim instead of the padding length.
	checkState(nullPaddingLength >= 0,
		"Negative padding size after serializer construction: %s", nullPaddingLength);
	// Share the empty array for the common zero-padding case.
	final byte[] padding = (nullPaddingLength == 0) ? EMPTY_BYTE_ARRAY : new byte[nullPaddingLength];
	@SuppressWarnings("unchecked")
	TypeSerializer<T> nestedSerializer = (TypeSerializer<T>) nestedSerializers[0];
	return new NullableSerializer<>(nestedSerializer, padding);
}
/**
 * Creates a writer for an in-progress part file.
 *
 * @param bucketId the id of the bucket this part file belongs to; must not be null
 * @param currentPartStream the recoverable stream the part is written to; must not be null
 * @param creationTime creation timestamp; must be non-negative, and also seeds
 *     the last-update timestamp
 */
protected PartFileWriter(
		final BucketID bucketId,
		final RecoverableFsDataOutputStream currentPartStream,
		final long creationTime) {
	// Validation first (same order as before), then plain assignments.
	Preconditions.checkArgument(creationTime >= 0L);
	Preconditions.checkNotNull(bucketId);
	Preconditions.checkNotNull(currentPartStream);

	this.bucketId = bucketId;
	this.currentPartStream = currentPartStream;
	this.creationTime = creationTime;
	this.lastUpdateTime = creationTime;
}
/**
 * Creates the builder for a pattern select with timeout handling.
 *
 * @param selectFunction the select function applied to matched patterns; must not be null
 * @param timeoutHandler the function applied to timed-out partial matches; must not be null
 * @param outputTag the side-output tag for timed-out matches; must not be null
 */
TimeoutSelectBuilder(
		// Fix: renamed from the misleading 'flatSelectFunction' (copy-paste from
		// the flat-select builder) — the type and target field are the plain
		// select variants. Java calls are positional, so callers are unaffected.
		final PatternSelectFunction<IN, OUT> selectFunction,
		final PatternTimeoutFunction<IN, TIMED_OUT> timeoutHandler,
		final OutputTag<TIMED_OUT> outputTag) {
	this.selectFunction = checkNotNull(selectFunction);
	this.timeoutHandler = checkNotNull(timeoutHandler);
	this.outputTag = checkNotNull(outputTag);
}