private GatherSumApplyIteration(GatherFunction<VV, EV, M> gather, SumFunction<VV, EV, M> sum,
        ApplyFunction<K, VV, M> apply, DataSet<Edge<K, EV>> edges, int maximumNumberOfIterations) {

    Preconditions.checkNotNull(gather);
    Preconditions.checkNotNull(sum);
    Preconditions.checkNotNull(apply);
    Preconditions.checkNotNull(edges);
    Preconditions.checkArgument(maximumNumberOfIterations > 0,
        "The maximum number of iterations must be at least one.");

    this.gather = gather;
    this.sum = sum;
    this.apply = apply;
    this.edgeDataSet = edges;
    this.maximumNumberOfIterations = maximumNumberOfIterations;
}
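Since this constructor is private, user code normally reaches it through Gelly's Graph API; a minimal sketch, assuming hypothetical MyGather, MySum and MyApply implementations of the three UDF interfaces:

// MyGather, MySum and MyApply are placeholder user-defined functions.
Graph<Long, Double, Double> graph = Graph.fromDataSet(vertices, edges, env);
Graph<Long, Double, Double> result =
    graph.runGatherSumApplyIteration(new MyGather(), new MySum(), new MyApply(), 10);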
PeriodicOffsetCommitter(
        ZookeeperOffsetHandler offsetHandler,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates,
        ExceptionProxy errorHandler,
        long commitInterval) {

    this.offsetHandler = checkNotNull(offsetHandler);
    this.partitionStates = checkNotNull(partitionStates);
    this.errorHandler = checkNotNull(errorHandler);
    this.commitInterval = commitInterval;
    checkArgument(commitInterval > 0);
}
public AggregatingUdf(AggregationFunction<Object>[] aggFunctions, int[] fieldPositions) {
    Preconditions.checkNotNull(aggFunctions);
    Preconditions.checkNotNull(fieldPositions);
    Preconditions.checkArgument(aggFunctions.length == fieldPositions.length);
    this.aggFunctions = aggFunctions;
    this.fieldPositions = fieldPositions;
}
protected PartFileWriter(
        final BucketID bucketId,
        final RecoverableFsDataOutputStream currentPartStream,
        final long creationTime) {

    Preconditions.checkArgument(creationTime >= 0L);
    this.bucketId = Preconditions.checkNotNull(bucketId);
    this.currentPartStream = Preconditions.checkNotNull(currentPartStream);
    this.creationTime = creationTime;
    this.lastUpdateTime = creationTime;
}
/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file that holds the data not yet committed to the target file.
 * @param offset The position to resume from.
 */
HadoopFsRecoverable(Path targetFile, Path tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file that holds the data not yet committed to the target file.
 * @param offset The position to resume from.
 */
LocalRecoverable(File targetFile, File tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
/**
 * Creates a new {@code StreamingFileSink} that writes files to the given base directory.
 */
private StreamingFileSink(
        final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder,
        final long bucketCheckInterval) {

    Preconditions.checkArgument(bucketCheckInterval > 0L);
    this.bucketsBuilder = Preconditions.checkNotNull(bucketsBuilder);
    this.bucketCheckInterval = bucketCheckInterval;
}
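Because this constructor is private, sinks are created through the format builders; a minimal usage sketch with the row-format builder (the output path is illustrative):

// Builds a sink that writes each record as a UTF-8 encoded line.
StreamingFileSink<String> sink = StreamingFileSink
    .forRowFormat(new Path("/tmp/output"), new SimpleStringEncoder<String>("UTF-8"))
    .build();
stream.addSink(sink);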
/**
 * Creates a new named {@code OutputTag} with the given id and output {@link TypeInformation}.
 *
 * @param id The id of the created {@code OutputTag}.
 * @param typeInfo The {@code TypeInformation} for the side output.
 */
public OutputTag(String id, TypeInformation<T> typeInfo) {
    Preconditions.checkNotNull(id, "OutputTag id cannot be null.");
    Preconditions.checkArgument(!id.isEmpty(), "OutputTag id must not be empty.");
    this.id = id;
    this.typeInfo = Preconditions.checkNotNull(typeInfo, "TypeInformation cannot be null.");
}
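A one-line usage sketch that satisfies all three checks (the id "late-data" is illustrative):

// Non-null, non-empty id plus explicit type information for the side output.
OutputTag<String> lateDataTag = new OutputTag<>("late-data", Types.STRING);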
public AsyncWaitOperator(
        AsyncFunction<IN, OUT> asyncFunction,
        long timeout,
        int capacity,
        AsyncDataStream.OutputMode outputMode) {
    super(asyncFunction);
    chainingStrategy = ChainingStrategy.ALWAYS;

    Preconditions.checkArgument(capacity > 0,
        "The number of concurrent async operations must be greater than 0.");
    this.capacity = capacity;
    this.outputMode = Preconditions.checkNotNull(outputMode, "outputMode");
    this.timeout = timeout;
}
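The operator is typically instantiated via AsyncDataStream rather than directly; a sketch, assuming a trivial lambda AsyncFunction (timeout and capacity values are illustrative):

DataStream<String> result = AsyncDataStream.unorderedWait(
    input,
    (String value, ResultFuture<String> future) ->
        future.complete(Collections.singleton(value.toUpperCase())),
    1000L, TimeUnit.MILLISECONDS, // timeout
    100);                         // capacity; must be > 0 per the check above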
public ChunkedByteBuf(ByteBuf buf, int chunkSize) {
    this.buf = Preconditions.checkNotNull(buf, "Buffer");
    Preconditions.checkArgument(chunkSize > 0, "Non-positive chunk size");
    this.chunkSize = chunkSize;
}
public KeyGroupStreamPartitioner(KeySelector<T, K> keySelector, int maxParallelism) {
    Preconditions.checkArgument(maxParallelism > 0, "Number of key-groups must be > 0!");
    this.keySelector = Preconditions.checkNotNull(keySelector);
    this.maxParallelism = maxParallelism;
}
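For context, maxParallelism here is the number of key-groups: Flink's KeyGroupRangeAssignment first maps a key to a key-group and then maps the key-group to a parallel channel, which is why the value must be positive. A sketch (key and parallelism are assumed to be in scope):

int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism);
int channel = KeyGroupRangeAssignment.computeOperatorIndexForKeyGroup(
    maxParallelism, parallelism, keyGroup);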
public BackPressuringExecutor(Executor delegate, int numConcurrentExecutions) {
    checkArgument(numConcurrentExecutions > 0, "numConcurrentExecutions must be > 0");
    this.delegate = checkNotNull(delegate, "delegate");
    this.permits = new Semaphore(numConcurrentExecutions, true);
}
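The fair Semaphore hints at how submissions are throttled; a hypothetical sketch of the matching execute() method, not the actual implementation:

@Override
public void execute(Runnable command) {
    permits.acquireUninterruptibly();  // block the caller until a slot frees up
    try {
        delegate.execute(() -> {
            try {
                command.run();
            } finally {
                permits.release();     // slot becomes free when the task ends
            }
        });
    } catch (RejectedExecutionException e) {
        permits.release();             // do not leak a permit on failed submission
        throw e;
    }
}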
Elasticsearch6ApiCallBridge(List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
    Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
    this.httpHosts = httpHosts;
    this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
}
public AutoClosablePath(final Path path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(path.isAbsolute(), "Path must be absolute.");
    this.path = path;
}
public CassandraOutputFormatBase(String insertQuery, ClusterBuilder builder) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(insertQuery), "Query cannot be null or empty");
    Preconditions.checkNotNull(builder, "Builder cannot be null");
    this.insertQuery = insertQuery;
    this.builder = builder;
}
@Override
public CassandraAppendTableSink configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    CassandraAppendTableSink cassandraTableSink =
        new CassandraAppendTableSink(this.builder, this.cql, this.properties);
    cassandraTableSink.fieldNames = Preconditions.checkNotNull(fieldNames, "Field names must not be null.");
    cassandraTableSink.fieldTypes = Preconditions.checkNotNull(fieldTypes, "Field types must not be null.");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return cassandraTableSink;
}
public OrderedStreamElementQueue(
        int capacity,
        Executor executor,
        OperatorActions operatorActions) {

    Preconditions.checkArgument(capacity > 0, "The capacity must be larger than 0.");
    this.capacity = capacity;

    this.executor = Preconditions.checkNotNull(executor, "executor");
    this.operatorActions = Preconditions.checkNotNull(operatorActions, "operatorActions");

    this.lock = new ReentrantLock(false);
    this.headIsCompleted = lock.newCondition();
    this.notFull = lock.newCondition();

    this.queue = new ArrayDeque<>(capacity);
}
/**
 * Sets the path of the ORC file(s).
 * If the path specifies a directory, it will be recursively enumerated.
 *
 * @param path The path of the ORC file(s).
 * @return The builder.
 */
public Builder path(String path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(!path.isEmpty(), "Path must not be empty.");
    this.path = path;
    return this;
}
public SocketTextStreamFunction(String hostname, int port, String delimiter, long maxNumRetries, long delayBetweenRetries) {
    checkArgument(port > 0 && port < 65536, "port is out of range");
    checkArgument(maxNumRetries >= -1, "maxNumRetries must be zero or larger (num retries), or -1 (infinite retries)");
    checkArgument(delayBetweenRetries >= 0, "delayBetweenRetries must be zero or positive");

    this.hostname = checkNotNull(hostname, "hostname must not be null");
    this.port = port;
    this.delimiter = delimiter;
    this.maxNumRetries = maxNumRetries;
    this.delayBetweenRetries = delayBetweenRetries;
}
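In practice this source is usually created through the execution environment's convenience method, which constructs a SocketTextStreamFunction internally; hostname and port below are illustrative:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStream<String> lines = env.socketTextStream("localhost", 9999);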
private VertexCentricIteration(ComputeFunction<K, VV, EV, Message> cf, DataSet<Edge<K, EV>> edgesWithValue,
        MessageCombiner<K, Message> mc, int maximumNumberOfIterations) {

    Preconditions.checkNotNull(cf);
    Preconditions.checkNotNull(edgesWithValue);
    Preconditions.checkArgument(maximumNumberOfIterations > 0,
        "The maximum number of iterations must be at least one.");

    this.computeFunction = cf;
    this.edgesWithValue = edgesWithValue;
    this.combineFunction = mc;
    this.maximumNumberOfIterations = maximumNumberOfIterations;
    this.messageType = getMessageType(cf);
}
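As with the GSA variant above, user code reaches this private constructor through the Graph API; a sketch assuming a hypothetical MyComputeFunction (the combiner is passed as null here, consistent with mc not being null-checked above):

// MyComputeFunction is a placeholder user-defined ComputeFunction.
Graph<Long, Double, Double> result =
    graph.runVertexCentricIteration(new MyComputeFunction(), null, 20);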