public SocketTextStreamFunction(String hostname, int port, String delimiter, long maxNumRetries, long delayBetweenRetries) {
    checkArgument(port > 0 && port < 65536, "port is out of range");
    checkArgument(maxNumRetries >= -1, "maxNumRetries must be zero or larger (num retries), or -1 (infinite retries)");
    checkArgument(delayBetweenRetries >= 0, "delayBetweenRetries must be zero or positive");

    this.hostname = checkNotNull(hostname, "hostname must not be null");
    this.port = port;
    this.delimiter = delimiter;
    this.maxNumRetries = maxNumRetries;
    this.delayBetweenRetries = delayBetweenRetries;
}
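A quick way to see such a check fire is to pass an out-of-range port; checkArgument throws IllegalArgumentException with the given message. A minimal sketch, assuming Flink's streaming API is on the classpath (the class PortCheckDemo is hypothetical):

import org.apache.flink.streaming.api.functions.source.SocketTextStreamFunction;

public class PortCheckDemo {
    public static void main(String[] args) {
        try {
            // port 70000 violates the 1..65535 range enforced by the first checkArgument
            new SocketTextStreamFunction("localhost", 70000, "\n", 0, 0);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage()); // "rejected: port is out of range"
        }
    }
}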
private RestClusterClientConfiguration(
        final RestClientConfiguration endpointConfiguration,
        final long awaitLeaderTimeout,
        final int retryMaxAttempts,
        final long retryDelay) {
    checkArgument(awaitLeaderTimeout >= 0, "awaitLeaderTimeout must be equal to or greater than 0");
    checkArgument(retryMaxAttempts >= 0, "retryMaxAttempts must be equal to or greater than 0");
    checkArgument(retryDelay >= 0, "retryDelay must be equal to or greater than 0");

    this.restClientConfiguration = Preconditions.checkNotNull(endpointConfiguration);
    this.awaitLeaderTimeout = awaitLeaderTimeout;
    this.retryMaxAttempts = retryMaxAttempts;
    this.retryDelay = retryDelay;
}
/**
 * Creates a new {@code StreamingFileSink} that writes files to the given base directory.
 */
private StreamingFileSink(
        final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder,
        final long bucketCheckInterval) {
    Preconditions.checkArgument(bucketCheckInterval > 0L);
    this.bucketsBuilder = Preconditions.checkNotNull(bucketsBuilder);
    this.bucketCheckInterval = bucketCheckInterval;
}
public AsyncWaitOperator(
        AsyncFunction<IN, OUT> asyncFunction,
        long timeout,
        int capacity,
        AsyncDataStream.OutputMode outputMode) {
    super(asyncFunction);
    chainingStrategy = ChainingStrategy.ALWAYS;

    Preconditions.checkArgument(capacity > 0, "The number of concurrent async operations should be greater than 0.");
    this.capacity = capacity;

    this.outputMode = Preconditions.checkNotNull(outputMode, "outputMode");
    this.timeout = timeout;
}
public ChunkedByteBuf(ByteBuf buf, int chunkSize) {
    this.buf = Preconditions.checkNotNull(buf, "Buffer");
    Preconditions.checkArgument(chunkSize > 0, "Non-positive chunk size");
    this.chunkSize = chunkSize;
}
public KeyGroupStreamPartitioner(KeySelector<T, K> keySelector, int maxParallelism) {
    Preconditions.checkArgument(maxParallelism > 0, "Number of key-groups must be > 0!");
    this.keySelector = Preconditions.checkNotNull(keySelector);
    this.maxParallelism = maxParallelism;
}
public AggregatingUdf(AggregationFunction<Object>[] aggFunctions, int[] fieldPositions) {
    // the original checked aggFunctions twice; the second check belongs to fieldPositions
    Preconditions.checkNotNull(aggFunctions);
    Preconditions.checkNotNull(fieldPositions);
    Preconditions.checkArgument(aggFunctions.length == fieldPositions.length);
    this.aggFunctions = aggFunctions;
    this.fieldPositions = fieldPositions;
}
public BackPressuringExecutor(Executor delegate, int numConcurrentExecutions) {
    checkArgument(numConcurrentExecutions > 0, "numConcurrentExecutions must be > 0");
    this.delegate = checkNotNull(delegate, "delegate");
    this.permits = new Semaphore(numConcurrentExecutions, true);
}
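The execute method of BackPressuringExecutor is not part of this search hit. Below is a plausible stand-alone sketch of the pattern the fair semaphore enables: submitters block on a permit, and the permit is released when the wrapped task finishes. The class name ThrottledExecutor and its details are hypothetical, not Flink's implementation:

import java.util.concurrent.Executor;
import java.util.concurrent.Semaphore;

public final class ThrottledExecutor implements Executor {
    private final Executor delegate;
    private final Semaphore permits;

    public ThrottledExecutor(Executor delegate, int maxInFlight) {
        this.delegate = delegate;
        this.permits = new Semaphore(maxInFlight, true); // fair, as in the constructor above
    }

    @Override
    public void execute(Runnable command) {
        permits.acquireUninterruptibly(); // blocks the submitter: back pressure
        try {
            delegate.execute(() -> {
                try {
                    command.run();
                } finally {
                    permits.release(); // free a slot once the task completes
                }
            });
        } catch (RuntimeException e) {
            permits.release(); // submission rejected: return the permit
            throw e;
        }
    }
}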
Elasticsearch6ApiCallBridge(List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
    Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
    this.httpHosts = httpHosts;
    this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
}
protected PartFileWriter(
        final BucketID bucketId,
        final RecoverableFsDataOutputStream currentPartStream,
        final long creationTime) {
    Preconditions.checkArgument(creationTime >= 0L);
    this.bucketId = Preconditions.checkNotNull(bucketId);
    this.currentPartStream = Preconditions.checkNotNull(currentPartStream);
    this.creationTime = creationTime;
    this.lastUpdateTime = creationTime;
}
/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file to resume writing to.
 * @param offset The position to resume from.
 */
HadoopFsRecoverable(Path targetFile, Path tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file to resume writing to.
 * @param offset The position to resume from.
 */
LocalRecoverable(File targetFile, File tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
public EnumSerializer(Class<T> enumClass) {
    this.enumClass = checkNotNull(enumClass);
    checkArgument(Enum.class.isAssignableFrom(enumClass), "not an enum");

    this.values = enumClass.getEnumConstants();
    checkArgument(this.values.length > 0, "cannot use an empty enum");

    this.valueToOrdinal = new HashMap<>(values.length);
    int i = 0;
    for (T value : values) {
        this.valueToOrdinal.put(value, i++);
    }
}
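The loop above builds a constant-to-ordinal map that mirrors Enum.ordinal(). A tiny self-contained sketch of the same mapping outside the serializer, using a hypothetical Color enum:

import java.util.HashMap;
import java.util.Map;

public class EnumOrdinalDemo {
    enum Color { RED, GREEN, BLUE } // hypothetical example enum

    public static void main(String[] args) {
        Color[] values = Color.class.getEnumConstants();
        Map<Color, Integer> valueToOrdinal = new HashMap<>(values.length);
        int i = 0;
        for (Color value : values) {
            valueToOrdinal.put(value, i++);
        }
        System.out.println(valueToOrdinal.get(Color.BLUE)); // 2, same as Color.BLUE.ordinal()
    }
}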
public AutoClosablePath(final Path path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(path.isAbsolute(), "Path must be absolute.");
    this.path = path;
}
public CassandraOutputFormatBase(String insertQuery, ClusterBuilder builder) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(insertQuery), "Query cannot be null or empty");
    Preconditions.checkNotNull(builder, "Builder cannot be null");
    this.insertQuery = insertQuery;
    this.builder = builder;
}
/**
 * Creates a new named {@code OutputTag} with the given id and output {@link TypeInformation}.
 *
 * @param id The id of the created {@code OutputTag}.
 * @param typeInfo The {@code TypeInformation} for the side output.
 */
public OutputTag(String id, TypeInformation<T> typeInfo) {
    Preconditions.checkNotNull(id, "OutputTag id cannot be null.");
    Preconditions.checkArgument(!id.isEmpty(), "OutputTag id must not be empty.");
    this.id = id;
    this.typeInfo = Preconditions.checkNotNull(typeInfo, "TypeInformation cannot be null.");
}
PeriodicOffsetCommitter(
        ZookeeperOffsetHandler offsetHandler,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates,
        ExceptionProxy errorHandler,
        long commitInterval) {
    this.offsetHandler = checkNotNull(offsetHandler);
    this.partitionStates = checkNotNull(partitionStates);
    this.errorHandler = checkNotNull(errorHandler);
    this.commitInterval = commitInterval;
    checkArgument(commitInterval > 0);
}
/**
 * Sets the path of the ORC file(s).
 * If the path specifies a directory, it will be recursively enumerated.
 *
 * @param path The path of the ORC file(s).
 * @return The builder.
 */
public Builder path(String path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(!path.isEmpty(), "Path must not be empty.");
    this.path = path;
    return this;
}
private CassandraSinkBaseConfig(
        int maxConcurrentRequests,
        Duration maxConcurrentRequestsTimeout) {
    Preconditions.checkArgument(maxConcurrentRequests > 0, "Max concurrent requests is expected to be positive");
    Preconditions.checkNotNull(maxConcurrentRequestsTimeout, "Max concurrent requests timeout cannot be null");
    // !isNegative() also admits a zero duration, so the message says non-negative rather than positive
    Preconditions.checkArgument(!maxConcurrentRequestsTimeout.isNegative(), "Max concurrent requests timeout is expected to be non-negative");
    this.maxConcurrentRequests = maxConcurrentRequests;
    this.maxConcurrentRequestsTimeout = maxConcurrentRequestsTimeout;
}
public RowTypeInfo(TypeInformation<?>[] types, String[] fieldNames) {
    super(Row.class, types);

    checkNotNull(fieldNames, "FieldNames should not be null.");
    checkArgument(
        types.length == fieldNames.length,
        "Number of field types and names is different.");
    checkArgument(
        !hasDuplicateFieldNames(fieldNames),
        "Field names are not unique.");

    this.fieldNames = Arrays.copyOf(fieldNames, fieldNames.length);
}
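All of these hits rely on the same two contracts: checkArgument throws IllegalArgumentException when its condition is false, and checkNotNull throws NullPointerException, otherwise returning its argument, which is why it can be used inline in field assignments. A minimal sketch, assuming org.apache.flink.util.Preconditions (the Guava-style variant used above) is available; the class PreconditionsDemo is hypothetical:

import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;

public class PreconditionsDemo {
    public static void main(String[] args) {
        // checkNotNull returns the reference, enabling inline assignment
        String name = checkNotNull("flink", "name must not be null");
        checkArgument(!name.isEmpty(), "name must not be empty"); // passes silently

        int offset = -1;
        try {
            checkArgument(offset >= 0, "offset must be >= 0");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // "offset must be >= 0"
        }

        try {
            checkNotNull(null, "delegate");
        } catch (NullPointerException e) {
            System.out.println(e.getMessage()); // "delegate"
        }
    }
}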