/**
 * Sets how many IDs are pre-fetched on the background when one call to
 * {@link FlakeIdGenerator#newId()} is made. Default is 100.
 *
 * @param prefetchCount the desired prefetch count, in the range 1..100,000.
 * @return this instance for fluent API
 * @throws IllegalArgumentException if {@code prefetchCount} is outside the allowed range
 */
public ClientFlakeIdGeneratorConfig setPrefetchCount(int prefetchCount) {
    // Equivalent bound check written inclusively on both ends.
    checkTrue(prefetchCount >= 1 && prefetchCount <= MAXIMUM_PREFETCH_COUNT,
            "prefetch-count must be 1.." + MAXIMUM_PREFETCH_COUNT + ", not " + prefetchCount);
    this.prefetchCount = prefetchCount;
    return this;
}
/**
 * Creates an assigner for the given topics.
 *
 * @param topics           the subscribed topics
 * @param partitionCounts  partition count per topic, parallel to {@code topics}
 * @param totalParallelism total processor parallelism across the job
 */
KafkaPartitionAssigner(List<String> topics, int[] partitionCounts, int totalParallelism) {
    // Exactly one partition-count entry is required per topic.
    Preconditions.checkTrue(topics.size() == partitionCounts.length,
            "Different length between topics and partition counts");
    this.topics = topics;
    this.partitionCounts = partitionCounts;
    this.totalParallelism = totalParallelism;
}
/**
 * Constructs the partition assigner.
 *
 * @param topics           topics being consumed
 * @param partitionCounts  number of partitions for each topic, index-aligned with {@code topics}
 * @param totalParallelism total parallelism of the consuming vertex
 */
KafkaPartitionAssigner(List<String> topics, int[] partitionCounts, int totalParallelism) {
    // The two inputs must be index-aligned: one count per topic.
    Preconditions.checkTrue(topics.size() == partitionCounts.length,
            "Different length between topics and partition counts");
    this.topics = topics;
    this.partitionCounts = partitionCounts;
    this.totalParallelism = totalParallelism;
}
/**
 * Creates the operation for the given cache names.
 *
 * @param names cache names to fetch invalidation metadata for; must not be null or empty
 */
public CacheGetInvalidationMetaDataOperation(List<String> names) {
    checkTrue(isNotEmpty(names), "names cannot be null or empty");
    this.names = names;
}
/**
 * Constructs an open-addressing hash set of {@code long} values.
 *
 * @param capacity     maximum number of elements the set is expected to hold; 0..2^29
 * @param missingValue sentinel value that marks an empty slot and therefore
 *                     must never be added to the set
 */
public LongHashSet(final int capacity, final long missingValue) {
    // Validate both bounds: the original checked only the upper bound, so a negative
    // capacity surfaced later as a confusing NegativeArraySizeException / broken mask.
    checkTrue(capacity >= 0, "Capacity must not be negative");
    checkTrue(capacity <= MAX_CAPACITY, "Maximum capacity is 2^29");
    this.capacity = capacity;
    size = 0;
    this.missingValue = missingValue;
    // Backing array is the next power of two >= 2 * capacity, keeping occupancy
    // at or below ~50% and allowing the mask trick to replace modulo.
    final int arraySize = nextPowerOfTwo(2 * capacity);
    mask = arraySize - 1;
    values = new long[arraySize];
    Arrays.fill(values, missingValue);
    // NB: the iterator references 'values' in its constructor, so it must be
    // created only after 'values' has been assigned.
    iterator = new LongIterator(missingValue, values);
}
/**
 * Constructs an open-addressing hash set of {@code int} values.
 *
 * @param capacity     maximum number of elements the set is expected to hold; 0..2^29
 * @param missingValue sentinel value that marks an empty slot and therefore
 *                     must never be added to the set
 */
public IntHashSet(final int capacity, final int missingValue) {
    // Validate both bounds: the original checked only the upper bound, so a negative
    // capacity surfaced later as a confusing NegativeArraySizeException / broken mask.
    checkTrue(capacity >= 0, "Capacity must not be negative");
    checkTrue(capacity <= MAX_CAPACITY, "Maximum capacity is 2^29");
    this.capacity = capacity;
    size = 0;
    this.missingValue = missingValue;
    // Backing array is the next power of two >= 2 * capacity, keeping occupancy
    // at or below ~50% and allowing the mask trick to replace modulo.
    final int arraySize = nextPowerOfTwo(2 * capacity);
    mask = arraySize - 1;
    values = new int[arraySize];
    Arrays.fill(values, missingValue);
    // NB: the iterator references 'values' in its constructor, so it must be
    // created only after 'values' has been assigned.
    iterator = new IntIterator(missingValue, values);
}
/**
 * Creates a fixed-capacity hash set of primitive {@code long}s.
 *
 * @param capacity     expected maximum element count, at most 2^29 and not negative
 * @param missingValue value used to mark empty slots; must not be stored as an element
 */
public LongHashSet(final int capacity, final long missingValue) {
    // Reject negative capacity up front; previously only the upper bound was checked
    // and a negative value failed later with a NegativeArraySizeException.
    checkTrue(capacity >= 0, "Capacity must not be negative");
    checkTrue(capacity <= MAX_CAPACITY, "Maximum capacity is 2^29");
    this.capacity = capacity;
    size = 0;
    this.missingValue = missingValue;
    // Size the array to the next power of two >= 2 * capacity so indexing can use 'mask'.
    final int arraySize = nextPowerOfTwo(2 * capacity);
    mask = arraySize - 1;
    values = new long[arraySize];
    Arrays.fill(values, missingValue);
    // NB: references values in the constructor, so must be assigned after values
    iterator = new LongIterator(missingValue, values);
}
/**
 * Creates the grouping processor.
 *
 * @param groupKeyFns  one key extractor per aggregated input edge; the count must
 *                     match the aggregate operation's arity
 * @param aggrOp       the aggregate operation to apply per group
 * @param mapToOutputFn maps a (key, finished result) pair to an output item
 */
public GroupP(
        @Nonnull List<DistributedFunction<?, ? extends K>> groupKeyFns,
        @Nonnull AggregateOperation<A, R> aggrOp,
        @Nonnull BiFunction<? super K, ? super R, OUT> mapToOutputFn
) {
    checkTrue(groupKeyFns.size() == aggrOp.arity(),
            groupKeyFns.size() + " key functions provided for " + aggrOp.arity()
                    + "-arity aggregate operation");
    this.groupKeyFns = groupKeyFns;
    this.aggrOp = aggrOp;
    // The traverser is backed by a lazy stream over keyToAcc: each accumulated entry
    // is finished and mapped to an output item only when the traverser is consumed.
    this.resultTraverser = traverseStream(keyToAcc.entrySet().stream()
            .map(e -> mapToOutputFn.apply(e.getKey(), aggrOp.finishFn().apply(e.getValue()))));
}
/**
 * Creates a fixed-capacity hash set of primitive {@code int}s.
 *
 * @param capacity     expected maximum element count, at most 2^29 and not negative
 * @param missingValue value used to mark empty slots; must not be stored as an element
 */
public IntHashSet(final int capacity, final int missingValue) {
    // Reject negative capacity up front; previously only the upper bound was checked
    // and a negative value failed later with a NegativeArraySizeException.
    checkTrue(capacity >= 0, "Capacity must not be negative");
    checkTrue(capacity <= MAX_CAPACITY, "Maximum capacity is 2^29");
    this.capacity = capacity;
    size = 0;
    this.missingValue = missingValue;
    // Size the array to the next power of two >= 2 * capacity so indexing can use 'mask'.
    final int arraySize = nextPowerOfTwo(2 * capacity);
    mask = arraySize - 1;
    values = new int[arraySize];
    Arrays.fill(values, missingValue);
    // NB: references values in the constructor, so must be assigned after values
    iterator = new IntIterator(missingValue, values);
}
/**
 * Creates a sliding window definition.
 *
 * @param windowSize total window length; must be a positive integer multiple of {@code slideBy}
 * @param slideBy    the sliding step; must be positive
 */
SlidingWindowDef(long windowSize, long slideBy) {
    checkPositive(windowSize, "windowSize must be positive");
    checkPositive(slideBy, "slideBy must be positive");
    // Windows must tile evenly: the size has to be an exact multiple of the step.
    checkTrue(windowSize % slideBy == 0,
            "windowSize must be integer multiple of slideBy, mod(" + windowSize + ", " + slideBy + ") != 0");
    this.windowSize = windowSize;
    this.slideBy = slideBy;
}
/**
 * Loads all keys via the configured map store.
 *
 * @param replaceExistingValues whether values already in memory are replaced by loaded ones
 * @throws IllegalArgumentException if no map store is configured
 */
@Override
public void loadAll(boolean replaceExistingValues) {
    // Loading is only meaningful when a map store has been configured.
    checkTrue(isMapStoreEnabled(), "First you should configure a map store");
    loadAllInternal(replaceExistingValues);
}
/**
 * Validates that a NATIVE-format Near Cache is only configured together with
 * enabled native memory; other in-memory formats need no native memory.
 *
 * @param nearCacheConfig    the Near Cache config to validate
 * @param nativeMemoryConfig the native memory config to validate against
 * @return the given {@code nearCacheConfig} for fluent use
 */
private static NearCacheConfig checkNearCacheConfig(NearCacheConfig nearCacheConfig,
                                                    NativeMemoryConfig nativeMemoryConfig) {
    // Only the NATIVE format depends on native memory being available.
    if (nearCacheConfig.getInMemoryFormat() == NATIVE) {
        checkTrue(nativeMemoryConfig.isEnabled(),
                "Enable native memory config to use NATIVE in-memory-format for Near Cache");
    }
    return nearCacheConfig;
}
/**
 * Ensures that the NATIVE in-memory format for a Near Cache is used only when
 * native memory is enabled.
 *
 * @param nearCacheConfig    config under validation
 * @param nativeMemoryConfig native memory settings to check
 * @return the validated {@code nearCacheConfig}
 */
private static NearCacheConfig checkNearCacheConfig(NearCacheConfig nearCacheConfig,
                                                    NativeMemoryConfig nativeMemoryConfig) {
    // Formats other than NATIVE have no native-memory requirement.
    if (nearCacheConfig.getInMemoryFormat() == NATIVE) {
        checkTrue(nativeMemoryConfig.isEnabled(),
                "Enable native memory config to use NATIVE in-memory-format for Near Cache");
    }
    return nearCacheConfig;
}
/**
 * Creates a skew-reduction policy over {@code numQueues} input queues.
 *
 * @param numQueues                 number of input queues tracked
 * @param priorityDrainingThreshold skew level at which priority draining kicks in;
 *                                  must be non-negative and at most {@code maxSkew}
 * @param forceAdvanceWm            whether to forcibly advance the watermark
 */
public SkewReductionPolicy(int numQueues, long maxSkew, long priorityDrainingThreshold, boolean forceAdvanceWm) {
    checkNotNegative(maxSkew, "maxSkew must not be a negative number");
    checkNotNegative(priorityDrainingThreshold, "priorityDrainingThreshold must not be a negative number");
    // Fixed error message: the check is inclusive (<=), but the old text claimed
    // a strict "less than" relation.
    checkTrue(priorityDrainingThreshold <= maxSkew,
            "priorityDrainingThreshold must be less than or equal to maxSkew");
    this.maxSkew = maxSkew;
    this.priorityDrainingThreshold = priorityDrainingThreshold;
    this.forceAdvanceWm = forceAdvanceWm;
    // Every queue starts with the lowest possible watermark.
    queueWms = new long[numQueues];
    Arrays.fill(queueWms, Long.MIN_VALUE);
    // Initial drain order is the natural queue index order.
    drainOrderToQIdx = new int[numQueues];
    Arrays.setAll(drainOrderToQIdx, i -> i);
}
/**
 * Creates a sliding window policy.
 *
 * @param frameSize       length of one frame; must be positive
 * @param frameOffset     offset of frame boundaries; must be in {@code [0, frameSize)}
 * @param framesPerWindow number of frames in a full window; must be positive
 */
SlidingWindowPolicy(long frameSize, long frameOffset, long framesPerWindow) {
    // Fixed error message: the parameter is named frameSize; the old text said "frameLength".
    checkPositive(frameSize, "frameSize must be positive");
    checkNotNegative(frameOffset, "frameOffset must not be negative");
    checkTrue(frameOffset < frameSize, "frameOffset must be less than frameSize, offset=" + frameOffset
            + ", size=" + frameSize);
    checkPositive(framesPerWindow, "framesPerWindow must be positive");
    this.frameSize = frameSize;
    this.frameOffset = frameOffset;
    // The full window spans framesPerWindow consecutive frames.
    this.windowSize = frameSize * framesPerWindow;
}
/**
 * Loads the given keys via the configured map store.
 *
 * @param keys                  keys to load; must be non-null and contain no null elements
 * @param replaceExistingValues whether values already in memory are replaced by loaded ones
 * @throws IllegalArgumentException if no map store is configured
 * @throws NullPointerException     if {@code keys} is null or contains a null key
 */
@Override
public void loadAll(Set<K> keys, boolean replaceExistingValues) {
    // Validation order matters for the exception a caller observes:
    // map-store presence first, then the key set itself, then its elements.
    checkTrue(isMapStoreEnabled(), "First you should configure a map store");
    checkNotNull(keys, NULL_KEYS_ARE_NOT_ALLOWED);
    checkNoNullInside(keys, NULL_KEY_IS_NOT_ALLOWED);
    loadInternal(keys, null, replaceExistingValues);
}
/**
 * Asynchronously appends the whole collection to the ringbuffer.
 *
 * @param collection     items to add; non-null, non-empty, at most {@code MAX_BATCH_SIZE} items
 * @param overflowPolicy policy applied when the ringbuffer is full; non-null
 * @return future completing with the sequence of the last added item
 */
@Override
public ICompletableFuture<Long> addAllAsync(Collection<? extends E> collection, OverflowPolicy overflowPolicy) {
    checkNotNull(collection, "collection can't be null");
    checkNotNull(overflowPolicy, "overflowPolicy can't be null");
    checkFalse(collection.isEmpty(), "collection can't be empty");
    checkTrue(collection.size() <= MAX_BATCH_SIZE, "collection can't be larger than " + MAX_BATCH_SIZE);
    // Target the ringbuffer's own partition with the serialized batch.
    Operation addAllOp = new AddAllOperation(name, toDataArray(collection), overflowPolicy)
            .setPartitionId(partitionId);
    // Long.MAX_VALUE disables the normal call timeout for this invocation.
    return getOperationService()
            .createInvocationBuilder(null, addAllOp, partitionId)
            .setCallTimeout(Long.MAX_VALUE)
            .invoke();
}
/**
 * Adds every element of the collection to the ringbuffer without blocking the caller.
 *
 * @param collection     the batch to append; must be non-null, non-empty and no larger
 *                       than {@code MAX_BATCH_SIZE}
 * @param overflowPolicy how to behave when there is no remaining capacity; non-null
 * @return future with the sequence number of the last written item
 */
@Override
public ICompletableFuture<Long> addAllAsync(Collection<? extends E> collection, OverflowPolicy overflowPolicy) {
    checkNotNull(collection, "collection can't be null");
    checkNotNull(overflowPolicy, "overflowPolicy can't be null");
    checkFalse(collection.isEmpty(), "collection can't be empty");
    checkTrue(collection.size() <= MAX_BATCH_SIZE, "collection can't be larger than " + MAX_BATCH_SIZE);
    // Serialize the batch once and route the operation to the ringbuffer's partition.
    Operation batchOp = new AddAllOperation(name, toDataArray(collection), overflowPolicy)
            .setPartitionId(partitionId);
    OperationService opService = getOperationService();
    // Call timeout is disabled (Long.MAX_VALUE) for this invocation.
    return opService.createInvocationBuilder(null, batchOp, partitionId)
            .setCallTimeout(Long.MAX_VALUE)
            .invoke();
}
/**
 * Runs a MapReduce aggregation over this map using a job tracker derived from the map name.
 *
 * @param supplier    extracts/filters the values fed into the aggregation
 * @param aggregation the aggregation to compute
 * @return the aggregation result
 * @throws IllegalArgumentException if the map uses the NATIVE in-memory format,
 *                                  which MapReduce does not support
 */
@Override
public <SuppliedValue, Result> Result aggregate(Supplier<K, V, SuppliedValue> supplier,
                                                Aggregation<K, SuppliedValue, Result> aggregation) {
    checkTrue(NATIVE != mapConfig.getInMemoryFormat(),
            "NATIVE storage format is not supported for MapReduce");
    // Delegate to the overload that takes an explicit tracker, using one named after this map.
    HazelcastInstance instance = getNodeEngine().getHazelcastInstance();
    JobTracker tracker = instance.getJobTracker("hz::aggregation-map-" + getName());
    return aggregate(supplier, aggregation, tracker);
}
/**
 * Aggregates this map's entries via MapReduce, creating a job tracker named after the map.
 *
 * @param supplier    supplies the values participating in the aggregation
 * @param aggregation aggregation to apply to the supplied values
 * @return the computed result
 * @throws IllegalArgumentException if this map is configured with the NATIVE
 *                                  in-memory format (unsupported by MapReduce)
 */
@Override
public <SuppliedValue, Result> Result aggregate(Supplier<K, V, SuppliedValue> supplier,
                                                Aggregation<K, SuppliedValue, Result> aggregation) {
    checkTrue(NATIVE != mapConfig.getInMemoryFormat(),
            "NATIVE storage format is not supported for MapReduce");
    // Obtain a map-specific job tracker and forward to the tracker-accepting overload.
    HazelcastInstance hz = getNodeEngine().getHazelcastInstance();
    JobTracker jobTracker = hz.getJobTracker("hz::aggregation-map-" + getName());
    return aggregate(supplier, aggregation, jobTracker);
}