/**
 * Creates a thread-safe set containing the given elements. The returned set is backed by a
 * {@link ConcurrentHashMap} and therefore carries the same concurrency guarantees; it is
 * serializable and rejects {@code null} elements.
 *
 * @param elements the elements the new set should contain
 * @return a new concurrent set holding the distinct elements of {@code elements}
 * @throws NullPointerException if {@code elements} is null or contains a null element
 * @since 15.0
 */
public static <E> Set<E> newConcurrentHashSet(Iterable<? extends E> elements) {
  Set<E> result = newConcurrentHashSet();
  // Copy each element in turn; the backing set throws NullPointerException on nulls.
  for (E element : elements) {
    result.add(element);
  }
  return result;
}
/**
 * Tracks the set of servers that currently hold the given segment.
 *
 * @param segment the segment being tracked; must not be null
 * @throws NullPointerException if {@code segment} is null
 */
public SegmentLoadInfo(DataSegment segment)
{
  // checkNotNull returns its argument, so validation and assignment fold into one step.
  this.segment = Preconditions.checkNotNull(segment, "segment");
  this.servers = Sets.newConcurrentHashSet();
}
/**
 * Returns a new thread-safe set (backed by a {@link ConcurrentHashMap}) populated with the
 * distinct elements of {@code elements}. Unlike {@code HashSet}, null elements are rejected;
 * the returned set is serializable.
 *
 * @param elements source of the initial elements
 * @return a new concurrent set containing those elements, duplicates removed
 * @throws NullPointerException if {@code elements} or any contained element is null
 * @since 15.0
 */
public static <E> Set<E> newConcurrentHashSet(Iterable<? extends E> elements) {
  Set<E> created = newConcurrentHashSet();
  // Delegate the bulk copy; null elements surface as NullPointerException from the set.
  Iterables.addAll(created, elements);
  return created;
}
@Override
protected Set<String> create(String[] elements) {
  // Test-suite generator hook: build the set under test as a ConcurrentHashMap-backed set
  // seeded with the generator-supplied elements (duplicates collapse).
  return Sets.newConcurrentHashSet(Arrays.asList(elements));
}
})
/**
 * {@link AbstractSet} substitute without the potentially-quadratic {@code removeAll}
 * implementation.
 */
abstract static class ImprovedAbstractSet<E> extends AbstractSet<E> {
  @Override
  public boolean removeAll(Collection<?> c) {
    // Delegate to the shared helper rather than AbstractSet's default, which can degrade
    // to quadratic time for some argument types (see class javadoc).
    return removeAllImpl(this, c);
  }

  @Override
  public boolean retainAll(Collection<?> c) {
    // Null-check the argument eagerly; the super call itself is kept for GWT compatibility.
    return super.retainAll(checkNotNull(c)); // GWT compatibility
  }
}
/**
 * Returns a future that completes once every stage in {@code stages} has reached a state
 * satisfying {@code predicate}.
 *
 * @param stages stages to watch; must be non-empty
 * @param predicate state test applied on every stage state change
 * @return a future completed (with null) when all stages satisfy the predicate
 */
private static ListenableFuture<?> whenAllStages(Collection<SqlStageExecution> stages, Predicate<StageState> predicate)
{
    checkArgument(!stages.isEmpty(), "stages is empty");

    // Stage ids that have not yet satisfied the predicate.
    Set<StageId> pending = newConcurrentHashSet(stages.stream()
            .map(SqlStageExecution::getStageId)
            .collect(toSet()));

    SettableFuture<?> done = SettableFuture.create();
    stages.forEach(stage -> stage.addStateChangeListener(state -> {
        // Complete exactly once: only the listener that removes the last pending id
        // observes an empty set and sets the future.
        if (predicate.test(state) && pending.remove(stage.getStageId()) && pending.isEmpty()) {
            done.set(null);
        }
    }));
    return done;
}
@Override
public Set<? extends Position> asyncReplayEntries(Set<? extends Position> positions,
    ReadEntriesCallback callback, Object ctx) {
  // Stub: the requested positions are ignored and the callback is never invoked; an empty
  // concurrent set ("nothing left to replay") is returned immediately.
  // NOTE(review): presumably adequate for a test/mock context — confirm against the
  // interface contract if this implementation is used elsewhere.
  return Sets.newConcurrentHashSet();
}
public DiffApplier(int diffParallelism, FileSource source, FileDestination destination) { Preconditions.checkNotNull(source); Preconditions.checkNotNull(destination); this.diffsFailedPaths = new ConcurrentSkipListSet<>(); this.refactoredPaths = Sets.newConcurrentHashSet(); this.source = source; this.destination = destination; this.completedFiles = new AtomicInteger(0); this.stopwatch = Stopwatch.createUnstarted(); // configure a bounded queue and a rejectedexecutionpolicy. // In this case CallerRuns may be appropriate. this.workerService = new ThreadPoolExecutor( 0, diffParallelism, 5, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy()); }
/**
 * Creates the set service, backing its state with a thread-safe
 * ConcurrentHashMap-based set.
 */
public DefaultDistributedSetService() {
  super(DistributedSetType.instance(), Sets.newConcurrentHashSet());
}
public void testNewConcurrentHashSetFromCollection() {
  // Copying factory: the result must contain exactly the distinct elements of the source.
  verifySetContents(Sets.newConcurrentHashSet(SOME_COLLECTION), SOME_COLLECTION);
}
public void testNewConcurrentHashSetEmpty() {
  // No-arg factory: the result must start out empty.
  verifySetContents(Sets.<Integer>newConcurrentHashSet(), EMPTY_COLLECTION);
}
private RootMetricContext(List<Tag<?>> tags) throws NameConflictException {
  // Root context: no parent, isRoot = true.
  super(ROOT_METRIC_CONTEXT, null, tags, true);
  this.innerMetricContexts = Sets.newConcurrentHashSet();
  // Queue drained by the CheckReferenceQueue task scheduled below.
  this.referenceQueue = new ReferenceQueue<>();
  // Single-threaded scheduler; "exiting" + logging decorators so it does not keep the JVM alive.
  this.referenceQueueExecutorService = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("GobblinMetrics-ReferenceQueue")))));
  // Poll the reference queue every 2 seconds, starting immediately.
  this.referenceQueueExecutorService.scheduleWithFixedDelay(new CheckReferenceQueue(), 0, 2, TimeUnit.SECONDS);
  this.reporters = Sets.newConcurrentHashSet();
  this.reportingStarted = false;
  addShutdownHook();
}
// Reset to a fresh thread-safe (ConcurrentHashMap-backed) set so concurrent writers can
// record entries safely. NOTE(review): field semantics inferred from its name — confirm.
seenExceptionsLocal = newConcurrentHashSet();
/**
 * Creates a metric context with the given name, parent, and tags.
 *
 * @param name non-empty context name
 * @param parent enclosing context, or null
 * @param tags tags attached to this context
 * @param isRoot true only for the singleton root context
 * @throws NameConflictException declared for callers; construction failures surface via
 *         {@link Throwables#propagate} — TODO confirm where the conflict is detected
 */
protected MetricContext(String name, MetricContext parent, List<Tag<?>> tags, boolean isRoot)
    throws NameConflictException {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(name));

  this.closer = Closer.create();
  try {
    // Register the inner context with the closer so it is released when this context closes.
    this.innerMetricContext = this.closer.register(new InnerMetricContext(this, name, parent, tags));
  } catch(ExecutionException ee) {
    throw Throwables.propagate(ee);
  }

  this.contextAwareMetricsSet = Sets.newConcurrentHashSet();
  this.notificationTargets = Maps.newConcurrentMap();
  this.executorServiceOptional = Optional.absent();

  this.notificationTimer = new ContextAwareTimer(this, GOBBLIN_METRICS_NOTIFICATIONS_TIMER_NAME);
  register(this.notificationTimer);

  // Non-root contexts attach themselves to the global root context.
  if (!isRoot) {
    RootMetricContext.get().addMetricContext(this);
  }
}
// Fresh thread-safe (ConcurrentHashMap-backed) set for recording exceptions from
// concurrent writers. NOTE(review): field semantics inferred from its name — confirm.
seenExceptionsLocal = newConcurrentHashSet();
@Override
public void restore(BackupInput input) {
  // Reads must occur in the exact order the corresponding backup wrote the fields.
  // NOTE(review): backup() is not visible here — confirm the write order matches.
  collection = Sets.newConcurrentHashSet(input.readObject());  // rehydrate into a thread-safe set
  lockedElements = input.readObject();
  transactions = input.readObject();
}
}
// Register the task under its node, lazily creating the node's concurrent task set on
// first use (atomic per key if tasks is a ConcurrentHashMap — confirm the map type).
tasks.computeIfAbsent(node, key -> newConcurrentHashSet()).add(task);
// NOTE(review): presumably mirrors the same node->task association in nodeTaskMap —
// confirm the two structures are meant to stay in sync.
nodeTaskMap.addTask(node, task);
public CuratorInventoryManager(
    CuratorFramework curatorFramework,
    InventoryManagerConfig config,
    ExecutorService exec,
    CuratorInventoryManagerStrategy<ContainerClass, InventoryClass> strategy
)
{
  this.curatorFramework = curatorFramework;
  this.config = config;
  this.strategy = strategy;

  this.containers = new ConcurrentHashMap<>();
  // Containers seen but not yet fully initialized. NOTE(review): concurrent set —
  // presumably mutated from cache event threads; confirm.
  this.uninitializedInventory = Sets.newConcurrentHashSet();

  this.pathChildrenCacheExecutor = exec;
  this.cacheFactory = new PathChildrenCacheFactory.Builder()
      //NOTE: cacheData is temporarily set to false and we get data directly from ZK on each event.
      //this is a workaround to solve curator's out-of-order events problem
      //https://issues.apache.org/jira/browse/CURATOR-191
      .withCacheData(false)
      .withCompressed(true)
      .withExecutorService(pathChildrenCacheExecutor)
      // Executor is caller-supplied, so the cache must not shut it down on close.
      .withShutdownExecutorOnClose(false)
      .build();
}
public ContextAwareReporter(String name, Config config) {
  this.name = name;
  this.config = config;
  this.started = false;
  RootMetricContext.get().addNewReporter(this);
  // Route metric notifications back into this reporter's notificationCallback; the
  // returned UUID identifies the registration (presumably used later to remove the
  // target — confirm).
  this.notificationTargetUUID = RootMetricContext.get().addNotificationTarget(new Function<Notification, Void>() {
    @Nullable
    @Override
    public Void apply(Notification input) {
      notificationCallback(input);
      return null;
    }
  });
  this.contextFilter = ContextFilterFactory.createContextFilter(config);
  this.contextsToReport = Sets.newConcurrentHashSet();
  // Seed the report set with the contexts that currently match the filter.
  for (MetricContext context : this.contextFilter.getMatchingContexts()) {
    this.contextsToReport.add(context.getInnerMetricContext());
  }
}
@Test
public void testFailedConversion() throws Exception {
  MyAsyncConverter1to1 converter = new MyAsyncConverter1to1();
  List<Throwable> errors = Lists.newArrayList();
  AtomicBoolean done = new AtomicBoolean(false);

  WorkUnitState workUnitState = new WorkUnitState();
  // Allow up to 3 in-flight async conversions.
  workUnitState.setProp(AsyncConverter1to1.MAX_CONCURRENT_ASYNC_CONVERSIONS_KEY, 3);

  // Stream of three records; the middle one is the converter's failure trigger.
  RecordStreamWithMetadata<String, String> stream =
      new RecordStreamWithMetadata<>(Flowable.just("0", MyAsyncConverter1to1.FAIL, "1").map(RecordEnvelope::new),
          GlobalMetadata.<String>builder().schema("schema").build());

  // Concurrent set: records are collected on a background Rx thread.
  Set<String> outputRecords = Sets.newConcurrentHashSet();
  converter.processStream(stream, workUnitState).getRecordStream().subscribeOn(Schedulers.newThread())
      .subscribe(r -> outputRecords.add(((RecordEnvelope<String>) r).getRecord()), errors::add, () -> done.set(true));

  // Wait (up to 100 ms) for the error to propagate, then assert exactly one failure
  // whose cause carries the injected message.
  Assert.assertTrue(ExponentialBackoff.awaitCondition().maxWait(100L).callable(() -> errors.size() > 0).await());
  Assert.assertEquals(errors.size(), 1);
  Assert.assertEquals(errors.get(0).getCause().getMessage(), "injected failure");
}