/**
 * Creates an {@code IteratorExecutor} backed by a fixed-size thread pool.
 *
 * @param runnableIterator iterator supplying the {@link Callable}s to execute
 * @param numThreads number of worker threads in the pool
 * @param threadFactory factory used to create the pool's threads
 */
public IteratorExecutor(Iterator<Callable<T>> runnableIterator, int numThreads, ThreadFactory threadFactory) {
  // Record configuration first; the executor and completion service are derived from it.
  this.iterator = runnableIterator;
  this.numThreads = numThreads;
  this.executed = false;
  // Fixed pool wrapped in the logging decorator so uncaught task failures get logged.
  this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(numThreads, threadFactory));
  this.completionService = new ExecutorCompletionService<>(this.executor);
}
public ParallelRunner(int threads, FileSystem fs, FailPolicy failPolicy) { this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelRunner")))); this.fs = fs; this.failPolicy = failPolicy; }
public AbstractTaskStateTracker(int coreThreadPoolSize, Logger logger) { Preconditions.checkArgument(coreThreadPoolSize > 0, "Thread pool size should be positive"); this.taskMetricsUpdaterExecutor = ExecutorsUtils.loggingDecorator( new ScheduledThreadPoolExecutor(coreThreadPoolSize, ExecutorsUtils.newThreadFactory(Optional.of(logger), Optional.of("TaskStateTracker-%d")))); this.logger = logger; }
/**
 * Start the {@link TaskScheduler}.
 *
 * <p>Uses a core pool size of 1 rather than 0: the {@code ScheduledThreadPoolExecutor}
 * documentation warns that a zero core size can leave the pool without any thread to
 * run tasks once they become eligible (and causes busy-spinning on older JDKs).
 *
 * @param name the name of the {@link TaskScheduler}
 */
@Override
final void startImpl(Optional<String> name) {
  this.executorService = ExecutorsUtils.loggingDecorator(Executors.newScheduledThreadPool(1,
      ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), name)));
}
/**
 * Builds the listening executor used for background work: a scaling pool that grows
 * up to {@code threadPoolSize} threads and lets idle threads die after 10 seconds.
 */
private ListeningExecutorService getExecutorService() {
  ExecutorService scalingPool =
      ScalingThreadPoolExecutor.newScalingThreadPool(0, this.threadPoolSize, TimeUnit.SECONDS.toMillis(10));
  return ExecutorsUtils.loggingDecorator(scalingPool);
}
/**
 * Creates an async trash that delegates deletions to a proxied trash on a background pool.
 *
 * @param fs file system backing the trash
 * @param properties configuration; {@code MAX_DELETING_THREADS_KEY} overrides the pool's max size
 * @param user user to proxy as when deleting
 * @throws IOException if the underlying proxied trash cannot be created
 */
public AsyncTrash(FileSystem fs, Properties properties, String user) throws IOException {
  // Use the configured cap when present, otherwise the default.
  int maxDeletingThreads = properties.containsKey(MAX_DELETING_THREADS_KEY)
      ? Integer.parseInt(properties.getProperty(MAX_DELETING_THREADS_KEY))
      : DEFAULT_MAX_DELETING_THREADS;
  this.innerTrash = TrashFactory.createProxiedTrash(fs, properties, user);
  // Exiting service: Guava arranges for the pool to terminate when the JVM exits.
  this.executor = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingExecutorService(
      ScalingThreadPoolExecutor.newScalingThreadPool(0, maxDeletingThreads, 100,
          ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("Async-trash-delete-pool-%d")))));
}
/**
 * Event loop for the tunnel: registers the accept handler on the server channel,
 * then dispatches ready selection keys until this thread is interrupted.
 */
@Override public void run() {
  try {
    // Register interest in new connections; the AcceptHandler is attached to the
    // server channel's key, wrapped so its failures are logged.
    Tunnel.this.server.register(this.selector, SelectionKey.OP_ACCEPT,
        ExecutorsUtils.loggingDecorator(new AcceptHandler(Tunnel.this.server, this.selector, Tunnel.this.config)));
    // NOTE: Thread.interrupted() also clears the interrupt flag; the loop ends when
    // another thread interrupts this one.
    while (!Thread.interrupted()) {
      this.selector.select(); // blocks until at least one channel is ready
      Set<SelectionKey> selectionKeys = this.selector.selectedKeys();
      for (SelectionKey selectionKey : selectionKeys) {
        dispatch(selectionKey);
      }
      // selectedKeys() returns the selector's live set; it must be cleared manually
      // after handling, or the same keys would be re-processed on the next select().
      selectionKeys.clear();
    }
  } catch (IOException ioe) {
    LOG.error("Unhandled IOException. Tunnel will close", ioe);
  }
  LOG.info("Closing tunnel");
}
protected HiveRegister(State state) { this.props = new HiveRegProps(state); this.hiveDbRootDir = this.props.getDbRootDir(); this.executor = ExecutorsUtils.loggingDecorator( ScalingThreadPoolExecutor.newScalingThreadPool(0, this.props.getNumThreads(), TimeUnit.SECONDS.toMillis(10), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(getClass().getSimpleName())))); }
private RootMetricContext(List<Tag<?>> tags) throws NameConflictException { super(ROOT_METRIC_CONTEXT, null, tags, true); this.innerMetricContexts = Sets.newConcurrentHashSet(); this.referenceQueue = new ReferenceQueue<>(); this.referenceQueueExecutorService = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("GobblinMetrics-ReferenceQueue"))))); this.referenceQueueExecutorService.scheduleWithFixedDelay(new CheckReferenceQueue(), 0, 2, TimeUnit.SECONDS); this.reporters = Sets.newConcurrentHashSet(); this.reportingStarted = false; addShutdownHook(); }
this.taskExecutor = ExecutorsUtils.loggingDecorator(Executors.newScheduledThreadPool( taskExecutorThreadPoolSize, ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("TaskExecutor-%d")))); this.taskCreateAndRunTimer = new Timer(new SlidingTimeWindowReservoir(timerWindowSize, TimeUnit.MINUTES)); this.forkExecutor = ExecutorsUtils.loggingDecorator( new ThreadPoolExecutor(
/** * @param state This is a Job State */ public HiveRegistrationPublisher(State state) { super(state); this.hiveRegister = this.closer.register(HiveRegister.get(state)); this.hivePolicyExecutor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(new HiveRegProps(state).getNumThreads(), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("HivePolicyExecutor-%d")))); this.metricContext = Instrumented.getMetricContext(state, HiveRegistrationPublisher.class); isPathDedupeEnabled = state.getPropAsBoolean(PATH_DEDUPE_ENABLED, this.DEFAULT_PATH_DEDUPE_ENABLED); }
Integer.parseInt(props.getProperty(MAX_CONCURRENT_DATASETS_CLEANED, DEFAULT_MAX_CONCURRENT_DATASETS_CLEANED)), 100, ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("Dataset-cleaner-pool-%d"))); this.service = ExecutorsUtils.loggingDecorator(executor);
ExecutorService executorService = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads)); if(queries == 0) { queries = 50000000l;
public IteratorExecutor(Iterator<Callable<T>> runnableIterator, int numThreads, ThreadFactory threadFactory) { this.numThreads = numThreads; this.iterator = runnableIterator; this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(numThreads, threadFactory)); this.completionService = new ExecutorCompletionService<>(this.executor); this.executed = false; }
/**
 * Initializes the tracker and its scheduled metrics-updater pool.
 *
 * @param coreThreadPoolSize positive core size for the updater pool
 * @param logger logger used by both this tracker and the pool's threads
 */
public AbstractTaskStateTracker(int coreThreadPoolSize, Logger logger) {
  Preconditions.checkArgument(coreThreadPoolSize > 0, "Thread pool size should be positive");
  this.logger = logger;
  // Threads named "TaskStateTracker-%d"; decorator logs failures from scheduled updates.
  this.taskMetricsUpdaterExecutor = ExecutorsUtils.loggingDecorator(new ScheduledThreadPoolExecutor(
      coreThreadPoolSize, ExecutorsUtils.newThreadFactory(Optional.of(logger), Optional.of("TaskStateTracker-%d"))));
}
/**
 * Start the {@link TaskScheduler}.
 *
 * <p>Uses a core pool size of 1 rather than 0: the {@code ScheduledThreadPoolExecutor}
 * documentation warns that a zero core size can leave the pool without any thread to
 * run tasks once they become eligible (and causes busy-spinning on older JDKs).
 *
 * @param name the name of the {@link TaskScheduler}
 */
@Override
final void startImpl(Optional<String> name) {
  this.executorService = ExecutorsUtils.loggingDecorator(Executors.newScheduledThreadPool(1,
      ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), name)));
}
/**
 * Builds a runner whose tasks execute on a fixed pool of {@code threads} workers.
 *
 * @param threads worker count
 * @param fs target file system
 * @param failPolicy what to do when a task fails
 */
public ParallelRunner(int threads, FileSystem fs, FailPolicy failPolicy) {
  // Pool threads are named "ParallelRunner"; the decorator logs task failures.
  this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads,
      ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelRunner"))));
  this.fs = fs;
  this.failPolicy = failPolicy;
}
public AsyncTrash(FileSystem fs, Properties properties, String user) throws IOException { int maxDeletingThreads = DEFAULT_MAX_DELETING_THREADS; if (properties.containsKey(MAX_DELETING_THREADS_KEY)) { maxDeletingThreads = Integer.parseInt(properties.getProperty(MAX_DELETING_THREADS_KEY)); } this.innerTrash = TrashFactory.createProxiedTrash(fs, properties, user); this.executor = ExecutorsUtils.loggingDecorator( MoreExecutors.getExitingExecutorService(ScalingThreadPoolExecutor.newScalingThreadPool(0, maxDeletingThreads, 100, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("Async-trash-delete-pool-%d"))))); }
/**
 * Creates the root context and schedules the periodic reference-queue check.
 *
 * <p>Keeps the original statement order: the check task is scheduled with zero
 * initial delay, immediately after the queue and its executor are created.
 *
 * @param tags tags attached to this root context
 * @throws NameConflictException on a metric-context name conflict
 */
private RootMetricContext(List<Tag<?>> tags) throws NameConflictException {
  super(ROOT_METRIC_CONTEXT, null, tags, true);
  this.innerMetricContexts = Sets.newConcurrentHashSet();
  this.referenceQueue = new ReferenceQueue<>();
  // One scheduled thread, named for the metrics reference queue, exiting on JVM shutdown.
  this.referenceQueueExecutorService = ExecutorsUtils.loggingDecorator(
      MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1,
          ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("GobblinMetrics-ReferenceQueue")))));
  this.referenceQueueExecutorService.scheduleWithFixedDelay(new CheckReferenceQueue(), 0, 2, TimeUnit.SECONDS);
  this.reporters = Sets.newConcurrentHashSet();
  this.reportingStarted = false;
  addShutdownHook();
}
/** * @param state This is a Job State */ public HiveRegistrationPublisher(State state) { super(state); this.hiveRegister = this.closer.register(HiveRegister.get(state)); this.hivePolicyExecutor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(new HiveRegProps(state).getNumThreads(), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("HivePolicyExecutor-%d")))); this.metricContext = Instrumented.getMetricContext(state, HiveRegistrationPublisher.class); isPathDedupeEnabled = state.getPropAsBoolean(PATH_DEDUPE_ENABLED, this.DEFAULT_PATH_DEDUPE_ENABLED); }