public IteratorExecutor(Iterator<Callable<T>> runnableIterator, int numThreads, ThreadFactory threadFactory) { this.numThreads = numThreads; this.iterator = runnableIterator; this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(numThreads, threadFactory)); this.completionService = new ExecutorCompletionService<>(this.executor); this.executed = false; }
public ParallelRunner(int threads, FileSystem fs, FailPolicy failPolicy) { this.executor = ExecutorsUtils.loggingDecorator(Executors.newFixedThreadPool(threads, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("ParallelRunner")))); this.fs = fs; this.failPolicy = failPolicy; }
public AbstractTaskStateTracker(int coreThreadPoolSize, Logger logger) { Preconditions.checkArgument(coreThreadPoolSize > 0, "Thread pool size should be positive"); this.taskMetricsUpdaterExecutor = ExecutorsUtils.loggingDecorator( new ScheduledThreadPoolExecutor(coreThreadPoolSize, ExecutorsUtils.newThreadFactory(Optional.of(logger), Optional.of("TaskStateTracker-%d")))); this.logger = logger; }
/**
 * Start the {@link TaskScheduler}.
 *
 * @param name the name of the {@link TaskScheduler}
 */
@Override
final void startImpl(Optional<String> name) {
  // NOTE(review): core pool size 0 for a scheduled pool relies on JDK 7+ semantics
  // (earlier JDKs busy-spin with no core threads) — confirm the intended minimum size.
  ScheduledExecutorService scheduler =
      Executors.newScheduledThreadPool(0, ExecutorsUtils.newDaemonThreadFactory(Optional.of(LOGGER), name));
  this.executorService = ExecutorsUtils.loggingDecorator(scheduler);
}
/**
 * Builds a logging-decorated scaling thread pool sized up to {@code this.threadPoolSize},
 * whose idle threads are reclaimed after a 10-second keep-alive.
 *
 * @return the decorated {@link ListeningExecutorService}
 */
private ListeningExecutorService getExecutorService() {
  long keepAliveMillis = TimeUnit.SECONDS.toMillis(10);
  return ExecutorsUtils.loggingDecorator(
      ScalingThreadPoolExecutor.newScalingThreadPool(0, this.threadPoolSize, keepAliveMillis));
}
public AsyncTrash(FileSystem fs, Properties properties, String user) throws IOException { int maxDeletingThreads = DEFAULT_MAX_DELETING_THREADS; if (properties.containsKey(MAX_DELETING_THREADS_KEY)) { maxDeletingThreads = Integer.parseInt(properties.getProperty(MAX_DELETING_THREADS_KEY)); } this.innerTrash = TrashFactory.createProxiedTrash(fs, properties, user); this.executor = ExecutorsUtils.loggingDecorator( MoreExecutors.getExitingExecutorService(ScalingThreadPoolExecutor.newScalingThreadPool(0, maxDeletingThreads, 100, ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("Async-trash-delete-pool-%d"))))); }
protected HiveRegister(State state) { this.props = new HiveRegProps(state); this.hiveDbRootDir = this.props.getDbRootDir(); this.executor = ExecutorsUtils.loggingDecorator( ScalingThreadPoolExecutor.newScalingThreadPool(0, this.props.getNumThreads(), TimeUnit.SECONDS.toMillis(10), ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of(getClass().getSimpleName())))); }
private RootMetricContext(List<Tag<?>> tags) throws NameConflictException { super(ROOT_METRIC_CONTEXT, null, tags, true); this.innerMetricContexts = Sets.newConcurrentHashSet(); this.referenceQueue = new ReferenceQueue<>(); this.referenceQueueExecutorService = ExecutorsUtils.loggingDecorator(MoreExecutors.getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1, ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("GobblinMetrics-ReferenceQueue"))))); this.referenceQueueExecutorService.scheduleWithFixedDelay(new CheckReferenceQueue(), 0, 2, TimeUnit.SECONDS); this.reporters = Sets.newConcurrentHashSet(); this.reportingStarted = false; addShutdownHook(); }
public DatasetCleaner(FileSystem fs, Properties props) throws IOException { this.closer = Closer.create(); try { FileSystem optionalRateControlledFs = fs; if (props.contains(DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT)) { optionalRateControlledFs = this.closer.register(new RateControlledFileSystem(fs, Long.parseLong(props.getProperty(DATASET_CLEAN_HDFS_CALLS_PER_SECOND_LIMIT)))); ((RateControlledFileSystem) optionalRateControlledFs).startRateControl(); } this.datasetFinder = new MultiCleanableDatasetFinder(optionalRateControlledFs, props); } catch (NumberFormatException exception) { throw new IOException(exception); } catch (ExecutionException exception) { throw new IOException(exception); } ExecutorService executor = ScalingThreadPoolExecutor.newScalingThreadPool(0, Integer.parseInt(props.getProperty(MAX_CONCURRENT_DATASETS_CLEANED, DEFAULT_MAX_CONCURRENT_DATASETS_CLEANED)), 100, ExecutorsUtils.newThreadFactory(Optional.of(LOG), Optional.of("Dataset-cleaner-pool-%d"))); this.service = ExecutorsUtils.loggingDecorator(executor); List<Tag<?>> tags = Lists.newArrayList(); tags.addAll(Tag.fromMap(AzkabanTags.getAzkabanTags())); // TODO -- Remove the dependency on gobblin-core after new Gobblin Metrics does not depend on gobblin-core. this.metricContext = this.closer.register(Instrumented.getMetricContext(new State(props), DatasetCleaner.class, tags)); this.isMetricEnabled = GobblinMetrics.isEnabled(props); this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, RetentionEvents.NAMESPACE).build(); this.throwables = Lists.newArrayList(); }