/**
 * Creates a new fixed size ThreadPoolExecutor backed by a bounded queue.
 * <p>
 * The queue is capped at {@code QUEUE_MAX_LENGTH} elements and the rejection
 * policy is CallerRunsPolicy for the case the queue is full: capping the queue
 * keeps the timeouts reasonable for most jobs.
 *
 * @param threads the number of threads
 * @param groupname a label to identify the threadpool; useful for profiling.
 * @return the new ExecutorService
 */
public static ThreadPoolExecutor newFixedThreadPool(int threads, String groupname) {
	// Delegate to the fully-parameterized overload, pinning the default queue cap.
	return newFixedThreadPool( threads, groupname, QUEUE_MAX_LENGTH );
}
/**
 * Creates a client wrapping the given low-level REST client.
 *
 * @param restClient the low-level Elasticsearch REST client to delegate to
 * @param sniffer the node sniffer associated with {@code restClient}
 * @param requestTimeoutValue the request timeout amount, interpreted in {@code requestTimeoutUnit}
 * @param requestTimeoutUnit the unit of {@code requestTimeoutValue}
 * @param initialGsonProvider the initial Gson provider used for (de)serialization
 */
public ElasticsearchClientImpl(RestClient restClient, Sniffer sniffer, int requestTimeoutValue, TimeUnit requestTimeoutUnit, GsonProvider initialGsonProvider) {
	this.gsonProvider = initialGsonProvider;
	this.requestTimeoutUnit = requestTimeoutUnit;
	this.requestTimeoutValue = requestTimeoutValue;
	this.sniffer = sniffer;
	this.restClient = restClient;
	// Dedicated scheduler whose only job is to enforce the request timeout.
	this.timeoutExecutorService = Executors.newScheduledThreadPool( "Elasticsearch request timeout executor" );
}
/**
 * Starts the batch coordinator on a dedicated single-threaded executor.
 * <p>
 * The executor is shut down immediately after submission so its thread
 * terminates as soon as the coordinator task completes.
 *
 * @return a future tracking the coordinator's execution
 */
@Override
public Future<?> start() {
	BatchCoordinator coordinator = createCoordinator();
	ExecutorService executor = Executors.newFixedThreadPool( 1, "batch coordinator" );
	try {
		return executor.submit( coordinator );
	}
	finally {
		// No further tasks will ever be submitted; let the thread die when done.
		executor.shutdown();
	}
}
/**
 * Creates a client wrapping the given low-level REST client.
 *
 * @param restClient the low-level Elasticsearch REST client to delegate to
 * @param sniffer the node sniffer associated with {@code restClient}
 * @param requestTimeoutValue the request timeout amount, interpreted in {@code requestTimeoutUnit}
 * @param requestTimeoutUnit the unit of {@code requestTimeoutValue}
 * @param initialGsonProvider the initial Gson provider used for (de)serialization
 */
public ElasticsearchClientImpl(RestClient restClient, Sniffer sniffer, int requestTimeoutValue, TimeUnit requestTimeoutUnit, GsonProvider initialGsonProvider) {
	this.gsonProvider = initialGsonProvider;
	this.requestTimeoutUnit = requestTimeoutUnit;
	this.requestTimeoutValue = requestTimeoutValue;
	this.sniffer = sniffer;
	this.restClient = restClient;
	// Dedicated scheduler whose only job is to enforce the request timeout.
	this.timeoutExecutorService = Executors.newScheduledThreadPool( "Elasticsearch request timeout executor" );
}
/**
 * @param name The name of the orchestrator thread (and of this orchestrator when reporting errors)
 * @param maxChangesetsPerBatch The maximum number of changesets to
 * process in a single batch. Higher values mean lesser chance of transport
 * thread starvation, but higher heap consumption.
 * @param fair if {@code true} changesets are always submitted to the
 * delegate in FIFO order, if {@code false} changesets submitted
 * when the internal queue is full may be submitted out of order.
 * @param delegate A delegate orchestrator. May not be thread-safe.
 */
public ElasticsearchBatchingSharedWorkOrchestrator( String name, int maxChangesetsPerBatch, boolean fair, ElasticsearchAccumulatingWorkOrchestrator delegate, ErrorHandler errorHandler) {
	super( name );
	this.errorHandler = errorHandler;
	this.delegate = delegate;
	this.changesetsPerBatch = maxChangesetsPerBatch;
	// Queue and buffer are both sized to the batch cap: a batch drains at most
	// one full queue's worth of changesets into the buffer.
	changesetBuffer = new ArrayList<>( maxChangesetsPerBatch );
	changesetQueue = new ArrayBlockingQueue<>( maxChangesetsPerBatch, fair );
	// Single-threaded: the delegate may not be thread-safe.
	executor = Executors.newFixedThreadPool( 1, name );
	processingScheduled = new AtomicBoolean( false );
}
/**
 * @param name The name of the orchestrator thread (and of this orchestrator when reporting errors)
 * @param maxChangesetsPerBatch The maximum number of changesets to
 * process in a single batch. Higher values mean lesser chance of transport
 * thread starvation, but higher heap consumption.
 * @param fair if {@code true} changesets are always submitted to the
 * delegate in FIFO order, if {@code false} changesets submitted
 * when the internal queue is full may be submitted out of order.
 * @param delegate A delegate orchestrator. May not be thread-safe.
 */
public ElasticsearchBatchingSharedWorkOrchestrator( String name, int maxChangesetsPerBatch, boolean fair, ElasticsearchAccumulatingWorkOrchestrator delegate, ErrorHandler errorHandler) {
	super( name );
	this.errorHandler = errorHandler;
	this.delegate = delegate;
	this.changesetsPerBatch = maxChangesetsPerBatch;
	// Queue and buffer are both sized to the batch cap: a batch drains at most
	// one full queue's worth of changesets into the buffer.
	changesetBuffer = new ArrayList<>( maxChangesetsPerBatch );
	changesetQueue = new ArrayBlockingQueue<>( maxChangesetsPerBatch, fair );
	// Single-threaded: the delegate may not be thread-safe.
	executor = Executors.newFixedThreadPool( 1, name );
	processingScheduled = new AtomicBoolean( false );
}
/** * Will spawn a thread for each type in rootEntities, they will all re-join * on endAllSignal when finished. * * @throws InterruptedException if interrupted while waiting for endAllSignal. */ private void doBatchWork() throws InterruptedException { ExecutorService executor = Executors.newFixedThreadPool( typesToIndexInParallel, "BatchIndexingWorkspace" ); for ( Class<?> type : rootEntities ) { indexingTasks.add( executor.submit( new BatchIndexingWorkspace( sessionFactory, mapping, type, documentBuilderThreads, cacheMode, objectLoadingBatchSize, endAllSignal, monitor, objectsLimit, idFetchSize, transactionTimeout, tenantId ) ) ); } executor.shutdown(); endAllSignal.await(); //waits for the executor to finish }
/**
 * Submits {@code documentBuilderThreads} copies of the document-producing task
 * to a dedicated "entityloader" pool, recording each future in {@code tasks}.
 * The pool is shut down right away so it terminates once the tasks complete.
 */
private void startTransformationToLuceneWork() {
	final Runnable documentOutputter = new IdentifierConsumerDocumentProducer(
			primaryKeyStream, monitor, sessionFactory, producerEndSignal, cacheMode,
			indexedType, idNameOfIndexedType, transactionTimeout, tenantId, mapping );
	final ThreadPoolExecutor execFirstLoader = Executors.newFixedThreadPool( documentBuilderThreads, "entityloader" );
	try {
		// One identical consumer task per document-builder thread.
		int remaining = documentBuilderThreads;
		while ( remaining-- > 0 ) {
			tasks.add( execFirstLoader.submit( documentOutputter ) );
		}
	}
	finally {
		execFirstLoader.shutdown();
	}
}
}
private void startProducingPrimaryKeys(BatchTransactionalContext transactionalContext) { final Runnable primaryKeyOutputter = new OptionallyWrapInJTATransaction( transactionalContext, new IdentifierProducer( primaryKeyStream, sessionFactory, objectLoadingBatchSize, indexedType, monitor, objectsLimit, idFetchSize, tenantId ), transactionTimeout, tenantId ); //execIdentifiersLoader has size 1 and is not configurable: ensures the list is consistent as produced by one transaction final ThreadPoolExecutor execIdentifiersLoader = Executors.newFixedThreadPool( 1, "identifierloader" ); try { tasks.add( execIdentifiersLoader.submit( primaryKeyOutputter ) ); } finally { execIdentifiersLoader.shutdown(); } }