/**
 * Start the coordinator with an item processor and a custom concurrency.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @param processingConcurrency the number of buckets that will be created to store the work items
 * @see #start(ItemProcessor, int, ScatterPolicy)
 */
public void start(final ItemProcessor<I> processor, final int processingConcurrency) {
  // Cast the null explicitly so overload resolution stays unambiguous
  // even if another three-argument start overload is introduced later.
  start(processor, processingConcurrency, (ScatterPolicy) null);
}
/**
 * Start the coordinator with an item processor and a custom concurrency.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @param processingConcurrency the number of buckets that will be created to store the work items
 * @see #start(ItemProcessor, int, ItemScatterPolicy)
 */
public void start(final ItemProcessor<I> processor, final int processingConcurrency) {
  // Parameterized cast (instead of the raw ItemScatterPolicy) avoids a
  // rawtypes warning while still disambiguating the overload for the null.
  start(processor, processingConcurrency, (ItemScatterPolicy<I>) null);
}
/**
 * Start the coordinator with an item processor, using the default concurrency of a single bucket.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @see #start(ItemProcessor, int, ScatterPolicy)
 */
public void start(final ItemProcessor<I> processor) {
  // Convenience overload: delegate with a processing concurrency of one.
  final int defaultConcurrency = 1;
  start(processor, defaultConcurrency);
}
/**
 * Start the coordinator with an item processor and a custom concurrency.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @param processingConcurrency the number of buckets that will be created to store the work items
 * @see #start(ItemProcessor, int, ItemScatterPolicy)
 */
public void start(final ItemProcessor<I> processor, final int processingConcurrency) {
  // Parameterized cast (instead of the raw ItemScatterPolicy) avoids a
  // rawtypes warning while still disambiguating the overload for the null.
  start(processor, processingConcurrency, (ItemScatterPolicy<I>) null);
}
/**
 * Start the coordinator with an item processor, using the default concurrency of a single bucket.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @see #start(ItemProcessor, int, ItemScatterPolicy)
 */
public void start(final ItemProcessor<I> processor) {
  // Convenience overload: delegate with a processing concurrency of one.
  final int defaultConcurrency = 1;
  start(processor, defaultConcurrency);
}
/**
 * Start the coordinator with an item processor and a single work bucket (concurrency 1).
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @see #start(ItemProcessor, int, ItemScatterPolicy)
 */
public void start(final ItemProcessor<I> processor) {
  // Simplest entry point: one bucket, default scatter policy.
  start(processor, 1);
}
/**
 * Start the coordinator.
 * <p>
 * Note that while this coordinator instance has to be a Terracotta DSO root, the {@code start} method has to be
 * executed by every node. The item processor instance will never be shared and will remain local to the cluster node
 * that it was instantiated on. This allows each item processor to refer to local resources, like database
 * connections, without having to worry about how to coordinate these across the cluster. If a lot of work items are
 * added to the coordinator, there might be a high amount of lock contention while adding the items to the internal
 * buckets. Increasing the concurrency will create a larger amount of buckets, which allows more work items to be
 * added concurrently. A custom scatter policy can be provided to change how buckets are internally selected when an
 * item is added. If {@code null} is provided for the scatter policy, the default will be used. Meaning that when the
 * concurrency is 1, the single bucket will always be selected and if the concurrency is higher, the bucket will be
 * selected based on the item's hash code.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @param processingConcurrency the number of buckets that will be created to store the work items, this has to be
 * at least 1
 * @param policy the scatter policy that will be used to select the bucket into which a work item will be added,
 * may be {@code null} for the default policy
 * @see #add(Object)
 * @see HashCodeScatterPolicy
 * @deprecated please use {@link #start(ItemProcessor, int, ItemScatterPolicy)} instead
 */
@Deprecated
public void start(final ItemProcessor<I> processor, final int processingConcurrency, final ScatterPolicy policy) {
  // Adapt the legacy policy to the new ItemScatterPolicy API; a null policy
  // stays null so the delegate applies its default selection behavior.
  LegacyScatterPolicyWrapper<I> policyWrapper = null;
  if (policy != null) {
    policyWrapper = new LegacyScatterPolicyWrapper<I>(policy);
  }
  start(processor, processingConcurrency, policyWrapper);
}
/**
 * Start the coordinator.
 * <p>
 * Note that while this coordinator instance has to be a Terracotta DSO root, the {@code start} method has to be
 * executed by every node. The item processor instance will never be shared and will remain local to the cluster node
 * that it was instantiated on. This allows each item processor to refer to local resources, like database
 * connections, without having to worry about how to coordinate these across the cluster. If a lot of work items are
 * added to the coordinator, there might be a high amount of lock contention while adding the items to the internal
 * buckets. Increasing the concurrency will create a larger amount of buckets, which allows more work items to be
 * added concurrently. A custom scatter policy can be provided to change how buckets are internally selected when an
 * item is added. If {@code null} is provided for the scatter policy, the default will be used. Meaning that when the
 * concurrency is 1, the single bucket will always be selected and if the concurrency is higher, the bucket will be
 * selected based on the item's hash code.
 *
 * @param processor the item processor instance that will be used to process each item that has been added
 * @param processingConcurrency the number of buckets that will be created to store the work items, this has to be
 * at least 1
 * @param policy the scatter policy that will be used to select the bucket into which a work item will be added,
 * may be {@code null} for the default policy
 * @see #add(Object)
 * @see HashCodeScatterPolicy
 * @deprecated please use {@link #start(ItemProcessor, int, ItemScatterPolicy)} instead
 */
@Deprecated
public void start(final ItemProcessor<I> processor, final int processingConcurrency, final ScatterPolicy policy) {
  // Adapt the legacy policy to the new ItemScatterPolicy API; a null policy
  // stays null so the delegate applies its default selection behavior.
  LegacyScatterPolicyWrapper<I> policyWrapper = null;
  if (policy != null) {
    policyWrapper = new LegacyScatterPolicyWrapper<I>(policy);
  }
  start(processor, processingConcurrency, policyWrapper);
}