// Convenience overload: passing a null thread name makes the bucket fall back
// to the generic thread name supplied at start time (see start(String)).
public ProcessingBucket<I> createNewBucket(final ItemProcessor<I> processor) {
  return createNewBucket(processor, null);
}
// The effective batch size is capped by the number of items currently quarantined.
private int determineBatchSize() {
  return Math.min(group.getConfig().getBatchSize(), quarantined.size());
}
// Take over items quarantined from buckets of dead nodes first; only when that
// yields nothing, try to steal work from other live buckets.
lootsize = group.quarantineItemsFromDeadBuckets(this);
if (0 == lootsize) {
  lootsize = group.tryToStealFromOthers(this);
  final int batchSize = group.getConfig().getBatchSize();
  if (group.getConfig().isBatchingEnabled() && batchSize > 0) {
    // Wait for a full batch as long as the bucket hasn't fallen too far behind.
    if (workSize < batchSize
        && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) {
      if (LOGGER.isLoggable(Level.FINER)) {
        LOGGER.finer(getThreadName() + " : processItems() : only " + workSize
            + " work items available, waiting for " + batchSize + " items to fill up a batch");
      }
      final int rateLimit = group.getConfig().getRateLimit();
      if (rateLimit > 0) {
        final long secondsSinceLastWorkDone;
        // ...
deadNodes = determineDeadNodes();
// Note: localBuckets holds the count of this node's buckets, not the collection itself.
int localBuckets = buckets.get(getCurrentNodeId()).size();
try {
  do {
    if (deadNodes != null && !deadNodes.isEmpty()) {
      // Recover the buckets that were owned by each disconnected node.
      for (final String nodeId : deadNodes) {
        final int lastNodeDeadBucketCount = recoverDeadBuckets(nodeId);
        if (LOGGER.isLoggable(Level.FINE)) {
          LOGGER.fine("Node '" + nodeId + "' is no longer connected; picked up "
              + lastNodeDeadBucketCount + " dead buckets");
        }
      }
      // ...
// Create the bucket, attach the quarantine filter, register it locally,
// and start its processing thread.
final ProcessingBucket<I> bucket = group.createNewBucket(processor);
bucket.setQuarantinedItemsFilter(filter);
localBuckets.add(bucket);
try {
  group.start(bucket);
} catch (final ExistingRunningThreadException e) {
  // The bucket was freshly created, so a running thread can't exist yet.
  throw new AssertionError(e);
}
public ProcessingBucketGroup(final ClusterInfo cluster, final AsyncConfig config, final StealPolicy<I> policy) {
  this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster);
  this.config = config;
  this.policy = policy;
  this.buckets = newMap();
  // A single read-write lock guards all bucket bookkeeping of the group.
  this.groupLock = new TerracottaReadWriteLock(config.isSynchronousWrite());
  this.groupWriteLock = groupLock.writeLock();
  this.groupReadLock = groupLock.readLock();
}
public void nodeLeft(final ClusterEvent event) {
  groupWriteLock.lock();
  try {
    // Reclaim the buckets that belonged to the node that just left the cluster.
    final int deadBucketsCount = recoverDeadBuckets(event.getNode().getId());
    if (LOGGER.isLoggable(Level.FINE)) {
      LOGGER.fine("Node '" + event.getNode() + "' left; picked up " + deadBucketsCount + " dead buckets");
    }
  } finally {
    groupWriteLock.unlock();
  }
}
void start(final String genericThreadName) throws ExistingRunningThreadException {
  // Prefer an explicitly configured thread name, falling back to the generic one.
  final String name = threadName != null ? threadName : genericThreadName;
  bucketWriteLock.lock();
  try {
    ensureNonExistingThread();
    processingThread = new Thread(group.getThreadGroup(), new ProcessingThread(), name + " - processing");
    processingThread.setDaemon(true);
    processingThread.start();
  } finally {
    bucketWriteLock.unlock();
  }
}
public void start(final ProcessingBucket<I> bucket) throws ExistingRunningThreadException {
  if (null == bucket) throw new IllegalArgumentException("bucket can't be null");
  AsyncCoordinator.UNFLUSHABLE_STATE.add(this);
  groupWriteLock.lock();
  try {
    if (cluster != null) {
      cluster.addClusterListener(this);
    }
    Set<ProcessingBucket<I>> bucketCollection = buckets.get(getCurrentNodeId());
    if (null == bucketCollection) {
      bucketCollection = new ConcurrentDistributedSet<ProcessingBucket<I>>();
      buckets.put(getCurrentNodeId(), bucketCollection);
    } else if (bucketCollection.contains(bucket)) {
      // buckets maps node ids to bucket sets, so containsValue(bucket) would never
      // match; membership has to be checked against the node's bucket set instead.
      throw new IllegalArgumentException("bucket already part of the group");
    }
    bucketCollection.add(bucket);
    bucket.start("ProcessingBucket " + buckets.size());
  } finally {
    groupWriteLock.unlock();
  }
}
/**
 * Creates a new coordinator instance with a custom configuration and steal policy.
 * <p/>
 * In case {@code null} is provided for either argument, the corresponding default will be used. For the
 * configuration, the default is {@link DefaultAsyncConfig} and for the steal policy, the default is
 * {@link FallBehindStealPolicy}. The {@code maxAllowedFallBehind} value of the default steal policy will be
 * retrieved from the configuration.
 *
 * @param config      the custom configuration instance that should be used by this coordinator, if {@code null} is
 *                    provided the default configuration will be used
 * @param stealPolicy the custom steal policy that should be used by this coordinator, if {@code null} is provided
 *                    the default steal policy will be used
 */
public AsyncCoordinator(AsyncConfig config, StealPolicy<I> stealPolicy) {
  if (null == config) {
    config = DefaultAsyncConfig.getInstance();
  }
  if (null == stealPolicy) {
    stealPolicy = new FallBehindStealPolicy<I>(config.getMaxAllowedFallBehind());
  }
  this.group = new ProcessingBucketGroup(config, stealPolicy);
  this.coordinatorLock = new TerracottaReadWriteLock(config.isSynchronousWrite());
  this.coordinatorWriteLock = coordinatorLock.writeLock();
  this.coordinatorReadLock = coordinatorLock.readLock();
}
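// A minimal usage sketch of the constructor above. MyItem and the 500L fall-behind
// threshold are hypothetical stand-ins, not names from this code base; the null
// arguments exercise the documented defaults (DefaultAsyncConfig, and a
// FallBehindStealPolicy seeded with the configuration's maxAllowedFallBehind).
//
//   // everything defaulted:
//   AsyncCoordinator<MyItem> defaulted = new AsyncCoordinator<MyItem>(null, null);
//
//   // default configuration, custom steal policy:
//   AsyncCoordinator<MyItem> custom =
//       new AsyncCoordinator<MyItem>(null, new FallBehindStealPolicy<MyItem>(500L));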