/**
 * Computes how many quarantined items to process in the next pass.
 *
 * @return the configured batch size, capped at the number of currently quarantined items
 */
private int determineBatchSize() {
    return Math.min(group.getConfig().getBatchSize(), quarantined.size());
}
/**
 * Determines the effective batch size for the next processing pass.
 * <p>
 * Never returns more than the number of items currently quarantined.
 */
private int determineBatchSize() {
    final int pending = quarantined.size();
    final int configured = group.getConfig().getBatchSize();
    return pending < configured ? pending : configured;
}
/**
 * Returns the number of quarantined items to process as one batch:
 * the configured batch size, shrunk to the quarantine size when fewer items are available.
 */
private int determineBatchSize() {
    int result = group.getConfig().getBatchSize();
    final int available = quarantined.size();
    if (available < result) {
        result = available;
    }
    return result;
}
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
LOGGER.config(getThreadName() + " : processSingleItem() : processing " + item); int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processSingleItem() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }
private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }
LOGGER.config(getThreadName() + " : processSingleItem() : processing " + item); int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processSingleItem() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
/**
 * Processes the current quarantine under the quarantine write lock and clears it.
 * <p>
 * When cluster operations are disabled on this node the items are reassembled
 * (presumably put back on the work queue — confirm against {@code reassemble()});
 * otherwise they are processed batched or one at a time depending on configuration.
 * {@code quarantined} is set to {@code null} after either path.
 *
 * @throws ProcessingException when item processing fails
 */
private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }
LOGGER.config(getThreadName() + " : processSingleItem() : processing " + item); int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { LOGGER.warning(getThreadName() + " : processSingleItem() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
/**
 * Creates a processing bucket bound to the given group.
 * <p>
 * Sets up the state, bucket and quarantine read/write locks (all honoring the
 * group's synchronous-write setting), the bucket conditions, the baseline
 * timestamp, and the error handler (defaulting to {@link LoggingErrorHandler}
 * when none is supplied). When the config is an {@link AsyncConfig2}, its
 * max queue size is used; otherwise the queue is unbounded (0).
 *
 * @param group        the owning bucket group; its config drives all lock/queue settings
 * @param cluster      cluster info used to resolve the DSO cluster instance (may be node-local)
 * @param processor    the processor invoked for quarantined items
 * @param errorHandler handler for processing errors, or {@code null} for the logging default
 */
ProcessingBucket(final ProcessingBucketGroup<I> group, final ClusterInfo cluster, final ItemProcessor<I> processor,
                 final AsyncErrorHandler errorHandler) {
    this.group = group;
    // Fix: use the captured config consistently instead of re-fetching it via
    // group.getConfig() for every lock construction.
    final AsyncConfig config = group.getConfig();
    if (config instanceof AsyncConfig2) {
        // AsyncConfig2 adds a configurable queue-size cap
        this.maxQueueSize = ((AsyncConfig2) config).getMaxQueueSize();
    } else {
        // legacy configs: 0 means no cap
        this.maxQueueSize = 0;
    }
    final boolean synchronousWrite = config.isSynchronousWrite();
    this.stateLock = new TerracottaReadWriteLock(synchronousWrite);
    this.stateReadLock = stateLock.readLock();
    this.stateWriteLock = stateLock.writeLock();
    this.bucketLock = new TerracottaReadWriteLock(synchronousWrite);
    this.bucketWriteLock = bucketLock.writeLock();
    this.bucketReadLock = bucketLock.readLock();
    this.bucketIsEmpty = bucketWriteLock.newCondition();
    this.bucketNotFull = bucketWriteLock.newCondition();
    this.quarantineLock = new TerracottaReadWriteLock(synchronousWrite);
    this.quarantineWriteLock = quarantineLock.writeLock();
    this.quarantineReadLock = quarantineLock.readLock();
    this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster);
    this.baselineTimestamp = System.currentTimeMillis();
    this.processor = processor;
    if (null == errorHandler) {
        this.errorHandler = new LoggingErrorHandler();
    } else {
        this.errorHandler = errorHandler;
    }
}
/**
 * Constructs a bucket for the given group, wiring up its locks, conditions and
 * error handling.
 * <p>
 * All three read/write locks (state, bucket, quarantine) follow the group's
 * synchronous-write configuration. {@code maxQueueSize} is taken from
 * {@link AsyncConfig2} when available, otherwise 0 (no cap).
 *
 * @param group        the owning bucket group supplying the configuration
 * @param cluster      cluster info resolved to a DSO cluster instance
 * @param processor    processor applied to quarantined items
 * @param errorHandler error handler; {@code null} selects {@link LoggingErrorHandler}
 */
ProcessingBucket(final ProcessingBucketGroup<I> group, final ClusterInfo cluster, final ItemProcessor<I> processor,
                 final AsyncErrorHandler errorHandler) {
    this.group = group;
    // Inconsistency fix: the config local was declared but group.getConfig()
    // was still called repeatedly below — reuse the local throughout.
    final AsyncConfig config = group.getConfig();
    if (config instanceof AsyncConfig2) {
        this.maxQueueSize = ((AsyncConfig2) config).getMaxQueueSize();
    } else {
        this.maxQueueSize = 0; // non-AsyncConfig2 configs get an unbounded queue
    }
    final boolean syncWrite = config.isSynchronousWrite();
    this.stateLock = new TerracottaReadWriteLock(syncWrite);
    this.stateReadLock = stateLock.readLock();
    this.stateWriteLock = stateLock.writeLock();
    this.bucketLock = new TerracottaReadWriteLock(syncWrite);
    this.bucketWriteLock = bucketLock.writeLock();
    this.bucketReadLock = bucketLock.readLock();
    this.bucketIsEmpty = bucketWriteLock.newCondition();
    this.bucketNotFull = bucketWriteLock.newCondition();
    this.quarantineLock = new TerracottaReadWriteLock(syncWrite);
    this.quarantineWriteLock = quarantineLock.writeLock();
    this.quarantineReadLock = quarantineLock.readLock();
    this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster);
    this.baselineTimestamp = System.currentTimeMillis();
    this.processor = processor;
    if (null == errorHandler) {
        this.errorHandler = new LoggingErrorHandler();
    } else {
        this.errorHandler = errorHandler;
    }
}
/**
 * Constructs a bucket for the given group (variant without a max-queue-size cap
 * or a bucket-not-full condition — NOTE(review): looks like an earlier revision
 * of this constructor; confirm which variant the file should keep).
 * <p>
 * Wires up the state, bucket and quarantine read/write locks (each honoring the
 * group's synchronous-write setting), the bucket-is-empty condition, the baseline
 * timestamp, and the error handler, defaulting to {@code LoggingErrorHandler}
 * when {@code errorHandler} is {@code null}.
 *
 * @param group        the owning bucket group supplying the configuration
 * @param cluster      cluster info resolved via {@code ClusterInfoUtil.determineDsoClusterInstance}
 * @param processor    processor applied to quarantined items
 * @param errorHandler error handler, or {@code null} for the logging default
 */
ProcessingBucket(final ProcessingBucketGroup<I> group, final ClusterInfo cluster, final ItemProcessor<I> processor, final AsyncErrorHandler errorHandler) { this.group = group; this.stateLock = new TerracottaReadWriteLock(group.getConfig().isSynchronousWrite()); this.stateReadLock = stateLock.readLock(); this.stateWriteLock = stateLock.writeLock(); this.bucketLock = new TerracottaReadWriteLock(group.getConfig().isSynchronousWrite()); this.bucketWriteLock = bucketLock.writeLock(); this.bucketReadLock = bucketLock.readLock(); this.bucketIsEmpty = bucketWriteLock.newCondition(); this.quarantineLock = new TerracottaReadWriteLock(group.getConfig().isSynchronousWrite()); this.quarantineWriteLock = quarantineLock.writeLock(); this.quarantineReadLock = quarantineLock.readLock(); this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster); this.baselineTimestamp = System.currentTimeMillis(); this.processor = processor; if (null == errorHandler) { this.errorHandler = new LoggingErrorHandler(); } else { this.errorHandler = errorHandler; } }
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;