/**
 * In case of BatchException we expect that the dispatcher has removed all the events till the
 * event that threw BatchException.
 */
public void handleException() {
  // Record the redistribution and force the next peek to start over.
  final GatewaySenderStats senderStats = this.sender.getStatistics();
  senderStats.incBatchesRedistributed();
  this.resetLastPeekedEvents = true;
}
private void handleUnSuccessfulBatchDispatch(List events) { final GatewaySenderStats statistics = this.sender.getStatistics(); statistics.incBatchesRedistributed(); // Set posDup flag on each event in the batch Iterator it = events.iterator(); while (it.hasNext() && !this.isStopped) { Object o = it.next(); if (o != null && o instanceof GatewaySenderEventImpl) { GatewaySenderEventImpl ge = (GatewaySenderEventImpl) o; ge.setPossibleDuplicate(true); } } }
public void handleUnSuccessBatchAck(int bId) { this.sender.getStatistics().incBatchesRedistributed(); // Set posDup flag on each event in the batch List<GatewaySenderEventImpl>[] eventsArr = this.batchIdToEventsMap.get(bId); if (eventsArr != null) { List<GatewaySenderEventImpl> events = eventsArr[0]; Iterator it = events.iterator(); while (it.hasNext() && !this.isStopped) { Object o = it.next(); if (o != null && o instanceof GatewaySenderEventImpl) { GatewaySenderEventImpl ge = (GatewaySenderEventImpl) o; ge.setPossibleDuplicate(true); } } } }
/**
 * Creates a bucket region queue and caches the statistics of the parallel gateway sender that
 * owns this bucket's partitioned region.
 */
AbstractBucketRegionQueue(String regionName, RegionAttributes attrs, LocalRegion parentRegion,
    InternalCache cache, InternalRegionArguments internalRegionArgs) {
  super(regionName, attrs, parentRegion, cache, internalRegionArgs);
  gatewaySenderStats = getPartitionedRegion().getParallelGatewaySender().getStatistics();
}
/**
 * Creates a mock {@code GatewaySenderStats} and stubs the sender to return it.
 */
private GatewaySenderStats mockGatewaySenderStats() {
  final GatewaySenderStats mockStats = mock(GatewaySenderStats.class);
  when(sender.getStatistics()).thenReturn(mockStats);
  return mockStats;
}
/** * Just remove the event from the unprocessed events map if it is present. This method added to * fix bug 37603 */ protected boolean basicHandlePrimaryDestroy(final EventID eventId) { if (this.sender.isPrimary()) { // no need to do anything if we have become the primary return false; } GatewaySenderStats statistics = this.sender.getStatistics(); // Get the event from the map synchronized (unprocessedEventsLock) { if (this.unprocessedEvents == null) return false; // now we can safely use the unprocessedEvents field EventWrapper ew = this.unprocessedEvents.remove(eventId); if (ew != null) { ew.event.release(); statistics.incUnprocessedEventsRemovedByPrimary(); return true; } } return false; }
private void createGatewaySender() { // Mock gateway sender this.sender = ParallelGatewaySenderHelper.createGatewaySender(this.cache); when(this.sender.isBatchConflationEnabled()).thenReturn(true); when(sender.getStatistics()).thenReturn(mock(GatewaySenderStats.class)); }
@Override protected void enqueueEvent(GatewayQueueEvent gatewayQueueEvent) { boolean queuedEvent = false; try { if (getSender().beforeEnqueue(gatewayQueueEvent)) { long start = getSender().getStatistics().startTime(); try { queuedEvent = this.queue.put(gatewayQueueEvent); } catch (InterruptedException e) { e.printStackTrace(); } getSender().getStatistics().endPut(start); } else { if (logger.isDebugEnabled()) { logger.debug("The Event {} is filtered.", gatewayQueueEvent); } getSender().getStatistics().incEventsFiltered(); } } finally { if (!queuedEvent) { // it was not queued for some reason ((GatewaySenderEventImpl) gatewayQueueEvent).release(); } } }
/**
 * Creates a bridge for the given sender, wiring up its statistics to the MBean monitors.
 */
public GatewaySenderMBeanBridge(GatewaySender sender) {
  this.sender = sender;
  this.abstractSender = (AbstractGatewaySender) sender;
  this.monitor = new MBeanStatsMonitor("GatewaySenderMXBeanMonitor");
  this.overflowMonitor = new GatewaySenderOverflowMonitor("GatewaySenderMXBeanOverflowMonitor");
  addGatewaySenderStats(this.abstractSender.getStatistics());
  initializeStats();
}
// NOTE(review): incomplete fragment — the enclosing method (and the matching close of this
// try block) is not visible in this chunk, so no restructuring is attempted here.
GatewaySenderStats statistics = this.eventProcessor.sender.getStatistics(); boolean success = false; try {
@Override public void afterDestroy(EntryEvent event) { if (this.sender.isPrimary()) { return; } // fix bug 37603 // There is a small window where queue has not been created fully yet. The region is created, // and it receives afterDestroy callback. final Set<RegionQueue> queues = this.sender.getQueues(); if (queues != null && !queues.isEmpty()) { this.sender.getStatistics().decQueueSize(); } // Send event to the event dispatcher Object oldValue = event.getOldValue(); if (oldValue instanceof GatewaySenderEventImpl) { GatewaySenderEventImpl senderEvent = (GatewaySenderEventImpl) oldValue; if (logger.isDebugEnabled()) { logger.debug("Received after Destroy for Secondary event {} the key was {}", senderEvent, event.getKey()); } this.processor.handlePrimaryDestroy(senderEvent); } } }
// NOTE(review): fragment — track growth of the conflation index map in the parallel sender's
// statistics; the enclosing method is not visible in this chunk.
region.getParallelGatewaySender().getStatistics().incConflationIndexesMapSize();
@Override public void afterCreate(EntryEvent event) { if (this.sender.isPrimary()) { // The secondary has failed over to become the primary. There is a small // window where the secondary has become the primary, but the listener // is // still set. Ignore any updates to the map at this point. It is unknown // what the state of the map is. This may result in duplicate events // being sent. return; } // There is a small window where queue has not been created fully yet. // The underlying region of the queue is created, and it receives afterDestroy callback final Set<RegionQueue> queues = this.sender.getQueues(); if (queues != null && !queues.isEmpty()) { this.sender.getStatistics().incQueueSize(); } // fix bug 35730 // Send event to the event dispatcher GatewaySenderEventImpl senderEvent = (GatewaySenderEventImpl) event.getNewValue(); this.processor.handlePrimaryEvent(senderEvent); }
private void createGatewaySender() { // Mock gateway sender this.sender = ParallelGatewaySenderHelper.createGatewaySender(this.cache); when(this.queueRegion.getParallelGatewaySender()).thenReturn(this.sender); when(this.sender.getQueues()).thenReturn(null); when(this.sender.getDispatcherThreads()).thenReturn(1); stats = new GatewaySenderStats(new DummyStatisticsFactory(), "ln"); when(this.sender.getStatistics()).thenReturn(stats); }
/**
 * Rebalances the sender by tearing down every connected remote dispatcher's connection (and
 * its ack reader thread) so connections get re-established, recording the load-balance
 * duration in the sender statistics.
 */
@Override
protected void rebalance() {
  final GatewaySenderStats stats = this.sender.getStatistics();
  final long balanceStart = stats.startLoadBalance();
  try {
    for (ParallelGatewaySenderEventProcessor processor : this.processors) {
      GatewaySenderEventRemoteDispatcher dispatcher =
          (GatewaySenderEventRemoteDispatcher) processor.getDispatcher();
      if (dispatcher.isConnectedToRemote()) {
        dispatcher.stopAckReaderThread();
        dispatcher.destroyConnection();
      }
    }
  } finally {
    stats.endLoadBalance(balanceStart);
  }
}
/**
 * Rebalances the sender by tearing down this processor's remote connection (and its ack
 * reader thread) so it gets re-established, recording the load-balance duration in the
 * sender statistics.
 */
@Override
protected void rebalance() {
  final GatewaySenderStats stats = this.sender.getStatistics();
  final long balanceStart = stats.startLoadBalance();
  try {
    if (this.dispatcher.isRemoteDispatcher()) {
      GatewaySenderEventRemoteDispatcher remote =
          (GatewaySenderEventRemoteDispatcher) this.dispatcher;
      if (remote.isConnectedToRemote()) {
        remote.stopAckReaderThread();
        remote.destroyConnection();
      }
    }
  } finally {
    stats.endLoadBalance(balanceStart);
  }
}
/**
 * Creates a serial gateway sender queue backed by a region, copying the relevant
 * configuration from the sender and starting the batch-removal thread.
 *
 * @param abstractSender the sender that owns this queue
 * @param regionName the name of the backing region
 * @param listener optional cache listener installed on the backing region
 */
public SerialGatewaySenderQueue(AbstractGatewaySender abstractSender, String regionName,
    CacheListener listener) {
  // The queue starts out with headKey and tailKey equal to -1 to force
  // them to be initialized from the region.
  this.regionName = regionName;
  this.headKey = -1;
  this.tailKey.set(-1);
  this.indexes = new HashMap<String, Map<Object, Long>>();
  // Copy the sender configuration this queue needs.
  this.enableConflation = abstractSender.isBatchConflationEnabled();
  this.diskStoreName = abstractSender.getDiskStoreName();
  this.batchSize = abstractSender.getBatchSize();
  this.enablePersistence = abstractSender.isPersistenceEnabled();
  // Disk synchronicity is only meaningful when persistence is enabled.
  if (this.enablePersistence) {
    this.isDiskSynchronous = abstractSender.isDiskSynchronous();
  } else {
    this.isDiskSynchronous = false;
  }
  this.maximumQueueMemory = abstractSender.getMaximumMemeoryPerDispatcherQueue();
  this.stats = abstractSender.getStatistics();
  initializeRegion(abstractSender, listener);
  // Increment queue size. Fix for bug 51988.
  // NOTE(review): this must stay after initializeRegion(), which creates this.region.
  this.stats.incQueueSize(this.region.size());
  // Start the background thread that removes dispatched entries from the queue.
  this.removalThread = new BatchRemovalThread(abstractSender.getCache());
  this.removalThread.start();
  this.sender = abstractSender;
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Contains {} elements", this, size());
  }
}
@Test public void validateUnprocessedTokensMapUpdated() throws Exception { GatewaySenderStats gss = mock(GatewaySenderStats.class); when(sender.getStatistics()).thenReturn(gss); // Handle primary event EventID id = handlePrimaryEvent(); // Verify the token was added by checking the correct stat methods were called and the size of // the unprocessedTokensMap. verify(gss).incUnprocessedTokensAddedByPrimary(); verify(gss, never()).incUnprocessedEventsRemovedByPrimary(); assertEquals(1, this.processor.getUnprocessedTokensSize()); // Handle the event from the secondary. The call to enqueueEvent is necessary to synchronize the // unprocessedEventsLock and prevent the assertion error in basicHandleSecondaryEvent. EntryEventImpl event = mock(EntryEventImpl.class); when(event.getRegion()).thenReturn(mock(LocalRegion.class)); when(event.getEventId()).thenReturn(id); when(event.getOperation()).thenReturn(Operation.CREATE); this.processor.enqueueEvent(null, event, null); // Verify the token was removed by checking the correct stat methods were called and the size of // the unprocessedTokensMap. verify(gss).incUnprocessedTokensRemovedBySecondary(); verify(gss, never()).incUnprocessedEventsAddedBySecondary(); assertEquals(0, this.processor.getUnprocessedTokensSize()); }
/**
 * Registers that the given event was dropped from the primary queue. If this member hosts the
 * primary bucket for the event, a queue-removal message is sent for the dropped event's
 * shadow key and the drop is recorded in the sender statistics. Does nothing if the queue or
 * its shadow partitioned region does not exist yet.
 *
 * @param droppedEvent the event that was dropped because the primary sender was not running
 */
@Override
protected void registerEventDroppedInPrimaryQueue(EntryEventImpl droppedEvent) {
  if (queue == null) {
    return;
  }
  ConcurrentParallelGatewaySenderQueue cpgsq = (ConcurrentParallelGatewaySenderQueue) queue;
  PartitionedRegion prQ = cpgsq.getRegion(droppedEvent.getRegion().getFullPath());
  if (prQ == null) {
    if (logger.isDebugEnabled()) {
      // Parameterized logging for consistency with the other debug statements in this file.
      logger.debug("shadow partitioned region {} is not created yet.",
          droppedEvent.getRegion().getFullPath());
    }
    return;
  }
  int bucketId = PartitionedRegionHelper.getHashKey((EntryOperation) droppedEvent);
  long shadowKey = droppedEvent.getTailKey();
  ParallelGatewaySenderQueue pgsq =
      (ParallelGatewaySenderQueue) cpgsq.getQueueByBucket(bucketId);
  boolean isPrimary = prQ.getRegionAdvisor().getBucketAdvisor(bucketId).isPrimary();
  if (isPrimary) {
    pgsq.sendQueueRemovalMesssageForDroppedEvent(prQ, bucketId, shadowKey);
    this.sender.getStatistics().incEventsDroppedDueToPrimarySenderNotRunning();
    if (logger.isDebugEnabled()) {
      logger.debug("register dropped event for primary queue. BucketId is {}, shadowKey is {}, prQ is {}",
          bucketId, shadowKey, prQ.getFullPath());
    }
  }
}
@Test
public void validateUnprocessedTokensMapReaping() throws Exception {
  // Set the token timeout low
  int originalTokenTimeout = AbstractGatewaySender.TOKEN_TIMEOUT;
  AbstractGatewaySender.TOKEN_TIMEOUT = 500;
  try {
    GatewaySenderStats gss = mock(GatewaySenderStats.class);
    when(sender.getStatistics()).thenReturn(gss);

    // Add REAP_THRESHOLD + 1 events to the unprocessed tokens map. This causes the
    // uncheckedCount in the reaper to be REAP_THRESHOLD. The next event will cause the
    // reaper to run.
    int numEvents = SerialGatewaySenderEventProcessor.REAP_THRESHOLD + 1;
    for (int i = 0; i < numEvents; i++) {
      handlePrimaryEvent();
    }
    assertEquals(numEvents, this.processor.getUnprocessedTokensSize());

    // Wait for the timeout
    // NOTE(review): real sleep makes this test timing-sensitive; an injectable clock would
    // make it deterministic — confirm whether the reaper supports one.
    Thread.sleep(AbstractGatewaySender.TOKEN_TIMEOUT + 1000);

    // Add one more event to the unprocessed tokens map. This will reap all of the previous
    // tokens.
    handlePrimaryEvent();
    assertEquals(1, this.processor.getUnprocessedTokensSize());
  } finally {
    // Always restore the static timeout so other tests are unaffected.
    AbstractGatewaySender.TOKEN_TIMEOUT = originalTokenTimeout;
  }
}