public SerialGatewaySenderQueue(AbstractGatewaySender abstractSender, String regionName,
    CacheListener listener) {
  // headKey and tailKey start at -1 so they are lazily initialized from the region.
  this.regionName = regionName;
  this.headKey = -1;
  this.tailKey.set(-1);
  this.indexes = new HashMap<String, Map<Object, Long>>();
  this.enableConflation = abstractSender.isBatchConflationEnabled();
  this.diskStoreName = abstractSender.getDiskStoreName();
  this.batchSize = abstractSender.getBatchSize();
  this.enablePersistence = abstractSender.isPersistenceEnabled();
  // Disk synchrony only applies when persistence is enabled; the short-circuit
  // avoids querying the sender when persistence is off (same as the original if/else).
  this.isDiskSynchronous = this.enablePersistence && abstractSender.isDiskSynchronous();
  this.maximumQueueMemory = abstractSender.getMaximumMemeoryPerDispatcherQueue();
  this.stats = abstractSender.getStatistics();
  initializeRegion(abstractSender, listener);
  // Account for events already present in the region after recovery (bug 51988).
  this.stats.incQueueSize(this.region.size());
  this.removalThread = new BatchRemovalThread(abstractSender.getCache());
  this.removalThread.start();
  this.sender = abstractSender;
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Contains {} elements", this, size());
  }
}
// NOTE(review): this constructor snippet is truncated/garbled in extraction — the dangling
// "sender.getDispatcherThreads());" after the super() call and the unbalanced braces show
// statements were lost. Restore the full method from the original file before editing.
public ConcurrentParallelGatewaySenderEventProcessor(AbstractGatewaySender sender, ThreadsMonitoring tMonitoring) { super("Event Processor for GatewaySender_" + sender.getId(), sender, tMonitoring); sender.getDispatcherThreads()); nDispatcher = sender.getDispatcherThreads(); for (InternalRegion pr : sender.getCache().getApplicationRegions()) { if (((LocalRegion) pr).getAllGatewaySenderIds().contains(sender.getId())) { targetRs.add(pr); createProcessors(sender.getDispatcherThreads(), targetRs);
/**
 * Caches the dispatcher of the sender's current event processor, if one exists.
 */
public void setDispatcher() {
  AbstractGatewaySenderEventProcessor eventProcessor = abstractSender.getEventProcessor();
  if (eventProcessor != null) {
    // Reuse the processor fetched above: a second getEventProcessor() call could
    // observe a different (possibly null) processor between the check and the access.
    this.dispatcher = eventProcessor.getDispatcher();
  }
}
/**
 * Removes and returns the shadow PartitionedRegion registered for the given user-region
 * path, clearing the enqueuedAllTempQueueEvents flag under the sender's lifecycle
 * write lock. Returns {@code null} if no shadow PR was registered for the path.
 */
public PartitionedRegion removeShadowPR(String fullpath) {
  // Acquire the lock BEFORE the try block: if lock() itself failed inside the try,
  // the finally would attempt to unlock a lock this thread never held.
  this.sender.getLifeCycleLock().writeLock().lock();
  try {
    this.sender.setEnqueuedAllTempQueueEvents(false);
    return this.userRegionNameToshadowPRMap.remove(fullpath);
  } finally {
    this.sender.getLifeCycleLock().writeLock().unlock();
  }
}
public void addShadowPartitionedRegionForUserPR(PartitionedRegion pr) { // Reset enqueuedAllTempQueueEvents if the sender is running // This is done so that any events received while the shadow PR is added are queued in the // tmpQueuedEvents // instead of blocking the distribute call which could cause a deadlock. See GEM-801. if (this.sender.isRunning()) { this.sender.setEnqueuedAllTempQueueEvents(false); } this.sender.getLifeCycleLock().writeLock().lock(); try { for (int i = 0; i < processors.length; i++) { processors[i].addShadowPartitionedRegionForUserPR(pr); } } finally { this.sender.getLifeCycleLock().writeLock().unlock(); } }
// NOTE(review): this snippet is a truncated/garbled fragment (unbalanced braces, duplicated
// logger.debug blocks, statements such as "this.isPrimary(), getId(), operation, ..." with
// no enclosing call). It appears to come from a distribute()-style method; restore the full
// method from the original file before editing.
if (!isRunning()) { if (this.isPrimary()) { tmpDroppedEvents.add(clonedEvent); if (isDebugEnabled) { final GatewaySenderStats stats = getStatistics(); stats.incEventsReceived(); if (!checkForDistribution(event, stats)) { stats.incEventsNotQueued(); return; setModifiedEventId(clonedEvent); Object callbackArg = clonedEvent.getRawCallbackArgument(); this.isPrimary(), getId(), operation, clonedEvent, callbackArg); logger.debug( "{}: Event originated in {}. My DS id is {}. The remote DS id is {}. The recipients are: {}", this, seca.getOriginatingDSId(), this.getMyDSId(), this.getRemoteDSId(), seca.getRecipientDSIds()); logger.debug( "{}: Event originated in {}. My DS id is {}. The remote DS id is {}. The recipients are: {}", this, seca.getOriginatingDSId(), this.getMyDSId(), this.getRemoteDSId(), seca.getRecipientDSIds()); seca.setOriginatingDSId(this.getMyDSId()); seca.initializeReceipientDSIds(allRemoteDSIds);
// NOTE(review): this snippet is truncated — it references undeclared locals ("pfact",
// "fact"), acquires the lifecycle write lock with no visible try/finally unlock, and has
// unbalanced braces. Restore the complete method from the original file before editing;
// in particular, verify the lock is released on every path.
public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { this.sender.getLifeCycleLock().writeLock().lock(); PartitionedRegion prQ = null; InternalCache cache = sender.getCache(); final String prQName = getQueueName(sender.getId(), userRegion.getFullPath()); prQ = (PartitionedRegion) cache.getRegion(prQName); if (prQ == null) { pfact.setTotalNumBuckets(sender.getMaxParallelismForReplicatedRegion()); int localMaxMemory = userRegion.getDataPolicy().withStorage() ? sender.getMaximumQueueMemory() : 0; pfact.setLocalMaxMemory(localMaxMemory); pfact.setRedundantCopies(3); // TODO:Kishor : THis need to be handled nicely pfact.setPartitionResolver(new RREventIDResolver()); if (sender.isPersistenceEnabled()) { fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION); fact.setDiskStoreName(sender.getDiskStoreName()); if (sender.isPersistenceEnabled()) fact.setDiskSynchronous(sender.isDiskSynchronous()); else { fact.setDiskSynchronous(false); sender.getMaximumQueueMemory(), EvictionAction.OVERFLOW_TO_DISK); prQ.enableConflation(sender.isBatchConflationEnabled()); if ((this.index == this.nDispatcher - 1) && this.sender.isRunning()) {
// NOTE(review): truncated fragment (unbalanced braces; the trailing
// "this.sender.setServerLocation(null);" has no visible enclosing branch). Looks like
// connection-acquisition logic from a dispatcher; restore the full method before editing.
if (this.sender.getProxy() == null || this.sender.getProxy().isDestroyed()) { this.sender.initProxy(); } else { this.processor.resetBatchId(); if (this.sender.isParallel()) { con = this.sender.getProxy().acquireConnection(); sender.setServerLocation(con.getServer()); } else { synchronized (this.sender.getLockForConcurrentDispatcher()) { ServerLocation server = this.sender.getServerLocation(); if (server != null) { if (logger.isDebugEnabled()) { logger.debug("ServerLocation is: {}. Connecting to this serverLocation...", server); con = this.sender.getProxy().acquireConnection(server); } else { if (logger.isDebugEnabled()) { logger.debug("ServerLocation is null. Creating new connection. "); con = this.sender.getProxy().acquireConnection(); if (this.sender.isPrimary()) { if (sender.getServerLocation() == null) { sender.setServerLocation(con.getServer()); this.sender.setServerLocation(null);
// NOTE(review): truncated fragment of an event-processing loop (a catch/finally with no
// visible try, dangling argument lists such as "sender.isRemoveFromQueueOnException(),
// false);"). On InterruptedException the code sets a flag and continues — confirm the
// interrupt status is restored elsewhere in the full method. Restore before editing.
final int batchTimeInterval = sender.getBatchTimeInterval(); final GatewaySenderStats statistics = this.sender.getStatistics(); } catch (InterruptedException e) { interrupted = true; this.sender.getCancelCriterion().checkCancelInProgress(e); continue; // keep trying } finally { for (GatewayEventFilter filter : sender.getGatewayEventFilters()) { Iterator<GatewaySenderEventImpl> itr = filteredList.iterator(); while (itr.hasNext()) { if (!transmit) { if (isDebugEnabled) { logger.debug("{}: Did not transmit event due to filtering: {}", sender.getId(), event); if (this.getSender().isParallel() && (this.getDispatcher() instanceof GatewaySenderEventCallbackDispatcher)) { Iterator<GatewaySenderEventImpl> itr = filteredList.iterator(); sender.isRemoveFromQueueOnException(), false); if (success) { if (isDebugEnabled) { if (!resetLastPeekedEvents) { while (!this.dispatcher.dispatchBatch(conflatedEventsToBeDispatched, sender.isRemoveFromQueueOnException(), true)) {
/**
 * Creates the daemon thread that reads batch acknowledgements for the given sender.
 */
public AckReaderThread(GatewaySender sender, String name) {
  super("AckReaderThread for : " + name);
  // Daemon so this thread never keeps the JVM alive on its own.
  setDaemon(true);
  this.cache = ((AbstractGatewaySender) sender).getCache();
}
// NOTE(review): truncated fragment (unbalanced braces; the AckReaderThread construction at
// the end is cut off mid-statement). Part of serial-connection initialization; restore the
// full method from the original file before editing.
if (!this.sender.isParallel()) { if (this.connection == null || this.connection.isDestroyed() || !this.connection.getServer().equals(this.sender.getServerLocation())) { if (logger.isDebugEnabled()) { logger.debug( "Initializing new connection as serverLocation of old connection is : {} and the serverLocation to connect is {}", ((this.connection == null) ? "null" : this.connection.getServer()), this.sender.getServerLocation()); InternalCache cache = this.sender.getCache(); if (cache != null && !cache.isClosed()) { if (this.sender.isPrimary() && (this.connection != null)) { if (this.ackReaderThread == null || !this.ackReaderThread.isRunning()) { this.ackReaderThread = new AckReaderThread(this.sender, this.processor);
@Before
public void createParallelGatewaySenderQueue() {
  // Mock out the cache, the sender, and the meta-region factory up front.
  cache = mock(GemFireCacheImpl.class);
  sender = mock(AbstractGatewaySender.class);
  metaRegionFactory = mock(MetaRegionFactory.class);
  // Stub the sender collaborators the queue constructor touches.
  CancelCriterion cancel = mock(CancelCriterion.class);
  when(sender.getCancelCriterion()).thenReturn(cancel);
  when(sender.getCache()).thenReturn(cache);
  when(sender.getMaximumQueueMemory()).thenReturn(100);
  when(sender.getLifeCycleLock()).thenReturn(new ReentrantReadWriteLock());
  queue = new ParallelGatewaySenderQueue(sender, Collections.emptySet(), 0, 1,
      metaRegionFactory);
}
@Override public void afterCreate(EntryEvent event) { if (this.sender.isPrimary()) { // The secondary has failed over to become the primary. There is a small // window where the secondary has become the primary, but the listener // is // still set. Ignore any updates to the map at this point. It is unknown // what the state of the map is. This may result in duplicate events // being sent. return; } // There is a small window where queue has not been created fully yet. // The underlying region of the queue is created, and it receives afterDestroy callback final Set<RegionQueue> queues = this.sender.getQueues(); if (queues != null && !queues.isEmpty()) { this.sender.getStatistics().incQueueSize(); } // fix bug 35730 // Send event to the event dispatcher GatewaySenderEventImpl senderEvent = (GatewaySenderEventImpl) event.getNewValue(); this.processor.handlePrimaryEvent(senderEvent); }
/**
 * Handles a BatchException from the dispatcher. The dispatcher is expected to have
 * already removed every event up to the one that failed, so this method only records
 * the redistribution and forces the next peek to start over.
 */
public void handleException() {
  this.sender.getStatistics().incBatchesRedistributed();
  this.resetLastPeekedEvents = true;
}
// NOTE(review): truncated constructor fragment — the first String.format block is cut off
// and a second, orphaned "new Object[] {...}" argument list follows it; braces are
// unbalanced. Restore the full constructor from the original file before editing.
this.stats = sender.getStatistics(); this.sender = sender; if (this.sender.getId().contains(AsyncEventQueueImpl.ASYNC_EVENT_QUEUE_PREFIX)) { throw new AsyncEventQueueConfigurationException( String.format( "Parallel Async Event Queue %s can not be used with replicated region %s", new Object[] { AsyncEventQueueImpl.getAsyncEventQueueIdFromSenderId(this.sender.getId()), userRegion.getFullPath()})); new Object[] {this.sender.getId(), userRegion.getFullPath()})); buckToDispatchLock = new StoppableReentrantLock(sender.getCancelCriterion()); regionToDispatchedKeysMapEmpty = buckToDispatchLock.newCondition(); queueEmptyLock = new StoppableReentrantLock(sender.getCancelCriterion()); queueEmptyCondition = queueEmptyLock.newCondition(); if (sender.isBatchConflationEnabled()) { initializeConflationThreadPool();
private void createGatewaySender() { // Mock gateway sender this.sender = ParallelGatewaySenderHelper.createGatewaySender(this.cache); when(this.queueRegion.getParallelGatewaySender()).thenReturn(this.sender); when(this.sender.getQueues()).thenReturn(null); when(this.sender.getDispatcherThreads()).thenReturn(1); stats = new GatewaySenderStats(new DummyStatisticsFactory(), "ln"); when(this.sender.getStatistics()).thenReturn(stats); }
/**
 * Returns {@code true} once every queue of the sender has drained to zero events.
 */
@Override
public boolean done() {
  Set<RegionQueue> queues = ((AbstractGatewaySender) sender).getQueues();
  int size = 0;
  for (RegionQueue q : queues) {
    size += q.size();
  }
  // Collapse the original if/return-true/return-false into a direct boolean return.
  return size == 0;
}
// NOTE(review): truncated fragment — the "e);" after isRemoveFromQueueOnException() is the
// tail of a cut-off call (likely a logger invocation), and braces are unbalanced. Restore
// the full exception-handling method from the original file before editing.
final GatewaySenderStats statistics = sender.getStatistics(); statistics.incBatchesRedistributed(); if (sender.isRemoveFromQueueOnException()) { e); sender.getLifeCycleLock().writeLock().lock(); try { processor.stopProcessing(); sender.clearTempEventsAfterSenderStopped(); } finally { sender.getLifeCycleLock().writeLock().unlock();
// NOTE(review): truncated fragment — the enqueue itself and the catch block that the
// dangling "getSender().getCancelCriterion().checkCancelInProgress(e);" belongs to are
// missing, and braces are unbalanced. Restore the full method before editing.
private boolean queuePrimaryEvent(GatewaySenderEventImpl gatewayEvent) throws IOException, CacheException { GatewaySenderStats statistics = this.sender.getStatistics(); if (logger.isDebugEnabled()) { logger.debug("{}: Queueing event ({}): {}", sender.getId(), (statistics.getEventsQueued() + 1), gatewayEvent); if (!sender.beforeEnqueue(gatewayEvent)) { if (logger.isDebugEnabled()) { logger.debug("Event {} is not added to queue.", gatewayEvent); getSender().getCancelCriterion().checkCancelInProgress(e); logger.debug("{}: Queued event ({}): {}", sender.getId(), (statistics.getEventsQueued()), gatewayEvent); if (!this.eventQueueSizeWarning && queueSize >= AbstractGatewaySender.QUEUE_SIZE_THRESHOLD) { logger.warn("{}: The event queue has reached {} events. Processing will continue.", sender.getId(), Integer.valueOf(AbstractGatewaySender.QUEUE_SIZE_THRESHOLD)); this.eventQueueSizeWarning = true;
// NOTE(review): truncated fragment — the first "sender.getId(), gatewayEvent..." argument
// list has no enclosing call (its logger.trace(...) line was lost) and braces are
// unbalanced. Restore the full method from the original file before editing.
protected void basicHandlePrimaryEvent(final GatewaySenderEventImpl gatewayEvent) { if (this.sender.isPrimary()) { GatewaySenderStats statistics = this.sender.getStatistics(); sender.getId(), gatewayEvent.getEventId(), gatewayEvent.getKey(), gatewayEvent.getValueAsString(true)); logger.trace( "{}: Primary create/update event {}:{}->{} remove from unprocessed events map", sender.getId(), gatewayEvent.getEventId(), gatewayEvent.getKey(), gatewayEvent.getValueAsString(true));