/**
 * After processing the key, this method removes it from the CM
 * and calls notifyAll for the key.
 * The lock on the CM is acquired before the removal.
 *
 * @param key
 */
public void removeAndNotifyKey(final Object key) {
  final LogWriterI18n logger = getCache().getLoggerI18n();
  synchronized (allKeysMap) {
    removeAndNotifyKeyNoLock(key, logger);
  }
}
/**
 * After processing the keys, this method removes them from the CM
 * and calls notifyAll for each key.
 * The lock on the CM is acquired before the removal.
 *
 * @param keys
 */
public void removeAndNotifyKeys(Object keys[]) {
  final LogWriterI18n logger = getCache().getLoggerI18n();
  synchronized (allKeysMap) {
    for (int i = 0; i < keys.length; i++) {
      removeAndNotifyKeyNoLock(keys[i], logger);
    } // for
  }
}
/**
 * Fix for Bug#45917.
 * We update the seqNumber so that new seqNumbers are
 * generated starting from the latest in the system.
 *
 * @param l
 */
public void updateEventSeqNum(long l) {
  setIfGreater(this.eventSeqNum, l);
  if (getCache().getLoggerI18n().fineEnabled()) {
    getCache().getLoggerI18n().fine(
        "WAN: On bucket " + getId() + " , setting the seq number as " + l
            + " , before GII");
  }
}
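// Illustrative sketch, not part of the original source: one way a
// setIfGreater-style helper could work, assuming this.eventSeqNum is an
// AtomicLong. The name setIfGreaterSketch is hypothetical; it only advances
// the counter when the candidate is larger, retrying on CAS failure so
// concurrent updaters cannot move the sequence number backwards.
private static void setIfGreaterSketch(java.util.concurrent.atomic.AtomicLong current,
    long candidate) {
  long observed = current.get();
  while (candidate > observed) {
    if (current.compareAndSet(observed, candidate)) {
      return; // advanced to the larger value
    }
    observed = current.get(); // another thread changed it; re-evaluate
  }
}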
private RegionEntry getEntryFromFuture(Object key) {
  FutureResult future = new FutureResult(this.owner.getCancelCriterion());
  FutureResult old = this.futures.putIfAbsent(key, future);
  if (old != null) {
    if (logger.isTraceEnabled() || DEBUG) {
      logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG,
          "HDFS: waiting for concurrent fetch to complete for key:" + key));
    }
    try {
      return (RegionEntry) old.get();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      this.owner.getCache().getCancelCriterion().checkCancelInProgress(null);
    }
  }
  return null;
}
/**
 * Keep checking whether the CM contains any of the given keys. If it does,
 * wait to be notified and retry. This method may block the current thread
 * for a long time; it returns only once the current thread has successfully
 * saved its keys into the CM.
 *
 * @param keys
 */
public void waitUntilLocked(Object keys[]) {
  final String title = "BucketRegion.waitUntilLocked:";
  final LogWriterI18n logger = getCache().getLoggerI18n();
  while (true) {
    LockObject foundLock = searchAndLock(keys);
    if (foundLock != null) {
      waitForLock(foundLock, logger, title);
    } else {
      // now the keys have been locked by this thread
      break;
    } // to lock and process
  } // while
}
/**
 * Search the CM for the key. If found, return the lock for the key.
 * Otherwise, save the key into the CM and return null.
 * The thread acquires the lock on the CM before searching.
 *
 * @param key
 */
private LockObject searchAndLock(final Object key) {
  final LogWriterI18n logger = getCache().getLoggerI18n();
  LockObject foundLock = null;
  synchronized (allKeysMap) {
    // check if there's any key in the map
    if ((foundLock = searchLock(key, logger)) == null) {
      // save the key while still holding the lock
      addNewLock(key, logger);
    }
  }
  return foundLock;
}
/**
 * Keep checking whether the CM contains the given key. If it does, wait to
 * be notified and retry. This method may block the current thread for a long
 * time; it returns only once the current thread has successfully saved its
 * key into the CM.
 *
 * @param key
 */
public void waitUntilLocked(final Object key) {
  final String title = "BucketRegion.waitUntilLocked:";
  final LogWriterI18n logger = getCache().getLoggerI18n();
  while (true) {
    LockObject foundLock = searchAndLock(key);
    if (foundLock != null) {
      waitForLock(foundLock, logger, title);
    } else {
      // now the key has been locked by this thread
      break;
    } // to lock and process
  } // while
}
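// Illustrative sketch, not part of the original source: the typical calling
// pattern for the key-lock protocol above (waitUntilLocked followed by
// removeAndNotifyKeys). The method name and the Runnable-based callback are
// hypothetical; real callers inline their per-key processing.
private void processKeysUnderLock(Object[] keys, Runnable work) {
  waitUntilLocked(keys); // block until this thread owns every key in the CM
  try {
    work.run(); // process the keys while other threads wait on them
  } finally {
    removeAndNotifyKeys(keys); // remove the keys from the CM and wake up waiters
  }
}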
private void destroyAllEntries(Set keysToDestroy, long batchKey) {
  for (Object key : keysToDestroy) {
    if (getCache().getLoggerI18n().fineEnabled()) {
      getCache().getLoggerI18n().fine(
          "Destroying the entries after creating ColumnBatch " + key
              + " batchid " + batchKey + " total size " + this.size()
              + " keysToDestroy size " + keysToDestroy.size());
    }
    EntryEventImpl event = EntryEventImpl.create(getPartitionedRegion(),
        Operation.DESTROY, null, null, null, false, this.getMyId());
    event.setKey(key);
    event.setBucketId(this.getId());

    TXStateInterface txState = event.getTXState(this);
    if (txState != null) {
      event.setRegion(this);
      txState.destroyExistingEntry(event, true, null);
    } else {
      this.getPartitionedRegion().basicDestroy(event, true, null);
    }
  }
  if (getCache().getLoggerI18n().fineEnabled()) {
    getCache().getLoggerI18n().fine(
        "Destroyed all for batchID " + batchKey + " total size " + this.size());
  }
}
@Override
public void invokeTXCallbacks(final EnumListenerEvent eventType,
    final EntryEventImpl event, final boolean callDispatchListenerEvent,
    final boolean notifyGateway) {
  if (getCache().getLogger().fineEnabled()) {
    getCache().getLogger().fine("BR.invokeTXCallbacks for event " + event);
  }
  // bucket events may make it to this point even though the bucket is still
  // initializing. We can't block while initializing or a GII state flush
  // may hang, so we avoid notifying the bucket
  if (this.isInitialized()) {
    boolean callThem = callDispatchListenerEvent;
    if (event.isPossibleDuplicate()
        && this.eventTracker.isInitialImageProvider(event.getDistributedMember())) {
      callThem = false;
    }
    super.invokeTXCallbacks(eventType, event, callThem, notifyGateway);
  }
  final EntryEventImpl prevent = createEventForPR(event);
  try {
    this.partitionedRegion.invokeTXCallbacks(eventType, prevent,
        this.partitionedRegion.isInitialized() ? callDispatchListenerEvent : false,
        false);
  } finally {
    prevent.release();
  }
}
@Override
public void invokePutCallbacks(final EnumListenerEvent eventType,
    final EntryEventImpl event, final boolean callDispatchListenerEvent,
    boolean notifyGateways) {
  if (getCache().getLogger().finerEnabled()) {
    getCache().getLogger().finer(
        "invoking put callbacks on bucket for event " + event);
  }
  // bucket events may make it to this point even though the bucket is still
  // initializing. We can't block while initializing or a GII state flush
  // may hang, so we avoid notifying the bucket
  if (this.isInitialized()) {
    boolean callThem = callDispatchListenerEvent;
    if (callThem && event.isPossibleDuplicate()
        && this.eventTracker.isInitialImageProvider(event.getDistributedMember())) {
      callThem = false;
    }
    super.invokePutCallbacks(eventType, event, callThem, notifyGateways);
  }
  final EntryEventImpl prevent = createEventForPR(event);
  try {
    this.partitionedRegion.invokePutCallbacks(eventType, prevent,
        this.partitionedRegion.isInitialized() ? callDispatchListenerEvent : false,
        false);
  } finally {
    prevent.release();
  }
}
public void takeSnapshotGIIReadLock() {
  if (readLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.takeSnapshotGIIReadLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Taking readonly snapshotGIILock on bucket " + this);
      }
      snapshotGIILock.attemptLock(LockMode.SH, -1, giiReadLockForSIOwner);
    }
  }
}
protected void distributeDestroyOperation(EntryEventImpl event) {
  if (DistributionManager.VERBOSE) {
    getCache().getLoggerI18n().info(LocalizedStrings.DEBUG,
        "BR.basicDestroy: this cache has already seen this event " + event);
  }
  if (!event.isOriginRemote() && getBucketAdvisor().isPrimary()) {
    // This cache has processed the event, forward operation
    // and event messages to backup buckets
    event.setOldValueFromRegion();
    new DestroyOperation(event).distribute();
  }
  event.invokeCallbacks(this, true, false);
}
public void releaseSnapshotGIIReadLock() {
  if (readLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.releaseSnapshotGIIReadLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Releasing readonly snapshotGIILock on bucket "
            + this.getName());
      }
      snapshotGIILock.releaseLock(LockMode.SH, false, giiReadLockForSIOwner);
    }
  }
}
@Override
protected void basicDestroyBeforeRemoval(RegionEntry entry, EntryEventImpl event) {
  // Assumes this is called with the entry synchronized
  if (!event.isOriginRemote() && !event.getOperation().isLocal()
      && !Operation.EVICT_DESTROY.equals(event.getOperation())
      && !(event.isExpiration() && isEntryEvictDestroyEnabled())) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      LogWriterI18n log = getCache().getLoggerI18n();
      VersionTag v = entry.generateVersionTag(null, false, false, this, event);
      if (log.fineEnabled() && v != null) {
        log.fine("generated version tag " + v
            + /*" for " + event.getKey() +*/ " in region " + this.getName());
      }
    }
    // This code assumes it is safe to ignore token mode (GII in progress)
    // because it assumes that when the origin of the event is local,
    // GII has completed (the region has been completely initialized).
    // This code assumes that this bucket is primary.
    new DestroyOperation(event).distribute();
  }
  super.basicDestroyBeforeRemoval(entry, event);
}
@Override
void basicInvalidatePart2(final RegionEntry re, final EntryEventImpl event,
    boolean clearConflict, boolean invokeCallbacks) {
  // Assumes this is called with the entry synchronized
  if (!event.isOriginRemote()) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      LogWriterI18n log = getCache().getLoggerI18n();
      VersionTag v = re.generateVersionTag(null, false, false, this, event);
      if (log.fineEnabled() && v != null) {
        log.fine("generated version tag " + v
            + /*" for " + event.getKey() +*/ " in region " + this.getName());
      }
      event.setVersionTag(v);
    }
    // This code assumes it is safe to ignore token mode (GII in progress)
    // because it assumes that when the origin of the event is local,
    // GII has completed and the region is initialized and open for local ops.
    // This code assumes that this bucket is primary.
    // distribute op to bucket secondaries and event to other listeners
    InvalidateOperation op = new InvalidateOperation(event);
    op.distribute();
  }
  super.basicInvalidatePart2(re, event,
      clearConflict /* Clear conflict occurred */, invokeCallbacks);
}
public boolean takeSnapshotGIIWriteLock(MembershipListener listener) {
  if (writeLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      return bufferRegion.takeSnapshotGIIWriteLock(listener);
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Taking exclusive snapshotGIILock on bucket "
            + this.getName());
      }
      snapshotGIILock.attemptLock(LockMode.EX, -1, giiWriteLockForSIOwner);
      getBucketAdvisor().addMembershipListenerAndAdviseGeneric(listener);
      snapshotGIILocked = true;
      // Set the listener only after taking the write lock.
      this.giiListener = listener;
      if (logger.fineEnabled()) {
        logger.fine("Successfully took exclusive lock on bucket "
            + this.getName());
      }
      return true;
    }
  } else {
    return false;
  }
}
public void releaseSnapshotGIIWriteLock() {
  if (writeLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.releaseSnapshotGIIWriteLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Releasing exclusive snapshotGIILock on bucket "
            + this.getName());
      }
      if (this.snapshotGIILock.hasExclusiveLock(giiWriteLockForSIOwner, null)) {
        if (snapshotGIILocked) {
          snapshotGIILock.releaseLock(LockMode.EX, false, giiWriteLockForSIOwner);
          getBucketAdvisor().removeMembershipListener(giiListener);
          this.giiListener = null;
          snapshotGIILocked = false;
        }
      }
      if (logger.fineEnabled()) {
        logger.fine("Released exclusive snapshotGIILock on bucket "
            + this.getName());
      }
    }
  }
}
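// Illustrative sketch, not part of the original source: how the snapshot GII
// write lock above is typically paired with its release. The method name and
// the Runnable-based callback are hypothetical; it assumes the caller supplies
// the MembershipListener that should observe departures while the lock is held.
private void runUnderSnapshotGIIWriteLock(MembershipListener listener, Runnable giiWork) {
  boolean locked = takeSnapshotGIIWriteLock(listener);
  try {
    giiWork.run(); // perform the GII work while snapshot readers are excluded
  } finally {
    if (locked) {
      releaseSnapshotGIIWriteLock(); // always release, even if the GII work fails
    }
  }
}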
@Override
void basicUpdateEntryVersion(EntryEventImpl event) throws EntryNotFoundException {
  Assert.assertTrue(!isTX());
  Assert.assertTrue(event.getOperation().isDistributed());

  beginLocalWrite(event);
  try {
    if (!hasSeenEvent(event)) {
      this.entries.updateEntryVersion(event);
    } else {
      if (DistributionManager.VERBOSE) {
        getCache().getLoggerI18n().info(LocalizedStrings.DEBUG,
            "BR.basicUpdateEntryVersion: this cache has already seen this event "
                + event);
      }
    }
    if (!event.isOriginRemote() && getBucketAdvisor().isPrimary()) {
      // This cache has processed the event, forward operation
      // and event messages to backup buckets
      new UpdateEntryVersionOperation(event).distribute();
    }
    return;
  } finally {
    endLocalWrite(event);
  }
}
private ConcurrentParallelGatewaySenderQueue getHDFSQueue()
    throws ForceReattemptException {
  if (this.hdfsQueue == null) {
    String asyncQId = this.owner.getPartitionedRegion().getHDFSEventQueueName();
    final AsyncEventQueueImpl asyncQ =
        (AsyncEventQueueImpl)this.owner.getCache().getAsyncEventQueue(asyncQId);
    final ParallelGatewaySenderImpl gatewaySender =
        (ParallelGatewaySenderImpl)asyncQ.getSender();
    AbstractGatewaySenderEventProcessor ep = gatewaySender.getEventProcessor();
    if (ep == null) return null;
    hdfsQueue = (ConcurrentParallelGatewaySenderQueue)ep.getQueue();
  }

  // Check whether the queue has become primary here.
  // There could be some time between the bucket becoming primary
  // and the underlying queue becoming primary, so isPrimaryWithWait()
  // waits for some time for the queue to become primary on this member.
  final HDFSBucketRegionQueue brq = hdfsQueue.getBucketRegionQueue(
      this.owner.getPartitionedRegion(), this.owner.getId());
  if (brq != null) {
    if (owner.getBucketAdvisor().isPrimary()
        && !brq.getBucketAdvisor().isPrimaryWithWait()) {
      InternalDistributedMember primaryHolder =
          brq.getBucketAdvisor().basicGetPrimaryMember();
      throw new PrimaryBucketException("Bucket " + brq.getName()
          + " is not primary. Current primary holder is " + primaryHolder);
    }
  }
  return hdfsQueue;
}