/**
 * Sums the per-bucket counters of every bucket hosted locally by the named
 * partitioned region, logging each bucket's counter (divided by 1,000,000)
 * along the way.
 *
 * @param prRegionName name of the partitioned region to inspect
 * @return the sum of {@code getCounter()} over all non-null local bucket regions
 */
public static long getCounterForBuckets(String prRegionName) {
  long bucketSize = 0;
  final PartitionedRegion pr = (PartitionedRegion)cache.getRegion(prRegionName);
  // Iterate all buckets hosted on this member's data store.
  for (final Object o : pr.getDataStore().getAllLocalBuckets()) {
    final Map.Entry entry = (Map.Entry)o;
    final BucketRegion bucketRegion = (BucketRegion)entry.getValue();
    if (bucketRegion == null) {
      // bucket id present but no region hosted here; skip it
      continue;
    }
    // fixed: missing space between bucket name and "of Pr" in the log message
    getLogWriter().info(
        "Size of bucket " + bucketRegion.getName() + " of Pr " + prRegionName
            + " = " + bucketRegion.getCounter() / (1000000));
    bucketSize = bucketSize + bucketRegion.getCounter();
  }
  return bucketSize;
}
/**
 * Part 2 of an entry invalidate on this bucket region.
 *
 * <p>For events that originated on this member (origin not remote): if the
 * event has no version tag, or only a gateway tag, a tag is generated from
 * the entry and attached to the event; the invalidate is then distributed
 * via an {@code InvalidateOperation} before delegating to the superclass.
 * Remote-origin events skip straight to the superclass call.
 *
 * @param re              the region entry being invalidated
 * @param event           the invalidate event; may receive a generated version tag
 * @param clearConflict   whether a clear conflict occurred (passed through)
 * @param invokeCallbacks whether listener callbacks should run (passed through)
 */
@Override
void basicInvalidatePart2(final RegionEntry re, final EntryEventImpl event,
    boolean clearConflict, boolean invokeCallbacks) {
  // Assumed this is called with the entry synchronized
  if (!event.isOriginRemote()) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      VersionTag v = re.generateVersionTag(null, false, this, event);
      if (logger.isDebugEnabled() && v != null) {
        logger.debug("generated version tag {} in region {}", v, this.getName());
      }
      // attach the freshly generated tag so it travels with the distribution
      event.setVersionTag(v);
    }

    // This code assumes it is safe ignore token mode (GII in progress)
    // because it assumes when the origin of the event is local,
    // the GII has completed and the region is initialized and open for local
    // ops

    // This code assumes that this bucket is primary
    // distribute op to bucket secondaries and event to other listeners
    InvalidateOperation op = new InvalidateOperation(event);
    op.distribute();
  }
  super.basicInvalidatePart2(re, event, clearConflict /*Clear conflict occurred */, invokeCallbacks);
}
/**
 * Verifies that this bucket is currently the primary copy.
 *
 * <p>When it is not, the partitioned region and this bucket are first
 * checked for readiness (so shutdown/closure surfaces as its own error),
 * and then a {@code PrimaryBucketException} naming the current primary
 * holder is thrown.
 *
 * @throws PrimaryBucketException if this bucket is not primary
 */
public void checkForPrimary() {
  if (!getBucketAdvisor().isPrimary()) {
    this.partitionedRegion.checkReadiness();
    checkReadiness();
    final InternalDistributedMember primaryHolder =
        getBucketAdvisor().basicGetPrimaryMember();
    throw new PrimaryBucketException("Bucket " + getName()
        + " is not primary. Current primary holder is " + primaryHolder);
  }
}
/**
 * Ensures this bucket is the primary; otherwise raises
 * {@code PrimaryBucketException} after readiness checks on the parent
 * partitioned region and this bucket.
 *
 * @throws PrimaryBucketException if another member currently holds primary
 */
public void checkForPrimary() {
  final boolean primary = getBucketAdvisor().isPrimary();
  if (primary) {
    return; // fast path: we are the primary, nothing to do
  }
  // Surface region/bucket shutdown as the more specific readiness error first.
  this.partitionedRegion.checkReadiness();
  checkReadiness();
  final InternalDistributedMember primaryHolder =
      getBucketAdvisor().basicGetPrimaryMember();
  throw new PrimaryBucketException("Bucket " + getName()
      + " is not primary. Current primary holder is " + primaryHolder);
}
/**
 * Hook run just before an entry is removed as part of a destroy.
 *
 * <p>Distributes a {@code DestroyOperation} only for events that originated
 * locally and are not part of a bulk op, not local-only operations, not
 * evict-destroys, and not expirations while entry evict-destroy is enabled.
 * A version tag is generated first when the event has none (or only a
 * gateway tag).
 *
 * <p>NOTE(review): unlike {@code basicInvalidatePart2}, the generated tag
 * {@code v} is not explicitly set on the event here — presumably
 * {@code generateVersionTag} attaches it internally; confirm.
 *
 * @param entry the region entry about to be removed
 * @param event the destroy event driving the removal
 */
@Override
protected void basicDestroyBeforeRemoval(RegionEntry entry, EntryEventImpl event) {
  // Assumed this is called with entry synchrony
  if (!event.isOriginRemote()
      && !event.isBulkOpInProgress()
      && !event.getOperation().isLocal()
      && !Operation.EVICT_DESTROY.equals(event.getOperation())
      && !(event.isExpiration() && isEntryEvictDestroyEnabled())) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      VersionTag v = entry.generateVersionTag(null, false, this, event);
      if (logger.isDebugEnabled() && v != null) {
        logger.debug("generated version tag {} in region {}", v, this.getName());
      }
    }

    // This code assumes it is safe ignore token mode (GII in progress)
    // because it assume when the origin of the event is local,
    // then GII has completed (the region has been completely initialized)

    // This code assumes that this bucket is primary
    new DestroyOperation(event).distribute();
  }
  super.basicDestroyBeforeRemoval(entry, event);
}
/**
 * Estimates this bucket's entry count.
 *
 * <p>For HDFS read/write regions the estimate is the size of the bucket's
 * HDFS queue region plus the hoplog organizer's estimate; this path requires
 * this bucket to be primary. Otherwise the exact in-memory {@code size()} is
 * returned.
 *
 * @return estimated (HDFS case) or exact (otherwise) number of entries
 * @throws PrimaryBucketException if the HDFS estimate cannot be obtained
 *         (wraps {@code ForceReattemptException}) or the bucket is not primary
 */
@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) return 0;
      int hdfsBucketRegionSize = q.getBucketRegionQueue(
          partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (logger.isDebugEnabled()) {
        // use parameterized logging (consistent with the rest of this file)
        // instead of string concatenation
        logger.debug("for bucket {} estimateSize returning {}", getName(),
            hdfsBucketRegionSize + hoplogEstimate);
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      // preserve the cause so callers can diagnose the underlying failure
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
/**
 * Distributes an update for {@code event} to this bucket's secondaries and
 * invokes callbacks for non-putAll operations.
 *
 * <p>Distribution happens only when the event originated locally, is not a
 * net-search result, and this bucket is primary. Inside a bulk op the entry
 * is folded into the pending PutAll message instead of being sent as its own
 * {@code UpdateOperation}.
 *
 * @param event        the update event to distribute
 * @param lastModified last-modified timestamp carried by the update operation
 */
protected void distributeUpdateOperation(EntryEventImpl event, long lastModified) {
  if (!event.isOriginRemote() && !event.isNetSearch() && getBucketAdvisor().isPrimary()) {
    if (event.isBulkOpInProgress()) {
      // consolidate the UpdateOperation for each entry into a PutAllMessage
      // since we did not call basicPutPart3(), so we have to explicitly addEntry here
      event.getPutAllOperation().addEntry(event, this.getId());
    } else {
      new UpdateOperation(event, lastModified).distribute();
      if (logger.isDebugEnabled()) {
        logger.debug("sent update operation : for region : {}: with event: {}",
            this.getName(), event);
      }
    }
  }
  if (!event.getOperation().isPutAll()) {
    // putAll will invoke listeners later
    event.invokeCallbacks(this, true, true);
  }
}
if (logger.isDebugEnabled()) { logger.debug("getLocally: key {}) bucketId={}{}{} region {} returnTombstones {} allowReadFromHDFS {}", key, this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), returnTombstones, allowReadFromHDFS);
public boolean takeSnapshotGIIWriteLock(MembershipListener listener) { if (writeLockEnabled()) { if (this.getPartitionedRegion().isInternalColumnTable()) { BucketRegion bufferRegion = getBufferRegion(); return bufferRegion.takeSnapshotGIIWriteLock(listener); } else { final LogWriterI18n logger = getCache().getLoggerI18n(); if (logger.fineEnabled()) { logger.fine("Taking exclusive snapshotGIILock on bucket " + this.getName()); } snapshotGIILock.attemptLock(LockMode.EX, -1, giiWriteLockForSIOwner); getBucketAdvisor() .addMembershipListenerAndAdviseGeneric(listener); snapshotGIILocked = true; this.giiListener = listener; // Set the listener only after taking the write lock. if (logger.fineEnabled()) { logger.fine("Succesfully took exclusive lock on bucket " + this.getName()); } return true; } } else { return false; } }
if (logger.isDebugEnabled()) { logger.debug("containsKeyLocally: key {}) bucketId={}{}{} region {} returns {}", key, this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, bucketRegion.getName(), ret);
/**
 * Releases the shared (SH) snapshot GII read lock on this bucket.
 *
 * <p>No-op when read locking is disabled. For internal column tables the
 * release is delegated to the buffer region.
 */
public void releaseSnapshotGIIReadLock() {
  if (!readLockEnabled()) {
    return; // read locking disabled, nothing was acquired
  }
  if (this.getPartitionedRegion().isInternalColumnTable()) {
    // column tables delegate snapshot GII locking to their buffer region
    getBufferRegion().releaseSnapshotGIIReadLock();
    return;
  }
  final LogWriterI18n log = getCache().getLoggerI18n();
  if (log.fineEnabled()) {
    log.fine("Releasing readonly snapshotGIILock on bucket " + this.getName());
  }
  snapshotGIILock.releaseLock(LockMode.SH, false, giiReadLockForSIOwner);
}
/**
 * Releases the exclusive (EX) snapshot GII lock on this bucket, if held.
 *
 * <p>No-op when write locking is disabled. For internal column tables the
 * release is delegated to the buffer region. The lock is released only when
 * this member actually holds it exclusively and the {@code snapshotGIILocked}
 * flag is set; the GII membership listener registered at lock time is removed
 * as part of the release.
 */
public void releaseSnapshotGIIWriteLock() {
  if (writeLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      // column tables delegate snapshot GII locking to their buffer region
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.releaseSnapshotGIIWriteLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Releasing exclusive snapshotGIILock on bucket " + this.getName());
      }
      // guard against double-release: only proceed if we hold the EX lock
      // and our local flag confirms we took it
      if (this.snapshotGIILock.hasExclusiveLock(giiWriteLockForSIOwner, null)) {
        if (snapshotGIILocked) {
          snapshotGIILock.releaseLock(LockMode.EX, false, giiWriteLockForSIOwner);
          // tear down the listener registered in takeSnapshotGIIWriteLock
          getBucketAdvisor().removeMembershipListener(giiListener);
          this.giiListener = null;
          snapshotGIILocked = false;
        }
      }
      if (logger.fineEnabled()) {
        logger.fine("Released exclusive snapshotGIILock on bucket " + this.getName());
      }
    }
  }
}
this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, keyInfo.getBucketId(), bucketRegion.getName());
logger.fine("getLocally: key(" + key + ") bucketId=" + partitionedRegion.bucketStringForLogs(bucketId) + " region " + bucketRegion.getName() + " returnTombstones=" + returnTombstones +" allowReadFromHDFS="+allowReadFromHDFS);
/**
 * Hook run just before an entry is removed as part of a destroy.
 *
 * <p>Distributes a {@code DestroyOperation} only for locally-originated
 * events that are not local-only operations, not evict-destroys, and not
 * expirations while entry evict-destroy is enabled. A version tag is
 * generated first when the event has none (or only a gateway tag).
 *
 * <p>NOTE(review): the generated tag {@code v} is only logged, not set on
 * the event — presumably {@code generateVersionTag} attaches it internally;
 * confirm.
 *
 * @param entry the region entry about to be removed
 * @param event the destroy event driving the removal
 */
@Override
protected void basicDestroyBeforeRemoval(RegionEntry entry, EntryEventImpl event) {
  // Assumed this is called with entry synchrony
  if (!event.isOriginRemote()
      && !event.getOperation().isLocal()
      && !Operation.EVICT_DESTROY.equals(event.getOperation())
      && !(event.isExpiration() && isEntryEvictDestroyEnabled())) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      LogWriterI18n log = getCache().getLoggerI18n();
      VersionTag v = entry.generateVersionTag(null, false, false, this, event);
      if (log.fineEnabled() && v != null) {
        log.fine("generated version tag " + v + /*" for " + event.getKey() +*/
            " in region " + this.getName());
      }
    }

    // This code assumes it is safe ignore token mode (GII in progress)
    // because it assume when the origin of the event is local,
    // then GII has completed (the region has been completely initialized)

    // This code assumes that this bucket is primary
    new DestroyOperation(event).distribute();
  }
  super.basicDestroyBeforeRemoval(entry, event);
}
if (v != null) { if (logger.isDebugEnabled()) { logger.debug("generated version tag {} in region {}", v, this.getName());
logger.fine("getSerializedLocally: key " + key + ") bucketId=" + partitionedRegion.bucketStringForLogs(bucketId) + " region " + bucketRegion.getName());
/**
 * Estimates this bucket's entry count.
 *
 * <p>For HDFS read/write regions the estimate is the size of the bucket's
 * HDFS queue region plus the hoplog organizer's estimate; this path requires
 * this bucket to be primary. Otherwise the exact in-memory {@code size()} is
 * returned.
 *
 * @return estimated (HDFS case) or exact (otherwise) number of entries
 * @throws PrimaryBucketException if the HDFS estimate cannot be obtained
 *         (wraps {@code ForceReattemptException}) or the bucket is not primary
 */
@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) return 0; // no queue means nothing buffered for HDFS
      int hdfsBucketRegionSize = q.getBucketRegionQueue(
          partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (getLogWriterI18n().fineEnabled()) {
        getLogWriterI18n().fine("for bucket " + getName()
            + " estimateSize returning "
            + (hdfsBucketRegionSize + hoplogEstimate));
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      // preserve the cause so callers can diagnose the underlying failure
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
/**
 * Part 2 of an entry invalidate on this bucket region.
 *
 * <p>For events that originated on this member (origin not remote): if the
 * event has no version tag, or only a gateway tag, a tag is generated from
 * the entry and attached to the event; the invalidate is then distributed
 * via an {@code InvalidateOperation} before delegating to the superclass.
 * Remote-origin events skip straight to the superclass call.
 *
 * @param re              the region entry being invalidated
 * @param event           the invalidate event; may receive a generated version tag
 * @param clearConflict   whether a clear conflict occurred (passed through)
 * @param invokeCallbacks whether listener callbacks should run (passed through)
 */
@Override
void basicInvalidatePart2(final RegionEntry re, final EntryEventImpl event,
    boolean clearConflict, boolean invokeCallbacks) {
  // Assumed this is called with the entry synchronized
  if (!event.isOriginRemote()) {
    if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
      LogWriterI18n log = getCache().getLoggerI18n();
      VersionTag v = re.generateVersionTag(null, false, false, this, event);
      if (log.fineEnabled() && v != null) {
        log.fine("generated version tag " + v + /*" for " + event.getKey() +*/
            " in region " + this.getName());
      }
      // attach the freshly generated tag so it travels with the distribution
      event.setVersionTag(v);
    }

    // This code assumes it is safe ignore token mode (GII in progress)
    // because it assumes when the origin of the event is local,
    // the GII has completed and the region is initialized and open for local
    // ops

    // This code assumes that this bucket is primary
    // distribute op to bucket secondaries and event to other listeners
    InvalidateOperation op = new InvalidateOperation(event);
    op.distribute();
  }
  super.basicInvalidatePart2(re, event, clearConflict /*Clear conflict occurred */, invokeCallbacks);
}
logger.fine("containsKeyLocally: key " + key + ") bucketId=" + partitionedRegion.bucketStringForLogs(bucketId) + " region " + bucketRegion.getName() + " returns " + ret);