@Override
public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) return 0;
      int hdfsBucketRegionSize = q.getBucketRegionQueue(
          partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (logger.isDebugEnabled()) {
        logger.debug("for bucket " + getName() + " estimateSize returning "
            + (hdfsBucketRegionSize + hoplogEstimate));
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
public Set<Integer> getAllLocalPrimaryBucketIdsBetweenProvidedIds(int low, int high) {
  Set<Integer> bucketIds = new HashSet<Integer>();
  for (Map.Entry<Integer, BucketRegion> bucketEntry : getAllLocalBuckets()) {
    BucketRegion bucket = bucketEntry.getValue();
    if (bucket.getBucketAdvisor().isPrimary()
        && (bucket.getId() >= low) && (bucket.getId() < high)) {
      bucketIds.add(Integer.valueOf(bucket.getId()));
    }
  }
  return bucketIds;
}
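A minimal usage sketch (the "dataStore" receiver is a hypothetical stand-in for whatever object hosts the local buckets). Note from the comparisons above that the range is half-open: low is inclusive, high is exclusive.

// Collect the ids of local primary buckets whose id falls in [0, 10).
Set<Integer> primaries = dataStore.getAllLocalPrimaryBucketIdsBetweenProvidedIds(0, 10);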
protected void invokePartitionListenerAfterBucketCreated() {
  PartitionListener[] partitionListeners = getPartitionedRegion().getPartitionListeners();
  if (partitionListeners == null || partitionListeners.length == 0) {
    return;
  }
  for (int i = 0; i < partitionListeners.length; i++) {
    PartitionListener listener = partitionListeners[i];
    if (listener != null) {
      listener.afterBucketCreated(getId(), keySet());
    }
  }
}
public String description() {
  return "Expected 0 entries for bucket " + bucket.getId()
      + " but found " + bucket.keySet().size()
      + "; isPrimary=" + bucket.getBucketAdvisor().isPrimary()
      + "; keySet=" + bucket.keySet();
}
};
protected void releaseHoplogOrganizer() {
  // release resources during a clean transition
  HoplogOrganizer hdfs = hoplog.getAndSet(null);
  if (hdfs != null) {
    getPartitionedRegion().hdfsManager.close(getId());
  }
}
/** A fast estimate of the total local bucket size. */
public long getEstimatedLocalBucketSize(boolean primaryOnly) {
  long size = 0;
  for (BucketRegion br : localBucket2RegionMap.values()) {
    if (!primaryOnly || br.getBucketAdvisor().isPrimary()) {
      size += br.getEstimatedLocalSize();
    }
  }
  return size;
}
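A hedged example of consuming this estimate (the "dataStore" receiver is an assumption; it must be the object holding localBucket2RegionMap):

// Compare the primary-only estimate against the estimate across all
// locally hosted buckets (primaries plus redundant copies).
long primaryEstimate = dataStore.getEstimatedLocalBucketSize(true);
long totalLocalEstimate = dataStore.getEstimatedLocalBucketSize(false);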
if (breg.isDestroyed()) {
  SanityManager.DEBUG_PRINT(dumpFlag, gfc.getTableName() + " bucket "
      + preg.bucketStringForLogs(breg.getId()) + " is destroyed, skipping here "
      + cache.getMyId());
  continue;
}
boolean isPrimary = breg.getBucketAdvisor().isPrimary();
sb.append("======== Bucket ID=")
    .append(preg.bucketStringForLogs(breg.getId()))
    .append(" region=").append(breg.getName())
    .append(" primary=").append(isPrimary)
    .append(" contents: ========");
final Iterator<?> iter = breg.getBestLocalIterator(true);
while (iter.hasNext()) {
  RegionEntry entry = (RegionEntry) iter.next();
  breg.dumpBackingMap();
}
this.versions = new VersionedObjectList(this.removeAllPRDataSize, true,
    bucketRegion.getAttributes().getConcurrencyChecksEnabled());
try {
  if (removeAllPRData.length > 0) {
    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
      if (logger.isDebugEnabled()) {
        logger.debug("attempting to locate version tags for retried event");
      }
      // A retried (possibly-duplicate) bulk op may already have generated
      // version tags; recover them instead of generating new ones.
      for (int i = 0; i < removeAllPRDataSize; i++) {
        removeAllPRData[i].versionTag = bucketRegion
            .findVersionTagForClientBulkOp(removeAllPRData[i].getEventID());
        if (removeAllPRData[i].versionTag != null) {
          removeAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
        }
      }
    }
    // The first entry's event id identifies the originating thread for this bulk op.
    EventID eventID = removeAllPRData[0].getEventID();
    ThreadIdentifier membershipID = new ThreadIdentifier(
        eventID.getMembershipID(), eventID.getThreadID());
    bucketRegion.recordBulkOpStart(membershipID);
  }
  bucketRegion.waitUntilLocked(keys);
  boolean lockedForPrimary = false;
  final ArrayList<Object> succeeded = new ArrayList<Object>();
  Object key = keys[0];
  try {
    bucketRegion.doLockForPrimary(false);
    lockedForPrimary = true;
    bucketRegion.getDataView().postRemoveAll(op, this.versions, bucketRegion);
  } finally {
    if (lockedForPrimary) {
      bucketRegion.doUnlockForPrimary();
    }
  }
} finally {
  // Release the per-key locks taken by waitUntilLocked above.
  bucketRegion.removeAndNotifyKeys(keys);
}
public boolean takeSnapshotGIIWriteLock(MembershipListener listener) {
  if (writeLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      return bufferRegion.takeSnapshotGIIWriteLock(listener);
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Taking exclusive snapshotGIILock on bucket " + this.getName());
      }
      snapshotGIILock.attemptLock(LockMode.EX, -1, giiWriteLockForSIOwner);
      getBucketAdvisor().addMembershipListenerAndAdviseGeneric(listener);
      snapshotGIILocked = true;
      // Set the listener only after taking the write lock.
      this.giiListener = listener;
      if (logger.fineEnabled()) {
        logger.fine("Successfully took exclusive lock on bucket " + this.getName());
      }
      return true;
    }
  } else {
    return false;
  }
}
public void releaseSnapshotGIIWriteLock() {
  if (writeLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.releaseSnapshotGIIWriteLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Releasing exclusive snapshotGIILock on bucket " + this.getName());
      }
      if (this.snapshotGIILock.hasExclusiveLock(giiWriteLockForSIOwner, null)) {
        if (snapshotGIILocked) {
          snapshotGIILock.releaseLock(LockMode.EX, false, giiWriteLockForSIOwner);
          getBucketAdvisor().removeMembershipListener(giiListener);
          this.giiListener = null;
          snapshotGIILocked = false;
        }
      }
      if (logger.fineEnabled()) {
        logger.fine("Released exclusive snapshotGIILock on bucket " + this.getName());
      }
    }
  }
}
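The take/release pair above implies the usual try/finally guard; a minimal sketch, assuming a BucketRegion reference br and a MembershipListener listener:

if (br.takeSnapshotGIIWriteLock(listener)) {
  try {
    // ... work that must exclude concurrent snapshot readers ...
  } finally {
    br.releaseSnapshotGIIWriteLock();
  }
}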
protected void distributeUpdateOperation(EntryEventImpl event, long lastModified) {
  if (!event.isOriginRemote() && !event.isNetSearch() && getBucketAdvisor().isPrimary()) {
    if (event.isBulkOpInProgress()) {
      // Consolidate the UpdateOperation for each entry into a PutAllMessage;
      // basicPutPart3() was not called, so the entry must be added explicitly here.
      event.getPutAllOperation().addEntry(event, this.getId());
    } else {
      new UpdateOperation(event, lastModified).distribute();
      if (logger.isDebugEnabled()) {
        logger.debug("sent update operation : for region : {}: with event: {}",
            this.getName(), event);
      }
    }
  }
  if (!event.getOperation().isPutAll()) { // putAll will invoke listeners later
    event.invokeCallbacks(this, true, true);
  }
}
public void releaseSnapshotGIIReadLock() {
  if (readLockEnabled()) {
    if (this.getPartitionedRegion().isInternalColumnTable()) {
      BucketRegion bufferRegion = getBufferRegion();
      bufferRegion.releaseSnapshotGIIReadLock();
    } else {
      final LogWriterI18n logger = getCache().getLoggerI18n();
      if (logger.fineEnabled()) {
        logger.fine("Releasing readonly snapshotGIILock on bucket " + this.getName());
      }
      snapshotGIILock.releaseLock(LockMode.SH, false, giiReadLockForSIOwner);
    }
  }
}
private Set createEntriesSet(IteratorType type) throws ForceReattemptException {
  ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
  if (q == null) return Collections.emptySet();
  HDFSBucketRegionQueue brq = q.getBucketRegionQueue(
      this.owner.getPartitionedRegion(), owner.getId());
  return new HDFSEntriesSet(owner, brq, owner.getHoplogOrganizer(), type, refs);
}
public void checkForPrimary() {
  final boolean isPrimary = getBucketAdvisor().isPrimary();
  if (!isPrimary) {
    this.partitionedRegion.checkReadiness();
    checkReadiness();
    InternalDistributedMember primaryHolder = getBucketAdvisor().basicGetPrimaryMember();
    throw new PrimaryBucketException("Bucket " + getName()
        + " is not primary. Current primary holder is " + primaryHolder);
  }
}
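Callers can treat the resulting PrimaryBucketException as a cue to re-route the operation; a hypothetical sketch:

try {
  bucketRegion.checkForPrimary();
  // ... proceed with a primary-only operation ...
} catch (PrimaryBucketException e) {
  // This member is not (or no longer) the primary; the caller would
  // typically retry against the member named in the exception message.
}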
@Override
public String toString() {
  return new StringBuilder()
      .append("BucketRegion")
      .append("[path='").append(getFullPath())
      .append("';serial=").append(getSerialNumber())
      .append(";primary=").append(getBucketAdvisor().getProxyBucketRegion().isPrimary())
      .append(";indexUpdater=").append(getIndexUpdater())
      .append("]")
      .toString();
}
public void run() {
  PartitionedRegion region = (PartitionedRegion) getCache().getRegion("region1");
  PartitionedRegionDataStore dataStore = region.getDataStore();
  for (int i = 1; i <= 6; i++) {
    BucketRegion bucket = dataStore.getLocalBucketById(i);
    assertEquals(1, bucket.getNumOverflowOnDisk());
    assertEquals(0, bucket.getNumEntriesInVM());
    // The size recorded on disk is not the same as the in-memory size, apparently.
    assertTrue("Bucket size was " + bucket.getNumOverflowBytesOnDisk(),
        1 < bucket.getNumOverflowBytesOnDisk());
    assertEquals(bucket.getNumOverflowBytesOnDisk(), bucket.getTotalBytes());
  }
}
});
private Object getFromHdfs() {
  if (type == null) {
    return hdfs.getValue();
  }
  switch (type) {
  case KEYS:
    byte[] key = this.currentHdfsKey;
    return deserialize ? EntryEventImpl.deserialize(key) : key;
  case VALUES:
    PersistedEventImpl evt = hdfs.getValue();
    return evt.getValue();
  default:
    Object keyObj = EntryEventImpl.deserialize(this.currentHdfsKey);
    if (keyObj instanceof KeyWithRegionContext) {
      ((KeyWithRegionContext) keyObj).setRegionContext(region.getPartitionedRegion());
    }
    return ((HDFSRegionMap) region.getRegionMap()).getDelegate()
        .getEntryFromEvent(keyObj, hdfs.getValue(), true, forUpdate);
  }
}
@Override
public void checkReadiness() {
  super.checkReadiness();
  if (isDestroyed()) {
    throw new RegionDestroyedException(toString(), getFullPath());
  }
}
public int getSizeForEviction() {
  EvictionAttributes ea = this.getAttributes().getEvictionAttributes();
  if (ea == null) return 0;
  EvictionAlgorithm algo = ea.getAlgorithm();
  if (!algo.isLRUHeap()) return 0;
  EvictionAction action = ea.getAction();
  return action.isLocalDestroy()
      ? this.getRegionMap().sizeInVM()
      : (int) this.getNumEntriesInVM();
}
Map usedIndexes = indexObserver.getUsedIndexes(bucket.getFullPath());
StringBuffer buf = new StringBuffer();
buf.append(" indexesUsed(");
for (Iterator itr = usedIndexes.entrySet().iterator(); itr.hasNext();) {
  Map.Entry entry = (Map.Entry) itr.next();
  buf.append(entry.getKey().toString() + "(Results: " + entry.getValue()
      + ", Bucket: " + bucket.getId() + ")");
  if (itr.hasNext()) {
    buf.append(",");
  }
}
// Close the "indexesUsed(" list opened above.
buf.append(")");