/**
 * Test callback: looks up the partitioned region named {@code name} and
 * returns the entry count across all primary buckets hosted locally.
 *
 * @return the local-primary-bucket entry count (boxed)
 * @throws Exception if the region lookup or count fails
 */
public Object call() throws Exception {
  final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
  assertNotNull(pr);
  // The original wrapped this in a try with an empty finally block, which
  // had no effect; removed.
  return pr.entryCount(pr.getTXState(),
      pr.getDataStore().getAllLocalPrimaryBucketIds());
}
};
@Override protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException { // This call has come to an uninitialized region. if(pr == null || !pr.isInitialized()) { return true; } LogWriterI18n l = pr.getCache().getLoggerI18n(); if (DistributionManager.VERBOSE) { l.fine("DestroyRegionOnDataStore operateOnRegion: " + pr.getFullPath()); } pr.destroyRegion(callbackArg); return true; }
/**
 * Flushes this region's HDFS event queue, waiting up to
 * {@code maxWaitTime} for the flush to complete.
 *
 * @throws UnsupportedOperationException if this region is not backed by HDFS
 */
@Override
public void flushHDFSQueue(int maxWaitTime) {
  if (this.isHDFSRegion()) {
    HDFSFlushQueueFunction.flushQueue(this, maxWaitTime);
    return;
  }
  throw new UnsupportedOperationException(
      LocalizedStrings.HOPLOG_DOES_NOT_USE_HDFSSTORE
          .toLocalizedString(getName()));
}
/**
 * Returns the number of entries in this region using the current
 * transactional view; passing {@code null} for the bucket set counts
 * across all buckets.
 */
@Override
public int getRegionSize() {
  return this.entryCount(this.getTXState(), null);
}
/**
 * Fetches the keys for the given bucket, whether it is hosted locally or
 * remotely, using the current transactional view.
 *
 * @param bucketNum the bucket identifier to fetch keys from
 * @param allowTombstones whether destroyed (tombstone) entries are included
 * @return the keys in {@code bucketNum}, or {@link Collections#EMPTY_SET}
 *         when none can be found
 */
public Set getBucketKeys(int bucketNum, boolean allowTombstones) {
  return this.getBucketKeys(bucketNum, null, allowTombstones, this.getTXState());
}
// Distributed lock keyed on this region's identifier plus "-RecoveryLock".
// NOTE(review): the final 'false' flag's meaning is defined by the
// superclass constructor, which is not visible here — confirm before
// relying on it.
protected RecoveryLock() { super(PartitionedRegion.this.getRegionIdentifier() + "-RecoveryLock", getCache(), false); }
public static PartitionResponse send(Set recipients, PartitionedRegion r) { final TXStateInterface tx = r.getTXState(); PartitionResponse p = new PartitionResponse(r.getSystem(), recipients, tx); DumpAllPRConfigMessage m = new DumpAllPRConfigMessage(recipients, r.getPRId(), p, tx); /*Set failures = */r.getDistributionManager().putOutgoing(m); // if (failures != null && failures.size() > 0) { // throw new PartitionedRegionCommunicationException("Failed sending ", m); // } return p; }
/**
 * Returns the name of the HDFS event queue used by this region, or
 * {@code null} when the region's data policy does not use HDFS.
 * A colocated region shares the queue of its colocation leader.
 */
protected String getHDFSEventQueueName() {
  if (!this.getDataPolicy().withHDFS()) {
    return null;
  }
  final String colocatedWith = this.getPartitionAttributes().getColocatedWith();
  final String basePath;
  if (colocatedWith == null) {
    basePath = getFullPath();
  } else {
    // Derive the queue name from the colocation leader's path instead.
    basePath = ColocationHelper.getLeaderRegionName(this).getFullPath();
  }
  return HDFSStoreFactoryImpl.getEventQueueName(basePath);
}
/** A utility to check to see if a region has been created on * all of the VMs that host the regions this region is colocated with. */ public static boolean isColocationComplete(PartitionedRegion region) { Region prRoot = PartitionedRegionHelper.getPRRoot(region .getCache()); PartitionRegionConfig config = (PartitionRegionConfig) prRoot.get(region.getRegionIdentifier()); //Fix for bug 40075. There is race between this call and the region being concurrently //destroyed. if(config == null) { Assert.assertTrue(region.isDestroyed() || region.isClosed, "Region is not destroyed, but there is no entry in the prRoot for region " + region); return false; } return config.isColocationComplete(); }
private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) { BucketRegion br = region.getDataStore().getLocalBucketById(bucketId); if (br == null) { // got rebalanced or something throw new BucketMovedException("Bucket region is no longer available", bucketId, region.getName()); } return br.getHoplogOrganizer(); }
/**
 * Returns the number of entries in this region.
 *
 * @param localOnly when {@code true}, count only the buckets hosted on this
 *        member (0 if this member has no data store); otherwise count the
 *        whole region
 */
public int entryCount(boolean localOnly) {
  if (!localOnly) {
    return entryCount(null);
  }
  if (!this.isDataStore()) {
    // Accessor members host no buckets.
    return 0;
  }
  return entryCount(getTXState(),
      new THashSet(this.dataStore.getAllLocalBucketIds()));
}
/**
 * Test helper: returns an unmodifiable key set spanning every bucket that
 * currently has storage assigned.
 */
public Set keysWithoutCreatesForTests() {
  checkReadiness();
  final Set<Integer> bucketsWithStorage = new HashSet<Integer>();
  final int totalBuckets = getTotalNumberOfBuckets();
  for (int bucketId = 0; bucketId < totalBuckets; bucketId++) {
    if (distAdvisor.isStorageAssignedForBucket(bucketId)) {
      bucketsWithStorage.add(Integer.valueOf(bucketId));
    }
  }
  return Collections.unmodifiableSet(new KeysSet(bucketsWithStorage, getTXState()));
}
/**
 * Logs the bucket-to-node advisor profiles for the specified bucket.
 *
 * @param bId the bucket identifier whose advisor meta-data is dumped
 */
public void dumpB2NForBucket(int bId) {
  final String header = "Dumping advisor bucket meta-data for bId="
      + bucketStringForLogs(bId) + " aka " + getBucketName(bId);
  getRegionAdvisor().getBucket(bId).getBucketAdvisor().dumpProfiles(header);
}
/**
 * Returns the set of members that store data for this region.
 *
 * @param partitionName when non-null, restrict the result to the members
 *        hosting that fixed partition
 */
private Set<InternalDistributedMember> getAllStores(String partitionName) {
  if (partitionName != null) {
    return getFixedPartitionStores(partitionName);
  }
  final Set<InternalDistributedMember> allStores =
      this.prRegion.getRegionAdvisor().adviseDataStore(true);
  // Presumably adviseDataStore() excludes the local member, so add this
  // member explicitly when it hosts a data store — confirm against advisor.
  if (this.prRegion.getDataStore() != null) {
    allStores.add(this.prRegion.getDistributionManager().getId());
  }
  return allStores;
}
// Moves this.bucketId from this.source onto this member via the data store,
// ends the message-processing stat before replying, then sends the outcome
// back in a MoveBucketReplyMessage. NOTE(review): returning false presumably
// tells the caller that this message sent its own reply — confirm against
// the PartitionMessage contract.
@Override protected final boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion region, long startTime) throws ForceReattemptException { PartitionedRegionDataStore dataStore = region.getDataStore(); boolean moved = dataStore.moveBucket(this.bucketId, this.source,true); region.getPrStats().endPartitionMessagesProcessing(startTime); MoveBucketReplyMessage.send( getSender(), getProcessorId(), dm, null, moved); return false; }
private HoplogOrganizer getOrganizer(PartitionedRegion region, int bucketId) { BucketRegion br = region.getDataStore().getLocalBucketById(bucketId); if (br == null) { // got rebalanced or something throw new BucketMovedException("Bucket region is no longer available", bucketId, region.getFullPath()); } return br.getHoplogOrganizer(); }
/**
 * Extends the superclass description with this region's partitioned-region
 * state for logging/toString purposes.
 */
@Override
protected StringBuilder getStringBuilder() {
  final StringBuilder sb = super.getStringBuilder();
  sb.append("; prId=").append(this.partitionedRegionId);
  sb.append("; isDestroyed=").append(this.isDestroyed);
  sb.append("; isClosed=").append(this.isClosed);
  sb.append("; retryTimeout=").append(this.retryTimeout);
  sb.append("; serialNumber=").append(getSerialNumber());
  sb.append("; hdfsStoreName=").append(getHDFSStoreName());
  sb.append("; hdfsWriteOnly=").append(getHDFSWriteOnly());
  sb.append("; partition attributes=").append(getPartitionAttributes());
  sb.append("; on VM ").append(getMyId());
  return sb;
}
@Override protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException { // This call has come to an uninitialized region. if(pr == null || !pr.isInitialized()) { return true; } org.apache.logging.log4j.Logger logger = LogService.getLogger(); if (logger.isTraceEnabled(LogMarker.DM)) { logger.trace("DestroyRegionOnDataStore operateOnRegion: " + pr.getFullPath()); } pr.destroyRegion(callbackArg); return true; }
/**
 * Returns {@code true} if any bucket's actual redundancy differs from the
 * configured redundant-copies target: under-redundant (excluding buckets
 * reporting -1, presumably a "no information" sentinel — confirm against
 * RegionAdvisor) or over-redundant.
 */
public boolean isRedundancyImpaired() {
  final int numBuckets =
      this.prRegion.getPartitionAttributes().getTotalNumBuckets();
  final int targetRedundancy =
      this.prRegion.getPartitionAttributes().getRedundantCopies();
  for (int bucketId = 0; bucketId < numBuckets; bucketId++) {
    final int actual =
        this.prRegion.getRegionAdvisor().getBucketRedundancy(bucketId);
    // Parentheses make the original precedence explicit: && binds tighter
    // than ||, so -1 is exempt only from the under-redundancy check.
    if ((actual < targetRedundancy && actual != -1)
        || actual > targetRedundancy) {
      return true;
    }
  }
  return false;
}
/**
 * Test callback: destroys the partitioned region named PR_PREFIX + "1" and
 * then the one named PR_PREFIX, asserting each lookup succeeds first.
 */
public void run2() throws CacheException {
  final Cache cache = getCache();
  // Destroy the suffixed region first, then the base one — same order as
  // two sequential lookup/destroy pairs.
  for (String suffix : new String[] { "1", "" }) {
    PartitionedRegion pr = (PartitionedRegion) cache
        .getRegion(Region.SEPARATOR + PR_PREFIX + suffix);
    assertNotNull("Null region is " + pr.getName(), pr);
    pr.destroyRegion();
  }
}
};