/**
 * Returns the set of members that this processor should care about.
 *
 * <p>Simply delegates to the owning distribution manager's current membership view.
 *
 * @return a Set of the current members (raw Set — element type not declared here)
 * @since GemFire 5.7
 */
protected Set getDistributionManagerIds() {
  return getDistributionManager().getDistributionManagerIds();
}
/**
 * Snapshots every member currently known to the given distributed system.
 *
 * @param internalDS the system whose distribution manager supplies the membership view
 * @return a fresh, mutable set containing the current members
 */
@SuppressWarnings("unchecked")
public static Set<DistributedMember> getAllMembers(InternalDistributedSystem internalDS) {
  // Copy the manager's view so callers may mutate the result freely.
  Set currentView = internalDS.getDistributionManager().getDistributionManagerIds();
  return new HashSet<DistributedMember>(currentView);
}
synchronized void handleDepartureOfSender() { try { if (this.receivingDM.getDistributionManagerIds().contains(this.sender)) { // sender must have sent us a NonGrantorDestroyedMessage // still need to send a reply to make the thread stop waiting respondWithDestroyed(); } } finally { if (!this.responded) { endGrantWaitStatistic(); this.responded = true; } } }
/**
 * Waits for a flow-control permit, logging a warning (with the current membership
 * list) if the ack-wait threshold elapses first, then blocking until a permit
 * finally arrives.
 *
 * @param startWaitTime the System.currentTimeMillis() value at which the overall
 *        wait began; used to shorten this wait by time already spent
 * @throws InterruptedException if interrupted while waiting for a permit
 */
private void basicWait(long startWaitTime) throws InterruptedException {
  // Threshold is configured in seconds; convert to millis.
  long timeout = getAckWaitThreshold() * 1000L;
  long timeSoFar = System.currentTimeMillis() - startWaitTime;
  if (timeout <= 0) {
    // A non-positive threshold means "wait forever".
    timeout = Long.MAX_VALUE;
  }
  // Try to acquire within the remaining budget. NOTE(review): if timeSoFar already
  // exceeds timeout the remaining value is negative and tryAcquire returns
  // immediately — presumably intentional so the warning fires right away; confirm.
  if (!aborted.get() && !permits.tryAcquire(timeout - timeSoFar - 1, TimeUnit.MILLISECONDS)) {
    checkCancellation();
    Set activeMembers = dm.getDistributionManagerIds();
    logger.warn(
        "{} seconds have elapsed while waiting for replies: {} on {} whose current membership list is: [{}]",
        getAckWaitThreshold(), this, dm.getId(), activeMembers);
    // Block without a deadline until a permit arrives.
    permits.acquire();
    // Give an info message since timeout gave a warning.
    logger.info("{} wait for replies completed", "InitialImageFlowControl");
  }
}
if (!dm.getDistributionManagerIds().contains(memberId)) { return; try { lock.wait(); memberSet = dm.getDistributionManagerIds(); } catch (InterruptedException ie) { Thread.currentThread().interrupt();
/**
 * Connects to the distributed system in admin mode, printing the connection target
 * (locators, or mcast address/port when no locators are configured) to stdout.
 *
 * @return the connected system
 * @throws RuntimeException if the connected system has no members
 */
private static InternalDistributedSystem getAdminCnx() {
  InternalDistributedSystem.setCommandLineAdmin(true);
  Properties props = propertyOption;
  // Quiet the admin connection's own logging.
  props.setProperty(LOG_LEVEL, "warning");
  DistributionConfigImpl config = new DistributionConfigImpl(props);
  System.out.print("Connecting to distributed system:");
  if ("".equals(config.getLocators())) {
    System.out.println(" mcast=" + config.getMcastAddress() + ":" + config.getMcastPort());
  } else {
    System.out.println(" locators=" + config.getLocators());
  }
  InternalDistributedSystem system =
      (InternalDistributedSystem) InternalDistributedSystem.connectForAdmin(props);
  Set members = system.getDistributionManager().getDistributionManagerIds();
  if (members.isEmpty()) {
    throw new RuntimeException("There are no members in the distributed system");
  }
  return system;
}
InternalDistributedMember recipient = it.next(); if (!this.dm.getDistributionManagerIds().contains(recipient)) { if (logger.isDebugEnabled()) { logger.debug("Skipping member {} due to dist list absence", recipient);
/** * Atomically determine who is the current grantor of the given service. * * @param serviceName the name of the lock service we want the grantor of * @return the current grantor of <code>serviceName</code> and recoveryNeeded will be true if * requestor has become the grantor and needs to recover lock info. */ public GrantorInfo peekGrantor(String serviceName) { synchronized (this) { GrantorInfo gi = (GrantorInfo) this.nameToInfo.get(serviceName); if (gi != null) { waitWhileInitiatingTransfer(gi); InternalDistributedMember currentGrantor = gi.getId(); // Note that elder recovery may put GrantorInfo instances in // the map whose id is null and whose needRecovery is true if (currentGrantor != null && this.dm.getDistributionManagerIds().contains(currentGrantor)) { return gi; } else { return new GrantorInfo(null, 0, 0, true); } } else { return new GrantorInfo(null, 0, 0, false); } } }
/**
 * Processes this message on the receiving node: registers with the transaction
 * tracker and membership listener (ACK mode), or processes immediately when no
 * reply is expected (processorId == 0).
 *
 * @param dm the distribution manager delivering this message
 */
@Override
protected void process(ClusterDistributionManager dm) {
  this.dm = dm;
  // Remove this node from the set of recipients
  if (this.farSiders != null) {
    this.farSiders.remove(dm.getId());
  }
  if (this.processorId != 0) {
    TXLockService.createDTLS(this.dm.getSystem()); // fix bug 38843; no-op if already created
    synchronized (this) {
      // Handle potential origin departure
      this.dm.addMembershipListener(this);
      // Assume ACK mode, defer processing until we receive a
      // CommitProcess message
      if (logger.isDebugEnabled()) {
        final Object key = getTrackerKey();
        logger.debug("Adding key:{} class{} to tracker list", key, key.getClass().getName());
      }
      txTracker.add(this);
    }
    // The listener is registered above BEFORE this membership check, so a sender
    // departing in between is caught either by the listener or by this check.
    if (!this.dm.getDistributionManagerIds().contains(getSender())) {
      memberDeparted(this.dm, getSender(), false /* don't care */);
    }
  } else {
    // No reply expected: process inline.
    basicProcess();
  }
}
this.myMembershipListener = new MyMembershipListener(); dm.addMembershipListener(this.myMembershipListener); Set initialMembers = dm.getDistributionManagerIds(); this.myMembershipListener.addMembers(initialMembers);
/** * Utility method to print warning when nodeList in b2n region is found empty. This will signify * potential data loss scenario. * * @param bucketId Id of Bucket whose nodeList in b2n is empty. * @param callingMethod methodName of the calling method. */ public static void logForDataLoss(PartitionedRegion partitionedRegion, int bucketId, String callingMethod) { if (!Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "PRDebug")) { return; } Region root = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); // Region allPartitionedRegions = PartitionedRegionHelper.getPRConfigRegion( // root, partitionedRegion.getCache()); PartitionRegionConfig prConfig = (PartitionRegionConfig) root.get(partitionedRegion.getRegionIdentifier()); if (prConfig == null) return; Set members = partitionedRegion.getDistributionManager().getDistributionManagerIds(); logger.warn( "DATALOSS ( {} ) :: Size of nodeList After verifyBucketNodes for bucket ID, {} is 0", callingMethod, bucketId); logger.warn("DATALOSS ( {} ) :: NodeList from prConfig, {}", callingMethod, printCollection(prConfig.getNodes())); logger.warn("DATALOSS ( {} ) :: Current Membership List, {}", callingMethod, printCollection(members)); }
&& this.dm.getDistributionManagerIds().contains(currentGrantor)) { return gi; } else {
recipients.retainAll(this.dm.getDistributionManagerIds()); if (!recipients.isEmpty()) { if (this.txState.internalDuringIndividualSend != null) {
int sleepCount = 0; while (sleepCount < 20 && alreadyManaging.isEmpty() && this.cache.getDistributionManager().getDistributionManagerIds() .contains(p.getDistributedMember())) { sleepCount++;
DirectReplyProcessor processor) throws ConnectionException { DistributionManager dm = getDM(); Set activeMembers = dm.getDistributionManagerIds();
Set members = this.dlock.getDistributionManager().getDistributionManagerIds();
DLockRecoverGrantorProcessor.recoverLockGrantor(this.dm.getDistributionManagerIds(), // include
&& this.dm.getDistributionManagerIds().contains(previousGrantor)) {
if (!dm.getDistributionManagerIds().contains(grc.currentElder) && dm.getViewMembers().contains(grc.currentElder)) {