/**
 * Determines whether the cache associated with the given distribution manager is
 * closing or already unavailable.
 *
 * @param dm the distribution manager to inspect; may be {@code null}
 * @return {@code true} if {@code dm} is null, has no cache, or its cache is closed
 */
public boolean checkCacheClosing(ClusterDistributionManager dm) {
  if (dm == null) {
    return true;
  }
  final InternalCache cache = dm.getCache();
  if (cache == null) {
    return true;
  }
  return cache.isClosed();
}
try { setIsStartupThread(Boolean.TRUE); startThreads(); this.getCancelCriterion().checkCancelInProgress(null); boolean interrupted = Thread.interrupted(); try { } finally { if (!finishedConstructor) { askThreadsToStop(); // fix for bug 42039
/**
 * Returns the members located in the same redundancy zone as this member.
 *
 * @return the set of members sharing this member's zone
 */
@Override
public Set<InternalDistributedMember> getMembersInThisZone() {
  final InternalDistributedMember self = getDistributionManagerId();
  return getMembersInSameZone(self);
}
/**
 * Returns every known member (including admin members) except this one.
 *
 * @return a mutable set containing all other distribution managers
 */
@Override
public Set<InternalDistributedMember> getAllOtherMembers() {
  final Set<InternalDistributedMember> others =
      new HashSet<>(getDistributionManagerIdsIncludingAdmin());
  others.remove(getDistributionManagerId());
  return others;
}
/**
 * Records the redundancy zone for a member and, for members other than the local
 * one, logs whether they are equivalent to / in the same zone as this member.
 *
 * @param member the member whose zone is being recorded
 * @param redundancyZone the zone name; ignored when {@code null} or empty
 */
void setRedundancyZone(InternalDistributedMember member, String redundancyZone) {
  if (redundancyZone != null && !redundancyZone.isEmpty()) {
    this.redundancyZones.put(member, redundancyZone);
  }
  // Fix: use equals() rather than reference equality. The caller may pass an id
  // instance that is logically the local member but not the same object, in which
  // case '!=' would wrongly log the local member as a remote one.
  if (!member.equals(getDistributionManagerId())) {
    String relationship = areInSameZone(getDistributionManagerId(), member) ? "" : "not ";
    Object[] logArgs = new Object[] {member, relationship};
    logger.info("Member {} is {} equivalent or in the same redundancy zone.", logArgs);
  }
}
when(system.getCache()).thenReturn(cache); when(distributionManager.getId()).thenReturn(member); when(distributionManager.getDistributionManagerId()).thenReturn(member); when(distributionManager.getConfig()).thenReturn(config); when(distributionManager.getSystem()).thenReturn(system); when(distributionManager.getCancelCriterion()).thenReturn(systemCancelCriterion); when(distributionManager.getCache()).thenReturn(cache); when(distributionManager.getExistingCache()).thenReturn(cache);
} else if (isDedicatedAdminVM()) { vmKind = ADMIN_ONLY_DM_TYPE; new ClusterDistributionManager(system, transport, system.getAlertingService()); distributionManager.assertDistributionManagerType(); InternalDistributedMember id = distributionManager.getDistributionManagerId(); if (!"".equals(id.getName())) { for (InternalDistributedMember m : distributionManager .getViewMembers()) { if (m.equals(id)) { if (distributionManager.getMembershipManager().verifyMember(m, "member is using the name of " + id)) { throw new IncompatibleSystemException("Member " + id distributionManager.addNewMember(id); // add ourselves if (!distributionManager.sendStartupMessage(op)) { if (distributionManager.getOtherDistributionManagerIds().size() == 0) { logger.info("Did not hear back from any other system. I am the first one."); } else if (transport.isMcastEnabled()) { if (!distributionManager.testMulticast()) { logger.warn( "Did not receive a startup response but other members exist. Multicast does not seem to be working."); throw ex;
String rejectionMessage = null; final boolean isAdminDM = dm.getId().getVmKind() == ClusterDistributionManager.ADMIN_ONLY_DM_TYPE || dm.getId().getVmKind() == ClusterDistributionManager.LOCATOR_DM_TYPE; if (dm.getTransport().isMcastEnabled() != isMcastEnabled) { rejectionMessage = String.format( && dm.getSystem().getOriginalConfig().getMcastPort() != getMcastPort()) { rejectionMessage = String.format( "Rejected new system node %s because its mcast-port %s does not match the mcast-port %s of the distributed system it is attempting to join. To fix this make sure the mcast-port gemfire property is set the same on all members of the same distributed system.", new Object[] {getSender(), getMcastPort(), dm.getSystem().getOriginalConfig().getMcastPort()}); } else if (isMcastEnabled && !checkMcastAddress(dm.getSystem().getOriginalConfig().getMcastAddress(), getMcastHostAddress())) { rejectionMessage = "Rejected new system node %s because its mcast-address %s does not match the mcast-address %s of the distributed system it is attempting to join. To fix this make sure the mcast-address gemfire property is set the same on all members of the same distributed system.", new Object[] {getSender(), getMcastHostAddress(), dm.getSystem().getOriginalConfig().getMcastAddress()}); } else if (dm.getTransport().isTcpDisabled() != isTcpDisabled) { rejectionMessage = String.format( "Rejected new system node %s because isTcpDisabled=%s does not match the distributed system it is attempting to join.", new Object[] {getSender(), Boolean.valueOf(isTcpDisabled)}); } else if (dm.getDistributedSystemId() != DistributionConfig.DEFAULT_DISTRIBUTED_SYSTEM_ID
Cache cache = dm.getExistingCache(); Region region = cache.getRegion(this.regionPath); PersistenceAdvisor persistenceAdvisor = null; } else if (region == null) { Bucket proxy = PartitionedRegionHelper.getProxyBucketRegion(dm.getCache(), this.regionPath, false); if (proxy != null) { persistenceAdvisor = proxy.getPersistenceAdvisor(); replyMsg.setException(exception); dm.putOutgoing(replyMsg);
} catch (UnknownHostException e) { if (getViewMembers().size() > 1) { throw new SystemConnectException( "Unable to examine network cards and other members exist"); setEquivalentHosts(equivs); setEnforceUniqueZone(getConfig().getEnforceUniqueHost()); String redundancyZone = getConfig().getRedundancyZone(); if (redundancyZone != null && !redundancyZone.equals("")) { setEnforceUniqueZone(true); setRedundancyZone(getDistributionManagerId(), redundancyZone); if (logger.isDebugEnabled()) { StringBuffer sb = new StringBuffer(); Set<InternalDistributedMember> allOthers = new HashSet<>(getViewMembers()); allOthers.remove(getDistributionManagerId()); enforceUniqueZone()); } catch (Exception re) { throw new SystemConnectException( printStacks(allOthers, false); "Forcing an elder join event since a startup response was not received from elder {}.", e); handleManagerStartup(e);
/**
 * When the local member is the elder, getElderState must return the state
 * produced by the supplier and must invoke the supplier exactly once.
 */
@Test
public void getElderStateAsElder() {
  ElderState expectedState = mock(ElderState.class);
  Supplier<ElderState> stateSupplier = mock(Supplier.class);
  when(stateSupplier.get()).thenReturn(expectedState);
  when(clusterDistributionManager.getId()).thenReturn(member0);
  when(clusterDistributionManager.getViewMembers())
      .thenReturn(Arrays.asList(member0, member1));

  ClusterElderManager manager =
      new ClusterElderManager(clusterDistributionManager, stateSupplier);

  assertThat(manager.getElderState(false)).isEqualTo(expectedState);
  verify(stateSupplier, times(1)).get();
}
dm.getCancelCriterion().checkCancelInProgress(null); boolean interrupted = Thread.interrupted(); try { dm.getMembershipManager().waitForMessageState(getSender(), channelState); break; } catch (InterruptedException ignore) { if (isSingleFlushTo) { ga.sendingMember = dm.getDistributionManagerId(); } else { ga.sendingMember = getSender(); logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {}", ga); if (requestingMember.equals(dm.getDistributionManagerId())) { ga.dmProcess(dm); } else { dm.putOutgoing(ga);
msg.setDistributedSystemId(dm.getConfig().getDistributedSystemId()); msg.setRedundancyZone(redundancyZone); msg.setEnforceUniqueZone(enforceUniqueZone); msg.setMcastEnabled(transport.isMcastEnabled()); msg.setMcastPort(dm.getSystem().getOriginalConfig().getMcastPort()); msg.setMcastHostAddress(dm.getSystem().getOriginalConfig().getMcastAddress()); msg.setTcpDisabled(transport.isTcpDisabled()); msg.setRecipients(recipients); msg.setReplyProcessorId(proc.getProcessorId()); this.newlyDeparted = dm.sendOutgoing(msg); // set of departed jgroups ids if (this.newlyDeparted != null && !this.newlyDeparted.isEmpty()) { this.dm.handleManagerDeparture(id, false, "left the membership view"); proc.memberDeparted(this.dm, id, true); for (Iterator it = unresponsive.iterator(); it.hasNext();) { InternalDistributedMember um = (InternalDistributedMember) it.next(); if (!dm.getViewMembers().contains(um)) { dm.handleManagerDeparture(um, true, "disappeared during startup handshake"); } else if (dm.isCurrentMember(um)) { this.dm.setUnfinishedStartups(unresponsive);
/**
 * waitForElder must return false immediately when this member is itself the
 * elder (member0 is local and first in the view) and we ask to wait for member1.
 */
@Test
public void waitForElderReturnsFalseIfWeAreElder() {
  when(clusterDistributionManager.getId()).thenReturn(member0);
  when(clusterDistributionManager.getViewMembers())
      .thenReturn(Arrays.asList(member0, member1));
  when(clusterDistributionManager.isCurrentMember(eq(member1))).thenReturn(true);

  ClusterElderManager manager = new ClusterElderManager(clusterDistributionManager);

  assertThat(manager.waitForElder(member1)).isFalse();
}
public static void sendMessage(InternalDistributedMember recipient, int processorId, Object obj, ClusterDistributionManager distributionManager, Object aCallbackArgument, Exception e, boolean isSerialized, boolean requestorTimedOut) { // create a message NetLoadReplyMessage msg = new NetLoadReplyMessage(); msg.initialize(processorId, obj, aCallbackArgument, e, isSerialized, requestorTimedOut); msg.setRecipient(recipient); distributionManager.putOutgoing(msg); }
this.useNative = useNative; Set recips = new HashSet(recipients); DistributedMember me = originDm.getDistributionManagerId(); if (recips.contains(me)) { recips.remove(me); this.processorId = cp.getProcessorId(); originDm.putOutgoing(this); if (cp != null) { try {
new MembershipChangeListener(); clusterDistributionManager.addMembershipListener(changeListener); if (clusterDistributionManager.isCloseInProgress()) { return false; return true; if (!clusterDistributionManager.isCurrentMember(desiredElder)) { return false; // no longer present if (!clusterDistributionManager.getId().equals(desiredElder) && clusterDistributionManager.getId().equals(currentElder)) { clusterDistributionManager.removeMembershipListener(changeListener);
/**
 * Creates the mocks shared by every test and wires the distribution manager's
 * collaborators (cancel criteria, distributed system, membership manager).
 */
@Before
public void before() {
  member0 = mock(InternalDistributedMember.class);
  memberManager = mock(MembershipManager.class);
  system = mock(InternalDistributedSystem.class);
  cancelCriterion = mock(CancelCriterion.class);
  systemCancelCriterion = mock(CancelCriterion.class);
  clusterDistributionManager = mock(ClusterDistributionManager.class);

  when(system.getCancelCriterion()).thenReturn(systemCancelCriterion);
  when(clusterDistributionManager.getCancelCriterion()).thenReturn(cancelCriterion);
  when(clusterDistributionManager.getSystem()).thenReturn(system);
  when(clusterDistributionManager.getMembershipManager()).thenReturn(memberManager);
}
JmxManagerProfile p = null; try { final InternalCache cache = dm.getCache(); if (cache != null && !cache.isClosed()) { final JmxManagerAdvisor adv = cache.getJmxManagerAdvisor(); } finally { if (thr != null) { dm.getCancelCriterion().checkCancelInProgress(null); logger.info(String.format("This member caught exception processing profile %s %s", p, toString()), thr);
/**
 * process() must still send a reply (putOutgoing) even when waiting for current
 * operations on the region throws, so the requesting member is not left hanging.
 */
@Test
public void testProcessWithWaitForCurrentOperationsThatTimesOut() {
  InternalDistributedMember relayRecipient = mock(InternalDistributedMember.class);
  ClusterDistributionManager dm = mock(ClusterDistributionManager.class);
  InternalCache cache = mock(InternalCache.class);
  DistributedRegion region = mock(DistributedRegion.class);
  CacheDistributionAdvisor advisor = mock(CacheDistributionAdvisor.class);

  when(dm.getDistributionManagerId()).thenReturn(relayRecipient);
  when(dm.getExistingCache()).thenReturn(cache);
  when(cache.getRegionByPathForProcessing(any())).thenReturn(region);
  when(region.isInitialized()).thenReturn(true);
  when(region.getDistributionAdvisor()).thenReturn(advisor);
  doThrow(new GemFireIOException("expected in fatal log message"))
      .when(advisor).waitForCurrentOperations();

  StateMarkerMessage message = new StateMarkerMessage();
  message.relayRecipient = relayRecipient;

  message.process(dm);

  verify(dm, times(1)).putOutgoing(any());
}
}