/**
 * Checks if two views have the same members, observing order. E.g. {A,B,C} and {B,A,C} returns false,
 * {A,C,B} and {A,C,B} returns true.
 */
public static boolean sameMembersOrdered(View v1, View v2) {
    return Arrays.equals(v1.getMembersRaw(), v2.getMembersRaw());
}
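/* A minimal self-contained sketch (not part of the original source) illustrating the
 * order-sensitive comparison above; String arrays stand in for the Address arrays
 * returned by View.getMembersRaw(): */
import java.util.Arrays;

public class SameMembersOrderedDemo {
    public static void main(String[] args) {
        String[] v1={"A","B","C"}, v2={"B","A","C"}, v3={"A","C","B"};
        System.out.println(Arrays.equals(v1, v2));         // false: same members, different order
        System.out.println(Arrays.equals(v3, v3.clone())); // true: same members, same order
    }
}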
/** Returns the rank (zero-based index) of member in the view's membership, or -1 if not found */
protected static int getRank(Address member, View v) {
    if(v == null || member == null)
        return -1;
    Address[] members=v.getMembersRaw();
    for(int i=0; i < members.length; i++)
        if(member.equals(members[i]))
            return i;
    return -1;
}
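/* Hypothetical illustration of rank semantics (Strings stand in for Address): rank is the
 * zero-based position in the view's membership; by JGroups convention the member at rank 0
 * is the coordinator. */
import java.util.Arrays;
import java.util.List;

public class RankDemo {
    public static void main(String[] args) {
        List<String> members=Arrays.asList("A", "B", "C");
        System.out.println(members.indexOf("A")); // 0 -> the coordinator
        System.out.println(members.indexOf("C")); // 2
        System.out.println(members.indexOf("X")); // -1 -> not in the view
    }
}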
/**
 * Returns a digest which contains, for all members of view, the highest delivered and received
 * seqnos of all digests
 */
protected static Digest maxSeqnos(final View view, List<Digest> digests) {
    if(view == null || digests == null)
        return null;
    MutableDigest digest=new MutableDigest(view.getMembersRaw());
    digests.forEach(digest::merge);
    return digest;
}
/** Returns true if the digest's membership differs from the view's, in which case the digest's
 * addresses have to be written out (they cannot be implied by the view) */
protected static boolean writeAddresses(final View view, final Digest digest) {
    return digest == null || view == null || !Arrays.equals(view.getMembersRaw(), digest.getMembersRaw());
}
@Override
public List<Node> getMembers() {
    List<Node> members = new ArrayList<>(this.view.size());
    for (Address address : this.view.getMembersRaw()) {
        members.add(this.factory.createNode(address));
    }
    return members;
}
/**
 * Needs to return a map of all subview coordinators and their views (as a collection of members). The merge policy
 * is defined in https://issues.jboss.org/browse/JGRP-1910
 */
protected static Map<Address,Collection<Address>> determineMergeCoords(Map<Address,View> views) {
    Map<Address,Collection<Address>> retval=new HashMap<>();
    for(View view: views.values()) {
        Address coord=view.getCreator();
        Collection<Address> members=retval.computeIfAbsent(coord, k -> new ArrayList<>());
        for(Address mbr: view.getMembersRaw())
            if(!members.contains(mbr))
                members.add(mbr);
    }

    // For the merge participants which are not coordinators, we simply add them; the associated
    // membership list consists only of themselves
    Collection<Address> merge_participants=Util.determineMergeParticipants(views);
    merge_participants.removeAll(retval.keySet());
    merge_participants.forEach(p -> retval.put(p, Collections.singletonList(p)));
    return retval;
}
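/* Simplified, self-contained sketch of the grouping step above, with Strings standing in for
 * Address and plain member lists standing in for View: each subview's members are collected
 * under the subview's creator (its coordinator), skipping duplicates. */
import java.util.*;

public class MergeCoordsDemo {
    public static void main(String[] args) {
        Map<String,List<String>> views=new LinkedHashMap<>();
        views.put("A", Arrays.asList("A", "B"));      // subview created (coordinated) by A
        views.put("C", Arrays.asList("C", "D", "B")); // subview created by C
        Map<String,Collection<String>> coords=new LinkedHashMap<>();
        views.forEach((creator, mbrs) -> {
            Collection<String> members=coords.computeIfAbsent(creator, k -> new ArrayList<>());
            for(String mbr: mbrs)
                if(!members.contains(mbr))
                    members.add(mbr);
        });
        System.out.println(coords); // {A=[A, B], C=[C, D, B]}
    }
}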
protected String printDigest(final Digest digest) {
    if(digest == null)
        return null;
    return view != null? digest.toString(view.getMembersRaw(), false) : digest.toString();
}
@GuardedBy("lock") protected void resetDigest() { if(view == null) return; digest=new MutableDigest(view.getMembersRaw()); // .set(getDigest()); votes=new FixedSizeBitSet(view.size()); // all 0's initially }
public void readFrom(DataInput in) throws Exception {
    byte flags=in.readByte();

    // 1. view
    if((flags & VIEW_PRESENT) == VIEW_PRESENT) {
        view=new View();
        view.readFrom(in);
    }

    // 2. digest (requires the view: the digest's members are taken from it)
    if((flags & DIGEST_PRESENT) == DIGEST_PRESENT) {
        digest=new Digest(view.getMembersRaw());
        digest.readFrom(in, false);
    }

    // 3. fail_reason
    if((flags & FAIL_REASON_PRESENT) == FAIL_REASON_PRESENT)
        fail_reason=in.readUTF();
}
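/* Self-contained sketch of the presence-flags wire pattern used above: optional fields are
 * guarded by bits in a leading flags byte, so absent fields cost nothing on the wire. The
 * constants and payload here are illustrative, not the original protocol's layout. */
import java.io.*;

public class FlagsWireDemo {
    static final byte VIEW_PRESENT=1, DIGEST_PRESENT=2, FAIL_REASON_PRESENT=4;

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos=new ByteArrayOutputStream();
        DataOutputStream out=new DataOutputStream(bos);
        out.writeByte(FAIL_REASON_PRESENT); // only fail_reason is present
        out.writeUTF("join rejected");

        DataInput in=new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        byte flags=in.readByte();
        if((flags & FAIL_REASON_PRESENT) == FAIL_REASON_PRESENT)
            System.out.println(in.readUTF()); // prints "join rejected"
    }
}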
/**
 * Merges all digests into one. For each sender, the new value is max(highest_delivered),
 * max(highest_received). The caller must hold the lock on merge_rsps.
 */
protected MutableDigest consolidateDigests(final View new_view, final List<MergeData> merge_rsps) {
    MutableDigest retval=new MutableDigest(new_view.getMembersRaw());
    for(MergeData data: merge_rsps) {
        Digest tmp_digest=data.getDigest();
        if(tmp_digest != null)
            retval.merge(tmp_digest);
    }
    return retval;
}
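/* Self-contained sketch of the per-sender max-merge that MutableDigest.merge() performs above
 * (assumed semantics: for each sender, keep max(highest_delivered) and max(highest_received)
 * across all digests). Here long[0]=highest_delivered, long[1]=highest_received: */
import java.util.*;

public class DigestMergeDemo {
    public static void main(String[] args) {
        Map<String,long[]> d1=Collections.singletonMap("A", new long[]{10, 12});
        Map<String,long[]> d2=Collections.singletonMap("A", new long[]{11, 11});
        Map<String,long[]> merged=new HashMap<>();
        for(Map<String,long[]> d: Arrays.asList(d1, d2))
            d.forEach((sender, seqnos) -> merged.merge(sender, seqnos.clone(),
              (a, b) -> new long[]{Math.max(a[0], b[0]), Math.max(a[1], b[1])}));
        long[] a=merged.get("A");
        System.out.println(a[0] + ", " + a[1]); // 11, 12
    }
}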
public Object down(Event evt) {
    Object retval=super.down(evt);
    switch(evt.getType()) {
        case Event.VIEW_CHANGE:
            for(Address logical_addr: view.getMembersRaw()) {
                PhysicalAddress physical_addr=(PhysicalAddress)down_prot.down(
                  new Event(Event.GET_PHYSICAL_ADDRESS, logical_addr));
                if(physical_addr != null && !initial_hosts.contains(physical_addr)) {
                    dynamic_hosts.addIfAbsent(physical_addr);
                }
            }
            break;
        case Event.ADD_PHYSICAL_ADDRESS:
            Tuple<Address,PhysicalAddress> tuple=evt.getArg();
            PhysicalAddress physical_addr=tuple.getVal2();
            if(physical_addr != null && !initial_hosts.contains(physical_addr))
                dynamic_hosts.addIfAbsent(physical_addr);
            break;
    }
    return retval;
}
/** Returns the current view and digest. Tries to find a matching digest twice (if not found on the first try) */
public Tuple<View,Digest> getViewAndDigest() {
    MutableDigest digest=new MutableDigest(view.getMembersRaw()).set(getDigest());
    return digest.allSet() || digest.set(getDigest()).allSet()? new Tuple<>(view, digest) : null;
}
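/* The "try a second time" idiom above, in isolation: a racy snapshot is re-taken once before
 * giving up. Hypothetical Supplier/Predicate sketch, not the original API: */
import java.util.function.Predicate;
import java.util.function.Supplier;

public class SnapshotRetryDemo {
    static <T> T snapshotTwice(Supplier<T> take, Predicate<T> complete) {
        T snap=take.get();
        if(complete.test(snap))
            return snap;
        snap=take.get(); // state may have settled between the two reads
        return complete.test(snap)? snap : null;
    }

    public static void main(String[] args) {
        int[] calls={0};
        // Succeeds only on the second read, mimicking a digest that briefly lags the view:
        System.out.println(snapshotTwice(() -> ++calls[0], n -> n >= 2)); // 2
    }
}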
Address[] mbrs=view.getMembersRaw();
log.debug("%s: running reconciliation protocol on %d members", local_addr, mbrs != null? mbrs.length : 0);
lock_info_responses.reset(mbrs);
public Object down(Event evt) {
    Object retval=super.down(evt);
    switch(evt.getType()) {
        case Event.CONNECT:
        case Event.CONNECT_WITH_STATE_TRANSFER:
        case Event.CONNECT_USE_FLUSH:
        case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH:
            register(cluster_name, local_addr, this);
            break;
        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;
        case Event.BECOME_SERVER: // called after the client has joined and is a fully working group member
            is_server=true;
            break;
        case Event.VIEW_CHANGE:
        case Event.TMP_VIEW:
            curr_view=evt.getArg();
            Address[] mbrs=((View)evt.getArg()).getMembersRaw();
            is_coord=local_addr != null && mbrs != null && mbrs.length > 0 && local_addr.equals(mbrs[0]);
            break;
        case Event.GET_PING_DATA:
            return getDiscoveryResponsesFor(evt.getArg()); // don't pass further down
    }
    return retval;
}
return new MutableDigest(view.getMembersRaw())
  .set((Digest)gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr)));
gms.getDownProtocol().down(new Event(Event.SUSPEND_STABLE, MAX_SUSPEND_TIMEOUT));
MutableDigest join_digest=new MutableDigest(new_view.getMembersRaw()).set(gms.getDigest());
for(Address member: new_mbrs)
    join_digest.set(member, 0, 0); // ... and set the new members; their first seqno will be 1
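/* Illustration of the seeding step above: each joiner's entry is initialized to
 * (highest_delivered=0, highest_received=0), so the first seqno it broadcasts is 1.
 * Plain-Java stand-in for MutableDigest, with Strings for addresses: */
import java.util.*;

public class JoinDigestDemo {
    public static void main(String[] args) {
        Map<String,long[]> digest=new LinkedHashMap<>();
        digest.put("A", new long[]{25, 25}); // existing member, taken from the current digest
        for(String joiner: Arrays.asList("D", "E"))
            digest.put(joiner, new long[]{0, 0}); // new members start at 0
        digest.forEach((m, s) ->
          System.out.println(m + ": delivered=" + s[0] + ", received=" + s[1]));
    }
}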
/**
 * Typically received by the coord, which sends its cache contents to the sender (new joiner). However, we don't
 * send one large message, but rather N messages (1 per cluster member). The reason is that we don't know where in
 * the stack NAMING will be running and therefore cannot assume fragmentation of large messages.
 */
protected void handleCacheRequest(Address sender) {
    int view_size=view != null? view.size() : 0;
    if(view_size == 0)
        return;
    for(Address addr: view.getMembersRaw()) {
        if(Objects.equals(addr, sender))
            continue;
        String logical_name=NameCache.get(addr);
        if(logical_name == null)
            continue;
        Header hdr=new Header(Type.CACHE_RSP, addr, logical_name);
        Message msg=new Message(sender).putHeader(id, hdr);
        if(log.isTraceEnabled())
            log.trace("%s: sending %s to %s", local_addr, hdr, sender);
        try {
            down_prot.down(msg);
        }
        catch(Throwable t) {
            log.error("failed sending CACHE_RSP", t);
        }
    }
}
public static Tuple<View,Digest> _readViewAndDigest(byte[] buffer, int offset, int length) throws Exception {
    if(buffer == null)
        return null;
    DataInput in=new ByteArrayDataInputStream(buffer, offset, length);
    View tmp_view=null;
    Digest digest=null;
    short flags=in.readShort();
    if((flags & VIEW_PRESENT) == VIEW_PRESENT) {
        tmp_view=(flags & MERGE_VIEW) == MERGE_VIEW? new MergeView()
          : (flags & DELTA_VIEW) == DELTA_VIEW? new DeltaView() : new View();
        tmp_view.readFrom(in);
    }
    if((flags & DIGEST_PRESENT) == DIGEST_PRESENT) {
        if((flags & READ_ADDRS) == READ_ADDRS) {
            digest=new Digest();
            digest.readFrom(in);
        }
        else {
            digest=new Digest(tmp_view.getMembersRaw());
            digest.readFrom(in, false);
        }
    }
    return new Tuple<>(tmp_view, digest);
}
left_mbrs=change_key_on_leave && this.view != null && !v.containsMembers(this.view.getMembersRaw());
create_new_key=secret_key == null || left_mbrs;
super.handleView(v);
final MutableDigest d=new MutableDigest(current_view.getMembersRaw()).set(getDigest());
Address dest=send_stable_msgs_to_coord_only? coordinator : null;