private void handleFlushReconcile(Message msg) {
    Address requester=msg.getSrc();
    Tuple<Collection<? extends Address>,Digest> tuple=readParticipantsAndDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength());
    Digest reconcileDigest=tuple.getVal2();

    if(log.isDebugEnabled())
        log.debug(localAddress + ": received FLUSH_RECONCILE, passing digest to NAKACK " + reconcileDigest);

    // let NAKACK reconcile missing messages
    down_prot.down(new Event(Event.REBROADCAST, reconcileDigest));

    if(log.isDebugEnabled())
        log.debug(localAddress + ": returned from FLUSH_RECONCILE, sending RECONCILE_OK to " + requester);

    Message reconcileOk=new Message(requester).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(this.id, new FlushHeader(FlushHeader.FLUSH_RECONCILE_OK));
    down_prot.down(reconcileOk);
}
protected void handleEvent(Event evt) {
    switch(evt.getType()) {
        case Event.VIEW_CHANGE:
            View old_view=view, new_view=evt.getArg();
            this.view=new_view;
            if(old_view == null) { // first join
                Util.sleepRandom(0, stagger_timeout);
                // 1. send my own mapping to all
                multicastOwnMapping();
                // 2. ask the coordinator to send us the cache contents
                Address coord=new_view.getCoord();
                if(Objects.equals(local_addr, coord))
                    return;
                Message msg=new Message(coord).setFlag(Message.Flag.OOB).putHeader(id, new Header(Type.CACHE_REQ));
                down_prot.down(msg);
                return;
            }
            if(new_view instanceof MergeView) {
                Util.sleepRandom(0, stagger_timeout);
                multicastOwnMapping();
            }
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;
    }
}
protected static String printEvents(List<Integer> events) {
    StringBuilder sb=new StringBuilder("[");
    for(int evt: events)
        sb.append(Event.type2String(evt)).append(" ");
    sb.append("]");
    return sb.toString();
}
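// For illustration, a hypothetical call (Event.CONNECT and Event.DISCONNECT are real
// event-type constants; the list itself is made up for this example). Note that the
// method leaves a trailing space before the closing bracket:
List<Integer> events=Arrays.asList(Event.CONNECT, Event.DISCONNECT);
System.out.println(printEvents(events)); // prints "[CONNECT DISCONNECT ]"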
public Object down(Event evt) {
    if(evt.getType() == Event.CONFIG) {
        Map<String,Object> map=evt.getArg();
        Integer tmp=map != null? (Integer)map.get("frag_size") : null;
        if(tmp != null)
            frag_size=tmp;
        if(frag_size > 0 && max_bytes % frag_size != 0)
            log.warn("For optimal performance, max_bytes (%d) should be a multiple of frag_size (%d)", max_bytes, frag_size);
    }
    return down_prot.down(evt);
}
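// For context, a sketch of how a layer above could push a fragmentation size down to the
// CONFIG handler above (hypothetical producer; only the key "frag_size" and the CONFIG
// event type are taken from the handler itself, the value 60_000 is made up):
Map<String,Object> info=new HashMap<>();
info.put("frag_size", 60_000);
down_prot.down(new Event(Event.CONFIG, info));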
/*
 * Fragment of GMS.up(Message): the excerpt had lost its switch/case scaffolding, so the
 * case labels, message reads and braces below are reconstructed from context and may not
 * match the original source exactly.
 */
public Object up(Message msg) {
    GmsHeader hdr=msg.getHeader(this.id);
    if(hdr == null)
        return up_prot.up(msg);

    switch(hdr.type) {
        case GmsHeader.VIEW:
            View new_view;
            try {
                // read the view; expand a delta-view into a full view (reconstructed, details elided):
                Tuple<View,Digest> view_tuple=readViewAndDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength());
                new_view=createViewFromDeltaView(view, (DeltaView)view_tuple.getVal1());
            }
            catch(Throwable t) {
                log.warn("%s: failed to create view from delta-view; dropping view: %s", local_addr, t.toString());
                Message full_view_req=new Message(msg.src())
                  .putHeader(id, new GmsHeader(GmsHeader.GET_CURRENT_VIEW)); // reconstructed request
                log.trace("%s: sending request for full view to %s", local_addr, msg.src());
                down_prot.down(full_view_req);
                return null;
            }
            if(!new_view.containsMember(msg.src())) // drop a view whose sender is not a member (reconstructed guard)
                return null;
            // ... view installation elided ...
            break;

        case GmsHeader.INSTALL_DIGEST:
            Tuple<View,Digest> tuple=readViewAndDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); // reconstructed read
            if(tuple == null)
                return null;
            Digest tmp=tuple.getVal2();
            down_prot.down(new Event(Event.MERGE_DIGEST, tmp));
            break;

        case GmsHeader.GET_DIGEST_REQ:
            // fetch only my own digest; returning it to the requester is elided:
            Digest digest=(Digest)down_prot.down(new Event(Event.GET_DIGEST, local_addr));
            break;

        case GmsHeader.GET_CURRENT_VIEW:
            ViewId view_id=readViewId(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); // reconstructed read
            if(view_id != null) {
                ViewId my_view_id=this.view != null? this.view.getViewId() : null;
                if(my_view_id != null && my_view_id.compareToIDs(view_id) <= 0)
                    return null; // my view-id doesn't differ from the sender's view-id; no need to send the view
            }
            // ... sending our current view back to the requester elided ...
            break;

        default:
            if(log.isErrorEnabled())
                log.error(Util.getMessage("GmsHeaderWithType"), hdr.type);
    }
    return null;
}
// Fragment of a state-transfer protocol's down(Event): the case labels and the
// target==null branching are reconstructed from context.
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.VIEW_CHANGE:
            handleViewChange(evt.getArg());
            break;

        case Event.GET_STATE:
            StateTransferInfo info=evt.getArg();
            Address target=info.target;
            if(target != null && target.equals(local_addr)) { // reconstructed guard
                log.error("%s: cannot fetch state from myself", local_addr);
                target=null;
            }
            if(target == null) {
                log.debug("%s: first member (no state)", local_addr);
                up_prot.up(new Event(Event.STATE_TRANSFER_INPUTSTREAM_CLOSED, new StateTransferResult()));
            }
            else {
                Message state_req=new Message(target).putHeader(this.id, new StateHeader(StateHeader.STATE_REQ))
                  .setFlag(Message.Flag.SKIP_BARRIER, Message.Flag.DONT_BUNDLE, Message.Flag.OOB);
                log.debug("%s: asking %s for state", local_addr, target);
                down_prot.down(state_req);
            }
            return null;

        case Event.CONFIG:
            handleConfig(evt.getArg());
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;
    }
    return down_prot.down(evt); // pass on to the layer below us
}
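// For context, how an application typically ends up triggering the GET_STATE branch above
// (a sketch; the cluster name "demo-cluster" and the 10s timeout are made up):
public static void fetchInitialState() throws Exception {
    JChannel ch=new JChannel().connect("demo-cluster");
    ch.getState(null, 10_000); // null target: fetch the state from the coordinator
}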
// Fragment of a failure-detection protocol's down(Event): the case labels, the return of
// retval and the startMonitor() call are reconstructed from context.
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.VIEW_CHANGE:
            Object retval=down_prot.down(evt);
            View view=evt.getArg();
            members.addAll(view.getMembers());
            bcast_task.adjustSuspectedMembers(members);
            computePingDest(null);
            if(view.size() <= 1)
                stopMonitor();
            else if(!isMonitorRunning())
                startMonitor(); // reconstructed: the original branch body was missing
            return retval;

        case Event.UNSUSPECT:
            FdHeader hdr=new FdHeader(FdHeader.UNSUSPECT);
            hdr.mbrs=new ArrayList<>();
            hdr.mbrs.add(evt.getArg());
            hdr.from=local_addr;
            Message unsuspect_msg=new Message().setFlag(Message.Flag.INTERNAL).putHeader(id, hdr);
            log.trace("%s: broadcasting UNSUSPECT message (mbrs=%s)", local_addr, hdr.mbrs);
            down_prot.down(unsuspect_msg);
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;
    }
    return down_prot.down(evt);
}
// Fragment of the merge digest-collection logic; the method signature and the
// single-member early-return guard are reconstructed from context.
protected Digest fetchDigestsFromAllMembersInSubPartition(View view, MergeId merge_id) {
    final List<Address> current_mbrs=view.getMembers();

    // if we are the only member, just return our own digest (reconstructed guard):
    if(current_mbrs.size() == 1 && current_mbrs.get(0).equals(gms.local_addr))
        return new MutableDigest(view.getMembersRaw())
          .set((Digest)gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr)));

    Message get_digest_req=new Message().setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(gms.getId(), new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ).mergeId(merge_id));
    gms.getDownProtocol().down(get_digest_req);

    // add my own digest, then wait for the digests of the other members:
    Digest digest=(Digest)gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));
    digest_collector.add(gms.local_addr, digest);
    digest_collector.waitForAllResponses(max_wait_time);
    if(log.isTraceEnabled()) {
        if(digest_collector.hasAllResponses())
            log.trace("%s: fetched all digests for %s", gms.local_addr, current_mbrs);
        else
            log.trace("%s: fetched incomplete digests (after timeout of %d ms) for %s",
                      gms.local_addr, max_wait_time, current_mbrs);
    }
    // ... assembling and returning the collected digests elided ...
}
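// A sketch of the elided assembly step above: merge every collected digest into one
// digest covering the view's members. This is assumed from context, not verbatim source:
MutableDigest retval=new MutableDigest(view.getMembersRaw());
digest_collector.getResults().values().stream()
  .filter(Objects::nonNull)
  .forEach(retval::merge);
return retval;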
log.trace("%s: mcasting view %s", local_addr, new_view); up_prot.up(new Event(Event.TMP_VIEW, new_view)); down_prot.down(new Event(Event.TMP_VIEW, new_view)); Message view_change_msg=new Message().putHeader(this.id, new GmsHeader(GmsHeader.VIEW)) .setBuffer(marshal(new_view, digest)).setTransientFlag(Message.TransientFlag.DONT_LOOPBACK); if(new_view instanceof MergeView) // https://issues.jboss.org/browse/JGRP-1484 view_change_msg.setFlag(Message.Flag.NO_TOTAL_ORDER); down_prot.down(view_change_msg); sendJoinResponses(jr, joiners); try { if(ack_collector.size() > 0) { ack_collector.waitForAllAcks(view_ack_collection_timeout); log.trace("%s: got all ACKs (%d) for view %s in %d ms", local_addr, ack_collector.expectedAcks(), new_view.getViewId(), System.currentTimeMillis()-start); log.warn("%s: failed to collect all ACKs (expected=%d) for view %s after %dms, missing %d ACKs from %s", local_addr, ack_collector.expectedAcks(), new_view.getViewId(), view_ack_collection_timeout, ack_collector.size(), ack_collector.printMissing());
log.trace("%s: cancelling merge as we only have 1 coordinator: %s", local_addr, coords); return; log.trace("%s: merge participants are %s", local_addr, coords); log.trace("%s: reduced %d coords to %d", local_addr, old_size, max_participants_in_merge); continue; Message view_req=new Message(target).setFlag(Message.Flag.INTERNAL) .putHeader(getId(), MergeHeader.createViewRequest()); down_prot.down(view_req); if(Util.allEqual(tmp_views)) { log.trace("%s: all views are the same, suppressing sending MERGE up. Views: %s", local_addr, tmp_views); return; up_prot.up(new Event(Event.MERGE, merge_views)); num_merge_events++;
@Override
public void members(List<PingData> mbrs) {
    PhysicalAddress own_physical_addr=(PhysicalAddress)down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
    PingData data=new PingData(local_addr, false, org.jgroups.util.NameCache.get(local_addr), own_physical_addr);
    PingHeader hdr=new PingHeader(PingHeader.GET_MBRS_REQ).clusterName(cluster_name);

    Set<PhysicalAddress> physical_addrs=mbrs.stream()
      .filter(ping_data -> ping_data != null && ping_data.getPhysicalAddr() != null)
      .map(PingData::getPhysicalAddr).collect(Collectors.toSet());

    for(PhysicalAddress physical_addr: physical_addrs) {
        if(own_physical_addr.equals(physical_addr)) // no need to send the request to myself
            continue;
        // the message needs to be DONT_BUNDLE, see explanation above
        final Message msg=new Message(physical_addr)
          .setFlag(Message.Flag.INTERNAL, Message.Flag.DONT_BUNDLE, Message.Flag.OOB)
          .putHeader(this.id, hdr).setBuffer(marshal(data));
        log.trace("%s: sending discovery request to %s", local_addr, msg.getDest());
        down_prot.down(msg);
    }
}
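// marshal(data) is not shown above; a minimal sketch, assuming PingData is Streamable and
// the helper simply serializes it into a Buffer (the real implementation may differ):
protected static Buffer marshal(PingData data) {
    return Util.streamableToBuffer(data);
}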
// Fragment of Discovery.up(Message): the method prelude, the switch over the header type
// and the guards below are reconstructed from context.
public Object up(Message msg) {
    PingHeader hdr=msg.getHeader(this.id); // reconstructed prelude
    if(hdr == null)
        return up_prot.up(msg);

    PingData data=readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength());
    Address logical_addr=data != null? data.getAddress() : msg.src();

    switch(hdr.type) {
        case PingHeader.GET_MBRS_REQ:
            if(cluster_name == null || hdr.cluster_name == null) {
                log.warn("cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " +
                           "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src());
            }
            else if(!cluster_name.equals(hdr.cluster_name)) {
                log.warn("%s: discarding discovery request for cluster '%s' from %s; " +
                           "our cluster name is '%s'. Please separate your clusters properly",
                         logical_addr, hdr.cluster_name, msg.src(), cluster_name);
                return null;
            }

            if(return_entire_cache) { // reconstructed guard
                Map<Address,PhysicalAddress> cache=(Map<Address,PhysicalAddress>)down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS));
                if(cache != null) {
                    for(Map.Entry<Address,PhysicalAddress> entry: cache.entrySet()) {
                        Address addr=entry.getKey();
                        // only return our own address and addresses in the current view:
                        if(addr.equals(local_addr) || (view != null && view.containsMember(addr))) {
                            PhysicalAddress physical_addr=entry.getValue();
                            sendDiscoveryResponse(addr, physical_addr, NameCache.get(addr), msg.getSrc(), isCoord(addr));
                        }
                    }
                }
                return null;
            }

            Collection<? extends Address> mbrs=data != null? data.mbrs() : null; // reconstructed read
            boolean drop_because_of_rank=use_ip_addrs && max_rank_to_reply > 0 && hdr.initialDiscovery()
              && Util.getRank(view, local_addr) > max_rank_to_reply;
            if(drop_because_of_rank || (mbrs != null && !mbrs.contains(local_addr)))
                return null;
            PhysicalAddress physical_addr=(PhysicalAddress)down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
            sendDiscoveryResponse(local_addr, physical_addr, NameCache.get(local_addr), msg.getSrc(), is_coord);
            return null;

        case PingHeader.GET_MBRS_RSP:
            log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data);
            handleDiscoveryResponse(data, msg.src());
            return null;
    }
    return null;
}
protected void getStateFromApplication(Address requester, Digest digest) {
    StateTransferInfo rsp=(StateTransferInfo)up_prot.up(new Event(Event.GET_APPLSTATE));
    byte[] state=rsp.state;

    if(stats) {
        num_state_reqs.increment();
        if(state != null)
            num_bytes_sent.add(state.length);
        avg_state_size=num_bytes_sent.doubleValue() / num_state_reqs.doubleValue();
    }

    Message state_rsp=new Message(requester, state).putHeader(this.id, new StateHeader(StateHeader.STATE_RSP, digest));
    log.trace("%s: sending state to %s (size=%s)", local_addr, state_rsp.getDest(),
              Util.printBytes(state != null? state.length : 0));
    down_prot.down(state_rsp);
}
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.SET_LOCAL_ADDRESS:
            local_addr=evt.getArg();
            break;

        case Event.VIEW_CHANGE:
            View v=evt.getArg();
            adjustSuspectedMembers(v.getMembers());
            break;
    }
    return down_prot.down(evt);
}
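// adjustSuspectedMembers() is referenced above but not shown; a minimal sketch consistent
// with its use here (prune suspects that are no longer members), assuming a suspected_mbrs
// collection as the hypothetical backing state:
protected void adjustSuspectedMembers(List<Address> new_mbrship) {
    suspected_mbrs.retainAll(new_mbrship); // drop suspects that have left the view
}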
/** Send the client's public key to the key server and request the server's secret key */
protected void sendKeyRequest(Address key_server) {
    if(key_server == null)
        return;
    // throttle key requests: at most one per min_time_between_key_requests ms
    if(last_key_request == 0 || System.currentTimeMillis() - last_key_request > min_time_between_key_requests)
        last_key_request=System.currentTimeMillis();
    else
        return;

    if(use_external_key_exchange) {
        log.debug("%s: asking key exchange protocol to get secret key from %s", local_addr, key_server);
        down_prot.down(new Event(Event.FETCH_SECRET_KEY, key_server));
        return;
    }

    log.debug("%s: asking %s for the secret key (my version: %s)",
              local_addr, key_server, Util.byteArrayToHexString(sym_version));
    Message newMsg=new Message(key_server, key_pair.getPublic().getEncoded()).src(local_addr)
      .putHeader(this.id, new EncryptHeader(EncryptHeader.SECRET_KEY_REQ, null));
    down_prot.down(newMsg);
}
// Fragment of FLUSH suspect handling: the guard around the coordinator-suspected branch,
// the local declarations and the final debug statement are reconstructed from context.
if(amINeighbourOfCrashedFlushCoordinator) { // computed earlier (elided)
    if(log.isDebugEnabled())
        log.debug(localAddress + ": flush coordinator " + flushCoordinator + " suspected, I am the neighbor, completing the flush");
    onResume(new Event(Event.RESUME, flushMembersCopy));
}

// check whether the flush is now complete with the remaining (non-suspected) members:
boolean flushOkCompleted=!flushCompletedMap.isEmpty() && flushCompletedMap.keySet().containsAll(flushMembers);
Message m=null;
long viewID=currentViewId(); // reconstructed
if(flushOkCompleted)
    m=new Message(flushCoordinator).src(localAddress);
log.debug(localAddress + ": suspects: " + addresses + ", completed " + flushOkCompleted
            + ", flushOkSet " + flushCompletedMap + ", flushMembers " + flushMembers);

if(flushOkCompleted) {
    Digest digest=(Digest)down_prot.down(Event.GET_DIGEST_EVT);
    m.putHeader(this.id, new FlushHeader(FlushHeader.FLUSH_COMPLETED, viewID)).setBuffer(marshal(null, digest));
    down_prot.down(m);
    if(log.isDebugEnabled())
        log.debug(localAddress + ": sent FLUSH_COMPLETED message to " + flushCoordinator); // reconstructed message
}
physical_addr=(PhysicalAddress)down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));

Collection<PhysicalAddress> list=(Collection<PhysicalAddress>)down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESSES));
if(list != null)
    list.stream().filter(phys_addr -> !cluster_members.contains(phys_addr)).forEach(cluster_members::add);

final Message msg=new Message(addr).setFlag(Message.Flag.INTERNAL, Message.Flag.DONT_BUNDLE, Message.Flag.OOB)
  .putHeader(this.id, hdr);
if(data != null)
    msg.setBuffer(marshal(data));
// Fragment of the coordinator-side GMS flush: the try/catch and the success/failure
// branches are reconstructed from context; the placement of the RESUME event is assumed.
boolean successfulFlush=false;
boolean validView=new_view != null && new_view.size() > 0;
if(validView && flushProtocolInStack) {
    int attemptCount=0;
    while(attemptCount < maxAttempts) {
        if(attemptCount > 0)
            Util.sleepRandom(randomFloor, randomCeiling);
        try {
            up_prot.up(new Event(Event.SUSPEND, new ArrayList<>(new_view.getMembers())));
            successfulFlush=true;
            break;
        }
        catch(Exception e) { // reconstructed: retry on a failed flush attempt
            attemptCount++;
        }
    }

    if(successfulFlush) {
        if(log.isTraceEnabled())
            log.trace(local_addr + ": successful GMS flush by coordinator");
    }
    else {
        // undo a potentially partial flush before giving up (placement assumed):
        up(new Event(Event.RESUME, new ArrayList<>(new_view.getMembers())));
        if(log.isWarnEnabled())
            log.warn(local_addr + ": GMS flush by coordinator failed");
    }
}