/**
 * Records receipt of a heartbeat from {@code sender} by flipping its flag in {@code timestamps}.
 * Heartbeats from ourselves or from a null sender are ignored.
 * <p>
 * Fixes two issues: the return value of {@code putIfAbsent} was previously ignored, so a racing
 * insert could leave an existing {@code false} flag unset (losing the heartbeat); and the trace
 * statement fired even for null/self senders, logging "Received heartbeat from null".
 * @param sender the member the heartbeat came from; may be null
 */
protected void update(Address sender) {
    if(sender != null && !sender.equals(local_addr)) {
        AtomicBoolean heartbeat_received=timestamps.get(sender);
        if(heartbeat_received != null)
            heartbeat_received.compareAndSet(false, true);
        else {
            // putIfAbsent can lose a race with a concurrent insert; if it does, mark the
            // already-present entry instead of silently dropping this heartbeat
            AtomicBoolean existing=timestamps.putIfAbsent(sender, new AtomicBoolean(true));
            if(existing != null)
                existing.compareAndSet(false, true);
        }
        if(log.isTraceEnabled())
            log.trace("Received heartbeat from %s", sender);
    }
}
/**
 * Sends an RSVP response header of type {@code RSP} with the given id to {@code dest}.
 * The response bypasses bundling and is flagged OOB/INTERNAL so it is processed immediately.
 * Any failure is logged, never propagated.
 */
protected void sendResponse(Address dest, short id) {
    try {
        final RsvpHeader hdr=new RsvpHeader(RsvpHeader.RSP, id);
        final Message rsp=new Message(dest)
          .putHeader(this.id, hdr)
          .setFlag(Message.Flag.RSVP, Message.Flag.INTERNAL, Message.Flag.DONT_BUNDLE, Message.Flag.OOB);
        if(log.isTraceEnabled())
            log.trace(local_addr + ": " + hdr.typeToString() + " --> " + dest);
        down_prot.down(rsp);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedSendingResponse"), t);
    }
}
/**
 * Delivers {@code msg} up the stack unless it has no sender or {@link #canDeliver}
 * identifies it as a duplicate; dropped messages are logged, not delivered.
 * @param id  the sequence id of the message
 * @param msg the message to deliver; its source address is taken from the message itself
 */
public void receive(long id, Message msg) {
    final Address src=msg.getSrc();
    // guard: a message without a source cannot be checked for duplicates
    if(src == null) {
        if(log.isErrorEnabled())
            log.error(local_addr + ": sender is null, cannot deliver message " + "::" + id);
        return;
    }
    // guard: drop anything already seen from this sender
    if(!canDeliver(src, id)) {
        if(log.isWarnEnabled())
            log.warn(local_addr + ": dropped duplicate message " + src + "::" + id);
        return;
    }
    if(log.isTraceEnabled())
        log.trace(local_addr + ": delivering " + src + "::" + id);
    up_prot.up(msg);
}
/**
 * Logs {@code e} at debug level with its full stack trace; when debug is disabled,
 * falls back to a warn-level one-liner containing only the localized message.
 */
private void logException(String msg, Exception e) {
    if(log.isDebugEnabled())
        log.debug(msg, e);
    else if(log.isWarnEnabled())
        log.warn("%s. Error is %s", msg, e.getLocalizedMessage());
}
/**
 * Returns the raw {@link Value} wrapper stored under {@code key}, or null if absent.
 * Note: unlike {@code get(K)}, no expiration check is performed here.
 */
public Value<V> getEntry(K key) {
    if(log.isTraceEnabled())
        log.trace("getEntry(" + key + ")");
    return map.get(key);
}
/** Ends the flush phase by passing a RESUME event (with the given members) up the stack. */
void stopFlush(List<Address> members) {
    if(log.isTraceEnabled())
        log.trace(local_addr + ": sending RESUME event");
    up_prot.up(new Event(Event.RESUME, members));
}
/**
 * Returns the value stored under {@code key}, or null if it is absent or has expired;
 * an expired entry is removed as a side effect.
 * NOTE(review): the expiry check and removal are not atomic — a concurrent put between
 * them could be removed; confirm whether this race is acceptable for this cache.
 */
@ManagedOperation
public V get(K key) {
    if(log.isTraceEnabled())
        log.trace("get(" + key + ")");
    final Value<V> val=map.get(key);
    if(!isExpired(val))
        return getValue(val);
    // lazily evict the stale entry on read
    map.remove(key);
    return null;
}
/**
 * Records the current timestamp for a heartbeat received from {@code sender}.
 * Heartbeats from ourselves or from a null sender are ignored.
 * <p>
 * Fix: the trace statement was previously outside the guard, so "Received heartbeat
 * from null" (or from ourselves) could be logged even though nothing was recorded.
 * @param sender the member the heartbeat came from; may be null
 */
protected void update(Address sender) {
    if(sender != null && !sender.equals(local_addr)) {
        timestamps.put(sender, getTimestamp());
        if(log.isTraceEnabled())
            log.trace("Received heartbeat from %s", sender);
    }
}
protected void handleViewChange(View view) { List<Address> new_mbrs=view.getMembers(); List<Address> left_mbrs=Util.determineLeftMembers(members, new_mbrs); members.clear(); members.addAll(new_mbrs); for(Address mbr: left_mbrs) { // the new view doesn't contain the sender, it must have left, hence we will clear its fragmentation tables fragment_list.remove(mbr); log.trace("%s: removed %s from fragmentation table", local_addr, mbr); } }
/** Removes the entry under {@code key} and returns its unwrapped value, or null if absent. */
public V remove(K key) {
    if(log.isTraceEnabled())
        log.trace("remove(" + key + ")");
    final Value<V> removed=map.remove(key);
    return getValue(removed);
}
/**
 * Creates (or returns the already-registered) receiver window for {@code sender}.
 * The new table starts at {@code seqno-1} so {@code seqno} is the first expected message.
 * If another thread registered an entry concurrently, that entry wins and is returned.
 */
protected ReceiverEntry createReceiverEntry(Address sender, long seqno, short conn_id) {
    final Table<Message> win=new Table<>(xmit_table_num_rows, xmit_table_msgs_per_row, seqno-1,
                                        xmit_table_resize_factor, xmit_table_max_compaction_time);
    final ReceiverEntry fresh=new ReceiverEntry(win, conn_id);
    final ReceiverEntry existing=recv_table.putIfAbsent(sender, fresh);
    if(existing != null)
        return existing; // lost the race; use the concurrently-registered entry
    log.trace("%s: created receiver window for %s at seqno=#%d for conn-id=%d", local_addr, sender, seqno, conn_id);
    return fresh;
}
private void handleFlushReconcile(Message msg) { Address requester = msg.getSrc(); Tuple<Collection<? extends Address>,Digest> tuple=readParticipantsAndDigest(msg.getRawBuffer(), msg.getOffset(),msg.getLength()); Digest reconcileDigest = tuple.getVal2(); if (log.isDebugEnabled()) log.debug(localAddress + ": received FLUSH_RECONCILE, passing digest to NAKACK " + reconcileDigest); // Let NAKACK reconcile missing messages down_prot.down(new Event(Event.REBROADCAST, reconcileDigest)); if (log.isDebugEnabled()) log.debug(localAddress + ": returned from FLUSH_RECONCILE, " + " sending RECONCILE_OK to " + requester); Message reconcileOk = new Message(requester).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL) .putHeader(this.id,new FlushHeader(FlushHeader.FLUSH_RECONCILE_OK)); down_prot.down(reconcileOk); }
/** Sends an OOB MERGE_RSP to {@code dest} with the merge-rejected flag set. */
protected void sendMergeRejectionMessage(Address dest) {
    final GmsHeader hdr=new GmsHeader(GmsHeader.MERGE_RSP);
    hdr.setMergeRejected(true);
    final Message msg=new Message(dest).setFlag(Message.Flag.OOB).putHeader(GMS_ID, hdr);
    if(log.isDebugEnabled())
        log.debug("merge response=" + hdr);
    down_prot.down(msg);
}
}
public Object down(Event evt) { switch(evt.getType()) { case Event.VIEW_CHANGE: Object retval=down_prot.down(evt); View view=evt.getArg(); members.addAll(view.getMembers()); bcast_task.adjustSuspectedMembers(members); computePingDest(null); if(view.size() <= 1) stopMonitor(); else if(!isMonitorRunning()) FdHeader hdr=new FdHeader(FdHeader.UNSUSPECT); hdr.mbrs=new ArrayList<>(); hdr.mbrs.add(evt.getArg()); hdr.from=local_addr; Message unsuspect_msg=new Message().setFlag(Message.Flag.INTERNAL).putHeader(id, hdr); log.trace("%s: broadcasting UNSUSPECT message (mbrs=%s)", local_addr, hdr.mbrs); down_prot.down(unsuspect_msg); break; break; return down_prot.down(evt);
@Override public void members(List<PingData> mbrs) { PhysicalAddress own_physical_addr=(PhysicalAddress)down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); PingData data=new PingData(local_addr, false, org.jgroups.util.NameCache.get(local_addr), own_physical_addr); PingHeader hdr=new PingHeader(PingHeader.GET_MBRS_REQ).clusterName(cluster_name); Set<PhysicalAddress> physical_addrs=mbrs.stream().filter(ping_data -> ping_data != null && ping_data.getPhysicalAddr() != null) .map(PingData::getPhysicalAddr).collect(Collectors.toSet()); for(PhysicalAddress physical_addr: physical_addrs) { if(physical_addr != null && own_physical_addr.equals(physical_addr)) // no need to send the request to myself continue; // the message needs to be DONT_BUNDLE, see explanation above final Message msg=new Message(physical_addr).setFlag(Message.Flag.INTERNAL, Message.Flag.DONT_BUNDLE, Message.Flag.OOB) .putHeader(this.id, hdr).setBuffer(marshal(data)); log.trace("%s: sending discovery request to %s", local_addr, msg.getDest()); down_prot.down(msg); } }
/**
 * Starts the flush protocol
 * @param members List of participants in the flush protocol. Guaranteed to be non-null
 */
// Builds the flush participant set (current view members minus suspected ones) under
// sharedLock, then either short-circuits with SUCCESS_START_FLUSH when nobody needs
// flushing, or broadcasts a START_FLUSH message. The message is built inside the lock
// but sent outside it, so the lock is not held during the down() call.
private void onSuspend(final List<Address> members) {
    Message msg = null;
    Collection<Address> participantsInFlush = null;
   synchronized (sharedLock) {
      flushCoordinator = localAddress;

      // start FLUSH only on group members that we need to flush
      // NOTE(review): retainAll mutates the caller-supplied list in place — confirm
      // callers do not rely on the original contents, or copy before retaining.
      participantsInFlush = members;
      participantsInFlush.retainAll(currentView.getMembers());
      flushMembers.clear();
      flushMembers.addAll(participantsInFlush);
      // suspected members are unreachable; don't wait for their FLUSH_COMPLETED
      flushMembers.removeAll(suspected);

      msg = new Message(null).src(localAddress).setBuffer(marshal(participantsInFlush, null))
        .putHeader(this.id, new FlushHeader(FlushHeader.START_FLUSH, currentViewId()));
   }
    if (participantsInFlush.isEmpty()) {
        // nothing to flush; report success immediately without any messaging
        flush_promise.setResult(SUCCESS_START_FLUSH);
    } else {
        down_prot.down(msg);
        if (log.isDebugEnabled())
            log.debug(localAddress + ": flush coordinator "
                    + " is starting FLUSH with participants " + participantsInFlush);
    }
}
// Sends a GET_MBRS_RSP discovery response (our logical/physical address, name and coord
// status) back to the requester. When stagger_timeout is set, responses are delayed so
// that not all members answer a discovery request at the same instant:
//   - rank 0 (coordinator, or unknown rank when view/local_addr is null) picks a random
//     delay in [0, stagger_timeout)
//   - every other member sleeps a deterministic, rank-proportional delay of
//     stagger_timeout * (rank-1) / view_size (integer division; written as two terms below)
// With no stagger, the response is sent immediately on the caller's thread.
protected void sendDiscoveryResponse(Address logical_addr, PhysicalAddress physical_addr, String logical_name,
                                     final Address sender, boolean coord) {
    final PingData data=new PingData(logical_addr, is_server, logical_name, physical_addr).coord(coord);
    // OOB + DONT_BUNDLE so the response is neither queued behind regular traffic nor delayed by bundling
    final Message rsp_msg=new Message(sender).setFlag(Message.Flag.INTERNAL, Message.Flag.OOB, Message.Flag.DONT_BUNDLE)
      .putHeader(this.id, new PingHeader(PingHeader.GET_MBRS_RSP)).setBuffer(marshal(data));
    if(stagger_timeout > 0) {
        // fall back to a nominal view size of 10 when no view is installed yet
        int view_size=view != null? view.size() : 10;
        int rank=Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
        long sleep_time=rank == 0? Util.random(stagger_timeout)
          : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
        timer.schedule(() -> {
            log.trace("%s: received GET_MBRS_REQ from %s, sending staggered response %s", local_addr, sender, data);
            down_prot.down(rsp_msg);
        }, sleep_time, TimeUnit.MILLISECONDS, sends_can_block);
        return;
    }
    log.trace("%s: received GET_MBRS_REQ from %s, sending response %s", local_addr, sender, data);
    down_prot.down(rsp_msg);
}
/**
 * Unicasts a RESPONSE header carrying the assigned {@code seqno} (and how many seqnos
 * were granted) back to the original requester, and bumps the sent-responses counter.
 */
protected void sendSeqnoResponse(Address original_sender, long seqno, int num_seqnos) {
    final SequencerHeader hdr=new SequencerHeader(SequencerHeader.RESPONSE, seqno, num_seqnos);
    final Message rsp=new Message(original_sender).putHeader(this.id, hdr);
    if(log.isTraceEnabled())
        log.trace(local_addr + ": sending seqno response to " + original_sender
                    + ":: new_seqno=" + seqno + ", num_seqnos=" + num_seqnos);
    down_prot.down(rsp);
    sent_responses++;
}
/**
 * Send client's public key to server and request server's public key.
 * Requests are throttled: at most one every {@code min_time_between_key_requests} ms.
 * When an external key exchange protocol is configured, the request is delegated to it
 * via a FETCH_SECRET_KEY event instead of sending our public key directly.
 */
protected void sendKeyRequest(Address key_server) {
    if(key_server == null)
        return;
    // throttle: bail out if a request was sent too recently
    if(last_key_request != 0 && System.currentTimeMillis() - last_key_request <= min_time_between_key_requests)
        return;
    last_key_request=System.currentTimeMillis();
    if(use_external_key_exchange) {
        log.debug("%s: asking key exchange protocol to get secret key from %s", local_addr, key_server);
        down_prot.down(new Event(Event.FETCH_SECRET_KEY, key_server));
        return;
    }
    log.debug("%s: asking %s for the secret key (my version: %s)",
              local_addr, key_server, Util.byteArrayToHexString(sym_version));
    final Message req=new Message(key_server, key_pair.getPublic().getEncoded()).src(local_addr)
      .putHeader(this.id, new EncryptHeader(EncryptHeader.SECRET_KEY_REQ, null));
    down_prot.down(req);
}