// NOTE(review): fragment of a down()-direction event switch; most case labels appear to have
// been stripped during extraction, so the statements below belong to several different cases
// (digest get/set/overwrite/merge, view installation, become-server, reset, rebroadcast) —
// TODO confirm the exact labels against the original file.
switch(evt.getType()) {
    case Event.STABLE: // generated by STABLE layer. Delete stable messages passed in arg
        stable(evt.getArg());
        return null; // do not pass down further (Bela Aug 7 2001)
        // presumably a GET_DIGEST case — verify
        return getDigest(evt.getArg());
        // presumably a SET_DIGEST case — verify
        setDigest(evt.getArg());
        return null;
        // presumably an OVERWRITE_DIGEST case — verify
        overwriteDigest(evt.getArg());
        return null;
        // presumably a MERGE_DIGEST case — verify
        mergeDigest(evt.getArg());
        return null;
        // view-change handling: install new membership and adjust the retransmit tables
        members=mbrs;
        view=tmp_view;
        adjustReceivers(mbrs);
        is_server=true; // check vids from now on
        if(suppress_log_non_member != null)
        // flush messages queued while we were not yet a server — TODO confirm this guard's
        // relationship to flushBecomeServerQueue() in the original (line breaks were lost)
        flushBecomeServerQueue();
        break;
        reset();
        break;
        rebroadcast_digest=evt.getArg();
/**
 * Returns a default stack for testing with transport = SHARED_LOOPBACK.
 * @param additional_protocols Any number of protocols to add to the top of the returned protocol list
 * @return the base test stack, optionally extended with the given protocols appended on top
 */
public static Protocol[] getTestStack(Protocol... additional_protocols) {
    Protocol[] base={
      new SHARED_LOOPBACK(),
      new SHARED_LOOPBACK_PING(),
      new NAKACK2(),
      new UNICAST3(),
      new STABLE(),
      new GMS().joinTimeout(1000),
      new FRAG2().fragSize(8000)
    };
    if(additional_protocols == null)
        return base;
    // grow the array and append the extra protocols after the base stack
    Protocol[] combined=Arrays.copyOf(base, base.length + additional_protocols.length);
    System.arraycopy(additional_protocols, 0, combined, base.length, additional_protocols.length);
    return combined;
}
/**
 * Compares our current digest against the rebroadcast digest (under the lock) and, when we
 * have caught up (our digest is greater than or equal), cancels the ongoing rebroadcast round.
 */
protected void checkForRebroadcasts() {
    Digest current=getDigest();
    boolean caught_up=false;
    rebroadcast_digest_lock.lock();
    try {
        caught_up=isGreaterThanOrEqual(current, rebroadcast_digest);
    }
    catch(Throwable ignored) { // best-effort: a failed comparison simply keeps rebroadcasting
        ;
    }
    finally {
        rebroadcast_digest_lock.unlock();
    }
    if(caught_up)
        cancelRebroadcasting();
}
/** * <b>Callback</b>. Called by superclass when event may be handled.<p> <b>Do not use {@code passUp} in this * method as the event is passed up by default by the superclass after this method returns !</b> */ public Object up(Event evt) { switch(evt.getType()) { case Event.STABLE: // generated by STABLE layer. Delete stable messages passed in arg stable(evt.getArg()); return null; // do not pass up further (Bela Aug 7 2001) case Event.SUSPECT: // release the promise if rebroadcasting is in progress... otherwise we wait forever. there will be a new // flush round anyway if(rebroadcasting) cancelRebroadcasting(); break; } return up_prot.up(evt); }
// NOTE(review): fragment — the enclosing method header is outside this view; appears to
// replace a member's retransmit table based on a digest and record the change for diagnostics.
sb.append("existing digest: " + getDigest()).append("\nnew digest: " + digest);
// rebuild the member's table starting at the digest's highest-delivered seqno
buf=createTable(highest_delivered_seqno);
xmit_table.put(member, buf);
sb.append("\n").append("resulting digest: " + getDigest().toString(digest));
digest_history.add(sb.toString()); // keep a record of digest changes for later inspection
// the body of this conditional lies beyond the visible fragment
if(log.isDebugEnabled())
// NOTE(review): fragment of a MessageBatch-processing switch; the case labels and several
// enclosing braces are outside the visible span — TODO confirm against the original file.
queueMessage(msg, hdr.seqno); // message queued rather than processed — guard condition not visible here
continue;
// presumably an XMIT_REQ case: deserialize the list of missing seqnos and serve the request
SeqnoList missing=Util.streamableFromBuffer(SeqnoList::new, msg.getRawBuffer(), msg.getOffset(), msg.getLength());
if(missing != null)
    handleXmitReq(msg.getSrc(), missing, hdr.sender);
// presumably an XMIT_RSP case: unwrap the retransmitted message
Message xmitted_msg=msgFromXmitRsp(msg, hdr);
if(xmitted_msg != null) {
    if(msgs == null)
// records the sender's highest seqno — enclosing case label not visible; TODO confirm
handleHighestSeqno(batch.sender(), hdr.seqno);
break;
default:
// after the switch: deliver the regular messages collected from the batch
handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());
checkForRebroadcasts(); // re-check whether an in-progress rebroadcast round can complete
// NOTE(review): fragment — statements from more than one spot of a rebroadcast helper were
// concatenated here; the try/brace nesting is incomplete in this view.
rebroadcast_digest_lock.unlock();
Digest my_digest=getDigest();
boolean xmitted=false;
// the member has seen messages we have not: ask for the missing range
if(their_high > my_high) {
    log.trace("%s: fetching %d-%d from %s", local_addr, my_high, their_high, member);
    retransmit(my_high+1, their_high, member, true); // use multicast to send retransmit request
    xmitted=true;
try {
try {
// re-read the digest and re-check, under the lock, whether rebroadcasting is still needed
my_digest=getDigest();
rebroadcast_digest_lock.lock();
try {
    if(!rebroadcasting || isGreaterThanOrEqual(my_digest, rebroadcast_digest))
        return;
// NOTE(review): fragment — the middle of this method (construction of oob_batch/tuple and
// the closing braces) is missing from the visible span.
protected void handleMessages(Address dest, Address sender, List<LongTuple<Message>> msgs, boolean oob, AsciiString cluster_name) {
    Table<Message> buf=xmit_table.get(sender);
    if(buf == null) { // discard message if there is no entry for sender
        unknownMember(sender, "batch");
        return;
    // elided code presumably fills oob_batch from msgs before this point — TODO confirm
    oob_batch.add(tuple.getVal2());
    deliverBatch(oob_batch);
    removeAndDeliver(buf, sender, loopback, cluster_name); // at most 1 thread will execute this at any given time
// NOTE(review): fragment of a single-message handling path; enclosing braces/branches elided.
Table<Message> buf=xmit_table.get(sender);
if(buf == null) { // discard message if there is no entry for sender
    unknownMember(sender, hdr.seqno);
    return;
// the duplicated deliver() line below is likely an extraction artifact or two separate
// branches collapsed together — TODO confirm against the original file
deliver(msg, sender, hdr.seqno, "OOB message");
deliver(msg, sender, hdr.seqno, "OOB message");
removeAndDeliver(buf, sender, loopback, null); // at most 1 thread will execute this at any given time
protected void handleXmitRsp(Message msg, NakAckHeader2 hdr) { if(msg == null) return; try { if(stats) xmit_rsps_received.increment(); msg.setDest(null); NakAckHeader2 newhdr=hdr.copy(); newhdr.type=NakAckHeader2.MSG; // change the type back from XMIT_RSP --> MSG msg.putHeader(id, newhdr); handleMessage(msg, newhdr); if(rebroadcasting) checkForRebroadcasts(); } catch(Exception ex) { log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "retransmitted message", msg, ex); } }
/** Efficient way of checking whether another thread is already processing messages from sender. If that's the case,
 * we return immediately and let the existing thread process our message (https://jira.jboss.org/jira/browse/JGRP-829).
 * Benefit: fewer threads blocked on the same lock, these threads can be returned to the thread pool */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    // adders counts the threads wanting to deliver from this table; only the thread that
    // increments it from 0 becomes the single delivering thread, the others return at once
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    // one batch instance is reused (reset) across loop iterations to avoid reallocation
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // Don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs, batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    // keep looping until no other thread registered more work while we were delivering
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
/**
 * Returns the digest for a single member, or the aggregate digest when {@code mbr} is null.
 * @param mbr the member whose digest is requested; null means "all members"
 * @return the member's digest, or null if there is no retransmit-table entry for the member
 */
public Digest getDigest(Address mbr) {
    if(mbr == null)
        return getDigest();
    Table<Message> entry=xmit_table.get(mbr);
    if(entry == null)
        return null;
    // seqnos[0]=highest delivered, seqnos[1]=highest received — as provided by Table.getDigest()
    long[] seqnos=entry.getDigest();
    return new Digest(mbr, seqnos[0], seqnos[1]);
}
/**
 * Records the current multicast state for the given target. When multicast state is requested,
 * looks up the NAKACK2 protocol in the channel's stack and stores its current seqno under the
 * key "JGroups.mcastState"; does nothing if NAKACK2 is absent.
 */
@Override
public void getMessageState(InternalDistributedMember target, Map<String, Long> state, boolean includeMulticast) {
    if (!includeMulticast) {
        return;
    }
    NAKACK2 nakack = (NAKACK2) myChannel.getProtocolStack().findProtocol("NAKACK2");
    if (nakack == null) {
        return;
    }
    long seqno = nakack.getCurrentSeqno();
    state.put("JGroups.mcastState", seqno);
}
/** * Removes old members from xmit-table and adds new members to xmit-table (at seqnos hd=0, hr=0). * This method is not called concurrently */ protected void adjustReceivers(List<Address> members) { Set<Address> keys=xmit_table.keySet(); // remove members which left for(Address member: keys) { if(!members.contains(member)) { if(Objects.equals(local_addr, member)) continue; Table<Message> buf=xmit_table.remove(member); if(buf != null) log.debug("%s: removed %s from xmit_table (not member anymore)", local_addr, member); } } members.stream().filter(mbr -> !keys.contains(mbr)).forEach(mbr -> xmit_table.putIfAbsent(mbr, createTable(0))); }
// NOTE(review): fragment (duplicate chunk of a MessageBatch-processing switch); case labels
// and enclosing braces are outside the visible span — TODO confirm against the original file.
queueMessage(msg, hdr.seqno); // message queued rather than processed — guard condition not visible here
continue;
// presumably an XMIT_REQ case: deserialize the missing-seqno list and serve the request
SeqnoList missing=Util.streamableFromBuffer(SeqnoList::new, msg.getRawBuffer(), msg.getOffset(), msg.getLength());
if(missing != null)
    handleXmitReq(msg.getSrc(), missing, hdr.sender);
// presumably an XMIT_RSP case: unwrap the retransmitted message
Message xmitted_msg=msgFromXmitRsp(msg, hdr);
if(xmitted_msg != null) {
    if(msgs == null)
// records the sender's highest seqno — enclosing case label not visible; TODO confirm
handleHighestSeqno(batch.sender(), hdr.seqno);
break;
default:
// after the switch: deliver the regular messages collected from the batch
handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());
checkForRebroadcasts(); // re-check whether an in-progress rebroadcast round can complete
// NOTE(review): fragment — begins mid-expression (the start of the conditional StringBuilder
// assignment is outside the visible span); appears to be a variant of the digest-overwrite
// code that only builds the log string when sb is non-null.
.append("existing digest: " + getDigest()).append("\nnew digest: " + digest) : null;
// rebuild the member's table starting at the digest's highest-delivered seqno
buf=createTable(highest_delivered_seqno);
xmit_table.put(member, buf);
sb.append("\n").append("resulting digest: " + getDigest().toString(digest));
if(set_own_seqno && sb != null)
    sb.append("\nnew seqno for " + local_addr + ": " + seqno);
// NOTE(review): fragment (duplicate chunk) — statements from more than one spot of a
// rebroadcast helper were concatenated here; the try/brace nesting is incomplete in this view.
rebroadcast_digest_lock.unlock();
Digest my_digest=getDigest();
boolean xmitted=false;
// the member has seen messages we have not: ask for the missing range
if(their_high > my_high) {
    log.trace("%s: fetching %d-%d from %s", local_addr, my_high, their_high, member);
    retransmit(my_high+1, their_high, member, true); // use multicast to send retransmit request
    xmitted=true;
try {
try {
// re-read the digest and re-check, under the lock, whether rebroadcasting is still needed
my_digest=getDigest();
rebroadcast_digest_lock.lock();
try {
    if(!rebroadcasting || isGreaterThanOrEqual(my_digest, rebroadcast_digest))
        return;
// NOTE(review): fragment (duplicate chunk) — the middle of this method (construction of
// oob_batch/tuple and the closing braces) is missing from the visible span.
protected void handleMessages(Address dest, Address sender, List<LongTuple<Message>> msgs, boolean oob, AsciiString cluster_name) {
    Table<Message> buf=xmit_table.get(sender);
    if(buf == null) { // discard message if there is no entry for sender
        unknownMember(sender, "batch");
        return;
    // elided code presumably fills oob_batch from msgs before this point — TODO confirm
    oob_batch.add(tuple.getVal2());
    deliverBatch(oob_batch);
    removeAndDeliver(buf, sender, loopback, cluster_name); // at most 1 thread will execute this at any given time
// NOTE(review): fragment (duplicate chunk) of a single-message handling path; braces elided.
Table<Message> buf=xmit_table.get(sender);
if(buf == null) { // discard message if there is no entry for sender
    unknownMember(sender, hdr.seqno);
    return;
// the duplicated deliver() line below is likely an extraction artifact or two separate
// branches collapsed together — TODO confirm against the original file
deliver(msg, sender, hdr.seqno, "OOB message");
deliver(msg, sender, hdr.seqno, "OOB message");
removeAndDeliver(buf, sender, loopback, null); // at most 1 thread will execute this at any given time
/**
 * <b>Callback</b>. Called by superclass when event may be handled.<p> <b>Do not use {@code passUp} in this
 * method as the event is passed up by default by the superclass after this method returns !</b>
 */
public Object up(Event evt) {
    switch(evt.getType()) {
        case Event.STABLE: // generated by STABLE layer. Delete stable messages passed in arg
            stable(evt.getArg());
            return null; // do not pass up further (Bela Aug 7 2001)
        case Event.SUSPECT:
            // release the promise if rebroadcasting is in progress... otherwise we wait forever. there will be a new
            // flush round anyway
            if(rebroadcasting)
                cancelRebroadcasting();
            break;
    }
    // all other (and SUSPECT) events continue up the stack
    return up_prot.up(evt);
}