protected void process(MessageBatch batch) {
    Address dest=batch.dest(), sender=batch.sender();
    get(dest, sender).process(batch);
}
protected Entry get(final Address dest, final Address sender) {
    Entry entry=map.get(sender);
    if(entry == null) {
        IntFunction<MessageBatch> creator_func=cap -> new MessageBatch(cap).dest(dest)
          .clusterName(tp.getClusterNameAscii()).sender(sender).multicast(dest == null);
        Entry tmp=map.putIfAbsent(sender, entry=new Entry(creator_func));
        if(tmp != null)
            entry=tmp;
    }
    return entry;
}
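// A minimal, self-contained sketch (not JGroups code; names are illustrative) of the lock-free
// creation idiom in get() above: optimistically create an entry, install it with putIfAbsent(),
// and adopt the winner if another thread raced us, so no lock is held on the common read path.
class EntryCacheSketch<K,V> {
    private final java.util.concurrent.ConcurrentMap<K,V> map=new java.util.concurrent.ConcurrentHashMap<>();

    V getOrCreate(K key, java.util.function.Supplier<V> factory) {
        V entry=map.get(key);
        if(entry == null) {
            V tmp=map.putIfAbsent(key, entry=factory.get());
            if(tmp != null) // another thread installed an entry first: discard ours, use the winner's
                entry=tmp;
        }
        return entry;
    }
}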
public void up(MessageBatch batch) {
    Collection<Message> msgs=batch.getMatchingMessages(id, true);
    boolean updated=false;
    if(msgs != null) {
        for(Message msg: msgs) {
            FdHeader hdr=msg.getHeader(id); // header is not null at this point
            if(hdr.type == FdHeader.HEARTBEAT_ACK)
                updated=true;
            else
                up(msg); // SUSPECT and HEARTBEAT
        }
    }
    if(updated || (msg_counts_as_heartbeat && batch.sender() != null))
        updateTimestamp(batch.sender());
    if(!batch.isEmpty())
        up_prot.up(batch);
}
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch == null || batch.isEmpty())
            return;
        if(is_trace) {
            Message first=batch.first(), last=batch.last();
            StringBuilder sb=new StringBuilder(local_addr + ": delivering " + batch.sender());
            if(first != null && last != null) {
                NakAckHeader2 hdr1=first.getHeader(id), hdr2=last.getHeader(id);
                sb.append("#").append(hdr1.seqno).append("-").append(hdr2.seqno);
            }
            sb.append(" (").append(batch.size()).append(" messages)");
            log.trace(sb);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
public void up(MessageBatch batch) {
    // let unicast OOB and INTERNAL message batches pass; a batch has exactly one mode,
    // so this must be a disjunction (the original conjunction could never be true)
    if(batch.dest() != null && (batch.mode() == MessageBatch.Mode.OOB || batch.mode() == MessageBatch.Mode.INTERNAL)
      || holes.contains(batch.sender())) {
        up_prot.up(batch);
        return;
    }
    if(barrier_closed.get()) {
        final Map<Address,Message> map=batch.dest() == null? mcast_queue : ucast_queue;
        map.put(batch.sender(), batch.last().putHeader(transport.getId(), new TpHeader(batch.clusterName())));
        return; // queue the last message of the batch and drop the batch
    }
    Thread current_thread=Thread.currentThread();
    in_flight_threads.put(current_thread, NULL);
    try {
        up_prot.up(batch);
    }
    finally {
        unblock(current_thread);
    }
}
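// A minimal sketch (not JGroups code; names illustrative) of the in-flight accounting in the
// barrier-style up() above: delivering threads register in a map and deregister in a finally
// block; closing the barrier flips a flag and waits for the map to drain. For brevity this
// sketch drops messages while closed (the real protocol queues them) and ignores the
// check-then-register race that the real protocol handles.
class BarrierSketch {
    private final java.util.concurrent.atomic.AtomicBoolean barrier_closed=new java.util.concurrent.atomic.AtomicBoolean();
    private final java.util.Map<Thread,Boolean> in_flight_threads=new java.util.concurrent.ConcurrentHashMap<>();

    void up(Runnable pass_up) {
        if(barrier_closed.get())
            return;                      // the real protocol queues the message here
        Thread t=Thread.currentThread();
        in_flight_threads.put(t, Boolean.TRUE);
        try {
            pass_up.run();
        }
        finally {
            in_flight_threads.remove(t); // always deregister, even if delivery throws
        }
    }

    void closeBarrier() throws InterruptedException {
        barrier_closed.set(true);
        while(!in_flight_threads.isEmpty()) // wait until all in-flight deliveries finish
            Thread.sleep(1);
    }
}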
public void passBatchUp(MessageBatch batch, boolean perform_cluster_name_matching, boolean discard_own_mcast) {
    if(is_trace)
        log.trace("%s: received message batch of %d messages from %s", local_addr, batch.size(), batch.sender());
    if(up_prot == null)
        return;
    // Discard if the batch's cluster name is not the same as our cluster name
    if(perform_cluster_name_matching && cluster_name != null && !cluster_name.equals(batch.clusterName())) {
        if(log_discard_msgs && log.isWarnEnabled()) {
            Address sender=batch.sender();
            if(suppress_log_different_cluster != null)
                suppress_log_different_cluster.log(SuppressLog.Level.warn, sender,
                                                   suppress_time_different_cluster_warnings,
                                                   batch.clusterName(), cluster_name, sender);
            else
                log.warn(Util.getMessage("BatchDroppedDiffCluster"), batch.clusterName(), cluster_name, sender);
        }
        return;
    }
    if(batch.multicast() && discard_own_mcast && local_addr != null && local_addr.equals(batch.sender()))
        return;
    up_prot.up(batch);
}
/** Callback invoked by the protocol stack to deliver a message batch */
public JChannel up(MessageBatch batch) {
    if(stats) {
        received_msgs+=batch.size();
        received_bytes+=batch.length();
    }
    // discard local messages (sent by myself to me)
    if(discard_own_messages && local_addr != null && batch.sender() != null && local_addr.equals(batch.sender()))
        return this;
    if(up_handler != null) {
        try {
            up_handler.up(batch);
        }
        catch(Throwable t) {
            log.error(Util.getMessage("UpHandlerFailure"), t);
        }
        return this;
    }
    if(receiver != null) {
        try {
            receiver.receive(batch);
        }
        catch(Throwable t) {
            log.error(Util.getMessage("ReceiverFailure"), t);
        }
    }
    return this;
}
public void up(MessageBatch batch) {
    if(secret_key == null) {
        log.trace("%s: discarded %s batch from %s as secret key is null",
                  local_addr, batch.dest() == null? "mcast" : "unicast", batch.sender());
        return;
    }
    BlockingQueue<Cipher> cipherQueue=decoding_ciphers;
    try {
        Cipher cipher=cipherQueue.take();
        try {
            BiConsumer<Message,MessageBatch> decrypter=new Decrypter(cipher);
            batch.forEach(decrypter);
        }
        finally {
            cipherQueue.offer(cipher);
        }
    }
    catch(InterruptedException e) {
        log.error("%s: failed processing batch; discarding batch", local_addr, e);
        // we need to drop the batch if we for example have a failure fetching a cipher, or else other
        // messages in the batch might make it up the stack, bypassing decryption! This is not an issue
        // because encryption is below NAKACK2 or UNICAST3, so messages will get retransmitted
        return;
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
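// A hedged, self-contained sketch (not JGroups code) of the cipher-pool idiom above: Cipher
// instances are not thread-safe, so a fixed set of them is shared via a BlockingQueue. take()
// borrows one (blocking if none is free) and offer() returns it in a finally block, so a single
// Cipher is never used by two threads at once. The pool size and "AES" transformation are
// illustrative assumptions.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;

class CipherPoolSketch {
    private final BlockingQueue<Cipher> pool;

    CipherPoolSketch(SecretKey key, int pool_size) throws Exception {
        pool=new ArrayBlockingQueue<>(pool_size);
        for(int i=0; i < pool_size; i++) {
            Cipher c=Cipher.getInstance("AES"); // transformation is an illustrative choice
            c.init(Cipher.DECRYPT_MODE, key);
            pool.add(c);
        }
    }

    byte[] decrypt(byte[] data) throws Exception {
        Cipher cipher=pool.take(); // borrow: blocks until a cipher is free
        try {
            return cipher.doFinal(data);
        }
        finally {
            pool.offer(cipher);    // always return the cipher, even on failure
        }
    }
}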
    // Truncated fragment (fork-stack demultiplexing): the enclosing loop over per-fork-stack
    // message lists is elided in the source; the catch clause below is a hedged completion.
        continue; // ends an elided check in the enclosing loop
    MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
    try {
        bottom_prot.up(mb); // pass the per-fork-stack sub-batch up that fork stack
    }
    catch(Throwable t) {    // assumed handler; the original is elided
        log.error("failed passing up batch", t);
    }
// Truncated fragment: as above, but the sub-batch is passed into a fork channel; the catch
// clause is a hedged completion, the original handler being elided in the source.
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
try {
    fork_channel.up(mb);
}
catch(Throwable t) { // assumed handler; the original is elided
    log.error("failed passing up batch", t);
}
public void up(MessageBatch batch) {
    for(Message msg: batch) {
        // If we have a join or merge request --> authenticate, else pass up
        GMS.GmsHeader gms_hdr=getGMSHeader(msg);
        if(gms_hdr != null && needsAuthentication(gms_hdr)) {
            AuthHeader auth_hdr=msg.getHeader(id);
            if(auth_hdr == null) {
                log.warn("%s: found GMS join or merge request from %s but no AUTH header", local_addr, batch.sender());
                sendRejectionMessage(gms_hdr.getType(), batch.sender(), "join or merge without an AUTH header");
                batch.remove(msg);
            }
            else if(!handleAuthHeader(gms_hdr, auth_hdr, msg)) // authentication failed
                batch.remove(msg); // don't pass up
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
protected void removeAndDeliver(Table<Message> win, Address sender) {
    AtomicInteger adders=win.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    final MessageBatch batch=new MessageBatch(win.size()).dest(local_addr).sender(sender).multicast(false);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            win.removeMany(true, 0, null, batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty()) {
            // batch is guaranteed to NOT contain any OOB messages as the drop_oob_msgs_filter removed them
            deliverBatch(batch);
        }
    }
    while(adders.decrementAndGet() != 0);
}
/**
 * Efficient way of checking whether another thread is already processing messages from sender. If that's the
 * case, we return immediately and let the existing thread process our message
 * (https://jira.jboss.org/jira/browse/JGRP-829). Benefit: fewer threads are blocked on the same lock, and those
 * threads can be returned to the thread pool.
 */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // Don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs,
                           batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
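// A minimal sketch (not JGroups code; names illustrative) of the "adders" idiom shared by both
// removeAndDeliver() variants above: the first caller to bump the counter becomes the remover
// and loops; later callers just increment and return, knowing the remover will re-check for
// their messages before exiting.
class AdderLoopSketch {
    private final java.util.concurrent.atomic.AtomicInteger adders=new java.util.concurrent.atomic.AtomicInteger();

    void removeAndDeliver(Runnable drain) {
        if(adders.getAndIncrement() != 0)
            return;      // someone else is draining; our increment keeps its loop going
        do {
            drain.run(); // remove and deliver whatever is pending
        }
        while(adders.decrementAndGet() != 0); // exit only when no concurrent adds remain
    }
}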
public void up(MessageBatch batch) {
    for(Message msg: batch) {
        Frag3Header hdr=msg.getHeader(this.id);
        if(hdr != null) { // needs to be defragmented
            Message assembled_msg=unfragment(msg, hdr);
            if(assembled_msg != null) {
                // the reassembled msg has to be added in the right place (https://issues.jboss.org/browse/JGRP-1648),
                // and cannot be added to the tail of the batch!
                assembled_msg.setSrc(batch.sender());
                batch.replace(msg, assembled_msg);
                avg_size_up.add(assembled_msg.length());
            }
            else
                batch.remove(msg);
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
public void up(MessageBatch batch) {
    StableHeader hdr;
    for(Message msg: batch) {
        // remove and handle messages with STABLE headers (STABLE_GOSSIP, STABILITY)
        if((hdr=msg.getHeader(id)) != null) {
            batch.remove(msg);
            handleUpEvent(hdr, batch.sender(), readDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength()));
        }
    }
    // only if message counting is on, and only for multicast messages (http://jira.jboss.com/jira/browse/JGRP-233)
    if(max_bytes > 0 && batch.dest() == null && !batch.isEmpty()) {
        boolean send_stable_msg=false;
        received.lock();
        try {
            num_bytes_received+=batch.length();
            if(num_bytes_received >= max_bytes) {
                log.trace("max_bytes has been reached (%s, bytes received=%s): triggers stable msg",
                          max_bytes, num_bytes_received);
                num_bytes_received=0;
                send_stable_msg=true;
            }
        }
        finally {
            received.unlock();
        }
        if(send_stable_msg)
            sendStableMessage(true);
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
public void up(MessageBatch batch) {
    for(Message msg: batch) {
        FragHeader hdr=msg.getHeader(this.id);
        if(hdr != null) { // needs to be defragmented
            Message assembled_msg=unfragment(msg, hdr);
            if(assembled_msg != null) {
                // the reassembled msg has to be added in the right place (https://issues.jboss.org/browse/JGRP-1648),
                // and cannot be added to the tail of the batch!
                assembled_msg.setSrc(batch.sender());
                batch.replace(msg, assembled_msg);
                avg_size_up.add(assembled_msg.length());
            }
            else
                batch.remove(msg);
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
@Override
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        // If we have a join or merge request --> authenticate, else pass up
        GmsHeader gmsHeader = msg.getHeader(GMS_ID);
        Address remoteAddress = msg.getSrc();
        if (needsAuthentication(gmsHeader, remoteAddress)) {
            SaslHeader saslHeader = msg.getHeader(id);
            if (saslHeader == null) {
                log.warn("Found GMS join or merge request but no SASL header");
                sendRejectionMessage(gmsHeader.getType(), batch.sender(), "join or merge without a SASL header");
                batch.remove(msg);
            } else if (!serverChallenge(gmsHeader, saslHeader, msg)) // authentication failed
                batch.remove(msg); // don't pass up
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
// Truncated fragment: flow-control credit accounting for a received batch; the enclosing
// method and the if-body are elided in the source. The body below is a hedged completion:
// a positive credit delta typically triggers a credit message back to the sender.
Address sender=batch.sender();
long new_credits=adjustCredit(received, sender, length);
if(new_credits > 0)
    sendCredit(sender, new_credits); // assumed completion; original body elided