public void process(MessageBatch batch, boolean oob, boolean internal) {
    if(oob || internal) {
        super.process(batch, oob, internal);
        return;
    }
    MessageTable table=batch.dest() == null? mcasts : ucasts;
    table.process(batch);
}

protected void process(MessageBatch batch) {
    Address dest=batch.dest(), sender=batch.sender();
    get(dest, sender).process(batch);
}

protected Entry get(final Address dest, final Address sender) {
    Entry entry=map.get(sender);
    if(entry == null) {
        IntFunction<MessageBatch> creator_func=cap -> new MessageBatch(cap).dest(dest)
          .clusterName(tp.getClusterNameAscii()).sender(sender).multicast(dest == null);
        Entry tmp=map.putIfAbsent(sender, entry=new Entry(creator_func));
        if(tmp != null)
            entry=tmp;
    }
    return entry;
}
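A minimal sketch (hypothetical class, not JGroups code) of the putIfAbsent idiom in get() above: create a candidate eagerly, publish it atomically, and adopt the winner if another thread got there first.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

public class AtomicCache<K,V> {
    protected final ConcurrentMap<K,V> map=new ConcurrentHashMap<>();

    public V get(K key, Supplier<V> creator) {
        V val=map.get(key);
        if(val == null) {
            V candidate=creator.get();              // may be thrown away if we lose the race
            V prev=map.putIfAbsent(key, candidate); // returns null if we won
            val=prev != null? prev : candidate;
        }
        return val;
    }
}

Unlike computeIfAbsent(), this can construct a candidate that loses the race and is discarded, but it never runs the creator while holding a bin lock.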
public void up(MessageBatch batch) {
    // let unicast OOB and internal message batches pass, as well as batches from senders with holes
    if(batch.dest() != null && (batch.mode() == MessageBatch.Mode.OOB || batch.mode() == MessageBatch.Mode.INTERNAL)
      || holes.contains(batch.sender())) {
        up_prot.up(batch);
        return;
    }
    if(barrier_closed.get()) {
        final Map<Address,Message> map=batch.dest() == null? mcast_queue : ucast_queue;
        map.put(batch.sender(), batch.last().putHeader(transport.getId(), new TpHeader(batch.clusterName())));
        return; // queue the last message of the batch and drop the batch
    }
    Thread current_thread=Thread.currentThread();
    in_flight_threads.put(current_thread, NULL);
    try {
        up_prot.up(batch);
    }
    finally {
        unblock(current_thread);
    }
}
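A minimal sketch (hypothetical class, not JGroups code) of the barrier idea above, with the in-flight thread accounting omitted: while the barrier is closed, only the newest message per sender is retained; retransmission protocols above the barrier recover anything dropped once it reopens.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;

public class BarrierSketch<A,M> {
    protected final AtomicBoolean          barrier_closed=new AtomicBoolean(false);
    protected final ConcurrentHashMap<A,M> queue=new ConcurrentHashMap<>(); // last message per sender

    public void up(A sender, M msg, BiConsumer<A,M> next) {
        if(barrier_closed.get()) {
            queue.put(sender, msg); // overwrites: only the most recent message per sender is kept
            return;
        }
        next.accept(sender, msg);
    }

    public void open(BiConsumer<A,M> next) {
        barrier_closed.set(false);
        queue.forEach(next::accept); // flush the queued messages up the stack
        queue.clear();
    }
}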
public void up(MessageBatch batch) {
    if(secret_key == null) {
        log.trace("%s: discarded %s batch from %s as secret key is null",
                  local_addr, batch.dest() == null? "mcast" : "unicast", batch.sender());
        return;
    }
    BlockingQueue<Cipher> cipherQueue=decoding_ciphers;
    try {
        Cipher cipher=cipherQueue.take();
        try {
            BiConsumer<Message,MessageBatch> decrypter=new Decrypter(cipher);
            batch.forEach(decrypter);
        }
        finally {
            cipherQueue.offer(cipher);
        }
    }
    catch(InterruptedException e) {
        log.error("%s: failed processing batch; discarding batch", local_addr, e);
        // We need to drop the batch if we for example have a failure fetching a cipher, or else other messages
        // in the batch might make it up the stack, bypassing decryption! This is not an issue, because encryption
        // is below NAKACK2 or UNICAST3, so messages will get retransmitted.
        return;
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
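The take()/offer() pair above implements a cipher pool: javax.crypto.Cipher instances are stateful and not thread-safe, so a fixed set is cycled through a BlockingQueue. A self-contained sketch of that pattern (CipherPool is hypothetical; the "AES" algorithm is an assumed default):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

public class CipherPool {
    protected final BlockingQueue<Cipher> pool;

    public CipherPool(int size, SecretKeySpec key) throws Exception {
        pool=new ArrayBlockingQueue<>(size);
        for(int i=0; i < size; i++) {
            Cipher cipher=Cipher.getInstance("AES"); // assumed algorithm
            cipher.init(Cipher.DECRYPT_MODE, key);
            pool.offer(cipher);
        }
    }

    public byte[] decrypt(byte[] data) throws Exception {
        Cipher cipher=pool.take(); // blocks until a cipher becomes available
        try {
            return cipher.doFinal(data);
        }
        finally {
            pool.offer(cipher);    // always return the cipher to the pool
        }
    }
}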
if(bottom_prot == null) // assumed guard: the original fragment starts at this 'continue'
    continue;
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
try {
    bottom_prot.up(mb);
}
catch(Throwable t) {
    log.error("failed passing up batch", t); // assumed handler; the original fragment ends inside the try block
}
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
try {
    fork_channel.up(mb);
}
catch(Throwable t) {
    log.error("failed passing up batch", t); // assumed handler; the original fragment ends inside the try block
}
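Both fragments demultiplex a batch: messages are grouped by fork-stack id, and each group is passed as its own sub-batch to the matching fork stack or fork channel. A minimal sketch of that dispatch (hypothetical class, names assumed):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

public class Demux<K,M> {
    protected final Map<K,BiConsumer<K,List<M>>> handlers=new HashMap<>();

    public Demux<K,M> register(K key, BiConsumer<K,List<M>> handler) {
        handlers.put(key, handler);
        return this;
    }

    public void up(Map<K,List<M>> grouped) { // messages already grouped by key
        for(Map.Entry<K,List<M>> entry: grouped.entrySet()) {
            BiConsumer<K,List<M>> handler=handlers.get(entry.getKey());
            if(handler == null)
                continue; // no handler registered for this key: drop (mirrors the 'continue' above)
            handler.accept(entry.getKey(), entry.getValue());
        }
    }
}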
public void up(MessageBatch batch) {
    if(batch.dest() == null) { // not a unicast batch
        up_prot.up(batch);
        return;
    }
    // ... (remainder of the method not shown in this fragment)
}
protected void removeAndDeliver(Table<Message> win, Address sender) {
    AtomicInteger adders=win.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    final MessageBatch batch=new MessageBatch(win.size()).dest(local_addr).sender(sender).multicast(false);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            win.removeMany(true, 0, null, batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty()) {
            // batch is guaranteed to NOT contain any OOB messages, as the drop_oob_msgs_filter removed them
            deliverBatch(batch);
        }
    }
    while(adders.decrementAndGet() != 0);
}
/**
 * Efficient way of checking whether another thread is already processing messages from sender. If that's the case,
 * we return immediately and let the existing thread process our message (https://jira.jboss.org/jira/browse/JGRP-829).
 * Benefit: fewer threads are blocked on the same lock; these threads can be returned to the thread pool.
 */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs,
                           batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
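Both removeAndDeliver() variants rely on the same "adders" gate: the first thread through getAndIncrement() becomes the sole drainer, and the decrementAndGet() loop forces it to re-check the table for messages added while it was delivering. A minimal generic sketch (hypothetical class, not JGroups code) of that gate:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class SingleDrainer<T> {
    protected final AtomicInteger adders=new AtomicInteger();
    protected final Queue<T>      queue=new ConcurrentLinkedQueue<>();

    public void add(T element, Consumer<T> deliver) {
        queue.offer(element);
        if(adders.getAndIncrement() != 0)
            return; // another thread is draining and will see our element
        do {
            for(T el; (el=queue.poll()) != null;)
                deliver.accept(el);
        }
        while(adders.decrementAndGet() != 0); // elements may have arrived while we delivered
    }
}

No element is ever lost: a thread that bails out has already both enqueued its element and incremented the counter, so the drainer is guaranteed to loop at least once more.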
public void up(MessageBatch batch) {
    StableHeader hdr;
    for(Message msg: batch) { // remove and handle messages with STABLE headers (STABLE_GOSSIP, STABILITY)
        if((hdr=msg.getHeader(id)) != null) {
            batch.remove(msg);
            handleUpEvent(hdr, batch.sender(), readDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength()));
        }
    }
    // only if message counting is on, and only for multicast messages (http://jira.jboss.com/jira/browse/JGRP-233)
    if(max_bytes > 0 && batch.dest() == null && !batch.isEmpty()) {
        boolean send_stable_msg=false;
        received.lock();
        try {
            num_bytes_received+=batch.length();
            if(num_bytes_received >= max_bytes) {
                log.trace("max_bytes has been reached (%s, bytes received=%s): triggers stable msg",
                          max_bytes, num_bytes_received);
                num_bytes_received=0;
                send_stable_msg=true;
            }
        }
        finally {
            received.unlock();
        }
        if(send_stable_msg)
            sendStableMessage(true);
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
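The locking pattern above is worth noting: the byte counter is updated under the lock, but sendStableMessage() runs only after the lock has been released. A minimal sketch (hypothetical class) of that threshold trigger:

import java.util.concurrent.locks.ReentrantLock;

public class ByteThreshold {
    protected final ReentrantLock lock=new ReentrantLock();
    protected final long          max_bytes;
    protected long                num_bytes;

    public ByteThreshold(long max_bytes) {
        this.max_bytes=max_bytes;
    }

    public void add(long bytes, Runnable action) {
        boolean fire=false;
        lock.lock();
        try {
            num_bytes+=bytes;
            if(num_bytes >= max_bytes) {
                num_bytes=0;
                fire=true;
            }
        }
        finally {
            lock.unlock();
        }
        if(fire)
            action.run(); // run outside the lock, so a slow action doesn't block other updaters
    }
}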
public void up(MessageBatch batch) {
    List<Short> response_ids=null;
    Address dest=batch.dest();
    for(Message msg: batch) {
        if(!(msg.isFlagSet(Message.Flag.RSVP) || msg.isFlagSet(Message.Flag.RSVP_NB)))
            continue; // only RSVP-flagged messages are of interest (assumed: the fragment ends at this condition)
        // ... (handling of RSVP-flagged messages not shown in this fragment)
    }
}
handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());
public void run() {
    if(batch == null || (!batch.multicast() && tp.unicastDestMismatch(batch.dest())))
        return;
    if(tp.statsEnabled()) {
        int batch_size=batch.size();
        MsgStats msg_stats=tp.getMessageStats();
        if(batch.getMode() == MessageBatch.Mode.OOB)
            msg_stats.incrNumOOBMsgsReceived(batch_size);
        else if(batch.getMode() == MessageBatch.Mode.INTERNAL)
            msg_stats.incrNumInternalMsgsReceived(batch_size);
        else
            msg_stats.incrNumMsgsReceived(batch_size);
        msg_stats.incrNumBatchesReceived(1);
        msg_stats.incrNumBytesReceived(batch.length());
        tp.avgBatchSize().add(batch_size);
    }
    passBatchUp();
}
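A sketch (hypothetical class; field and method names assumed) of per-mode counters in the style of MsgStats, using LongAdder so that many concurrent receiver threads don't contend on a single counter:

import java.util.concurrent.atomic.LongAdder;

public class BatchStats {
    public enum Mode {REG, OOB, INTERNAL}

    protected final LongAdder msgs=new LongAdder(), oob_msgs=new LongAdder(), internal_msgs=new LongAdder(),
                              batches=new LongAdder(), bytes=new LongAdder();

    public void record(Mode mode, int batch_size, long batch_bytes) {
        switch(mode) {
            case OOB:      oob_msgs.add(batch_size);      break;
            case INTERNAL: internal_msgs.add(batch_size); break;
            default:       msgs.add(batch_size);          break;
        }
        batches.increment();
        bytes.add(batch_bytes);
    }

    public long totalMsgs() {
        return msgs.sum() + oob_msgs.sum() + internal_msgs.sum();
    }
}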