/**
 * Returns the {@link Entry} associated with {@code sender}, lazily creating and registering one
 * on first access. The created entry carries a batch factory that stamps every new
 * {@link MessageBatch} with the destination, cluster name and sender; a batch is marked
 * multicast iff {@code dest} is null.
 * @param dest   the destination address the entry's batches will be sent to (null for multicast)
 * @param sender the peer address keying the entry
 * @return the existing or newly registered entry (never null)
 */
protected Entry get(final Address dest, final Address sender) {
    Entry existing=map.get(sender);
    if(existing != null)
        return existing;
    // Factory invoked whenever the entry needs a fresh batch of the given capacity
    IntFunction<MessageBatch> factory=capacity -> new MessageBatch(capacity)
      .dest(dest).clusterName(tp.getClusterNameAscii()).sender(sender).multicast(dest == null);
    Entry created=new Entry(factory);
    // putIfAbsent resolves the race: if another thread registered first, use its entry
    Entry raced=map.putIfAbsent(sender, created);
    return raced != null? raced : created;
}
public void passBatchUp(MessageBatch batch, boolean perform_cluster_name_matching, boolean discard_own_mcast) { if(is_trace) log.trace("%s: received message batch of %d messages from %s", local_addr, batch.size(), batch.sender()); if(up_prot == null) return; // Discard if message's cluster name is not the same as our cluster name if(perform_cluster_name_matching && cluster_name != null && !cluster_name.equals(batch.clusterName())) { if(log_discard_msgs && log.isWarnEnabled()) { Address sender=batch.sender(); if(suppress_log_different_cluster != null) suppress_log_different_cluster.log(SuppressLog.Level.warn, sender, suppress_time_different_cluster_warnings, batch.clusterName(),cluster_name, sender); else log.warn(Util.getMessage("BatchDroppedDiffCluster"), batch.clusterName(),cluster_name, sender); } return; } if(batch.multicast() && discard_own_mcast && local_addr != null && local_addr.equals(batch.sender())) return; up_prot.up(batch); }
public void up(MessageBatch batch) { // let unicast message batches pass if(batch.dest() != null && (batch.mode() == MessageBatch.Mode.OOB && batch.mode() == MessageBatch.Mode.INTERNAL) || holes.contains(batch.sender())) { up_prot.up(batch); return; } if(barrier_closed.get()) { final Map<Address,Message> map=batch.dest() == null? mcast_queue : ucast_queue; map.put(batch.sender(), batch.last().putHeader(transport.getId(),new TpHeader(batch.clusterName()))); return; // queue the last message of the batch and drop the batch } Thread current_thread=Thread.currentThread(); in_flight_threads.put(current_thread, NULL); try { up_prot.up(batch); } finally { unblock(current_thread); } }
/**
 * Removes messages with flags DONT_BUNDLE and OOB set and executes them in the oob or internal
 * thread pool. JGRP-1737
 * @param oob_batch the batch to scan; may be null (no-op)
 */
protected void removeAndDispatchNonBundledMessages(MessageBatch oob_batch) {
    if(oob_batch == null)
        return;
    AsciiString cluster=oob_batch.clusterName();
    byte[] cluster_chars=cluster == null? null : cluster.chars();
    Iterator<Message> it=oob_batch.iterator();
    while(it.hasNext()) {
        Message msg=it.next();
        // Only messages carrying BOTH flags are pulled out of the batch
        if(!msg.isFlagSet(Message.Flag.DONT_BUNDLE) || !msg.isFlagSet(Message.Flag.OOB))
            continue;
        boolean internal=msg.isFlagSet(Message.Flag.INTERNAL);
        it.remove(); // remove via iterator: the batch is being traversed
        if(tp.statsEnabled())
            tp.getMessageStats().incrNumOOBMsgsReceived(1);
        tp.submitToThreadPool(new SingleMessageHandlerWithClusterName(msg, cluster_chars), internal);
    }
}
continue; MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list); try { bottom_prot.up(mb);
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list); try { fork_channel.up(mb);
/** Efficient way of checking whether another thread is already processing messages from sender. If that's the case,
 * we return immediately and let the existing thread process our message (https://jira.jboss.org/jira/browse/JGRP-829).
 * Benefit: fewer threads blocked on the same lock, these threads can be returned to the thread pool */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    // Adder count > 0 means another thread is already draining this table; it will also deliver our messages
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    // A single batch instance is reused across loop iterations (reset() below) to avoid per-round allocation.
    // NOTE(review): dest(null)/multicast(true) suggests this path serves multicast delivery only -- confirm with callers
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // Don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs,
                           batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    // Keep draining until no other thread registered more work while we were delivering
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
MessageBatch oob_batch=new MessageBatch(local_addr, batch.sender(), batch.clusterName(), batch.multicast(), MessageBatch.Mode.OOB, len); for(LongTuple<Message> tuple: list) { long seq=tuple.getVal1();
handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());
/**
 * Returns the {@link Entry} associated with {@code sender}, lazily creating and registering one
 * on first access. The created entry carries a batch factory that stamps every new
 * {@link MessageBatch} with the destination, cluster name and sender; a batch is marked
 * multicast iff {@code dest} is null.
 * @param dest   the destination address the entry's batches will be sent to (null for multicast)
 * @param sender the peer address keying the entry
 * @return the existing or newly registered entry (never null)
 */
protected Entry get(final Address dest, final Address sender) {
    Entry existing=map.get(sender);
    if(existing != null)
        return existing;
    // Factory invoked whenever the entry needs a fresh batch of the given capacity
    IntFunction<MessageBatch> factory=capacity -> new MessageBatch(capacity)
      .dest(dest).clusterName(tp.getClusterNameAscii()).sender(sender).multicast(dest == null);
    Entry created=new Entry(factory);
    // putIfAbsent resolves the race: if another thread registered first, use its entry
    Entry raced=map.putIfAbsent(sender, created);
    return raced != null? raced : created;
}
public void passBatchUp(MessageBatch batch, boolean perform_cluster_name_matching, boolean discard_own_mcast) { if(is_trace) log.trace("%s: received message batch of %d messages from %s", local_addr, batch.size(), batch.sender()); if(up_prot == null) return; // Discard if message's cluster name is not the same as our cluster name if(perform_cluster_name_matching && cluster_name != null && !cluster_name.equals(batch.clusterName())) { if(log_discard_msgs && log.isWarnEnabled()) { Address sender=batch.sender(); if(suppress_log_different_cluster != null) suppress_log_different_cluster.log(SuppressLog.Level.warn, sender, suppress_time_different_cluster_warnings, batch.clusterName(),cluster_name, sender); else log.warn(Util.getMessage("BatchDroppedDiffCluster"), batch.clusterName(),cluster_name, sender); } return; } if(batch.multicast() && discard_own_mcast && local_addr != null && local_addr.equals(batch.sender())) return; up_prot.up(batch); }
continue; MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list); try { bottom_prot.up(mb);
/**
 * Removes messages with flags DONT_BUNDLE and OOB set and executes them in the oob or internal
 * thread pool. JGRP-1737
 * @param oob_batch the batch to scan; may be null (no-op)
 */
protected void removeAndDispatchNonBundledMessages(MessageBatch oob_batch) {
    if(oob_batch == null)
        return;
    AsciiString cluster=oob_batch.clusterName();
    byte[] cluster_chars=cluster == null? null : cluster.chars();
    Iterator<Message> it=oob_batch.iterator();
    while(it.hasNext()) {
        Message msg=it.next();
        // Only messages carrying BOTH flags are pulled out of the batch
        if(!msg.isFlagSet(Message.Flag.DONT_BUNDLE) || !msg.isFlagSet(Message.Flag.OOB))
            continue;
        boolean internal=msg.isFlagSet(Message.Flag.INTERNAL);
        it.remove(); // remove via iterator: the batch is being traversed
        if(tp.statsEnabled())
            tp.getMessageStats().incrNumOOBMsgsReceived(1);
        tp.submitToThreadPool(new SingleMessageHandlerWithClusterName(msg, cluster_chars), internal);
    }
}
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list); try { fork_channel.up(mb);
public void up(MessageBatch batch) { // let unicast message batches pass if(batch.dest() != null && (batch.mode() == MessageBatch.Mode.OOB && batch.mode() == MessageBatch.Mode.INTERNAL) || holes.contains(batch.sender())) { up_prot.up(batch); return; } if(barrier_closed.get()) { final Map<Address,Message> map=batch.dest() == null? mcast_queue : ucast_queue; map.put(batch.sender(), batch.last().putHeader(transport.getId(),new TpHeader(batch.clusterName()))); return; // queue the last message of the batch and drop the batch } Thread current_thread=Thread.currentThread(); in_flight_threads.put(current_thread, NULL); try { up_prot.up(batch); } finally { unblock(current_thread); } }
/** Efficient way of checking whether another thread is already processing messages from sender. If that's the case,
 * we return immediately and let the existing thread process our message (https://jira.jboss.org/jira/browse/JGRP-829).
 * Benefit: fewer threads blocked on the same lock, these threads can be returned to the thread pool */
protected void removeAndDeliver(Table<Message> buf, Address sender, boolean loopback, AsciiString cluster_name) {
    // Adder count > 0 means another thread is already draining this table; it will also deliver our messages
    AtomicInteger adders=buf.getAdders();
    if(adders.getAndIncrement() != 0)
        return;
    boolean remove_msgs=discard_delivered_msgs && !loopback;
    // A single batch instance is reused across loop iterations (reset() below) to avoid per-round allocation.
    // NOTE(review): dest(null)/multicast(true) suggests this path serves multicast delivery only -- confirm with callers
    MessageBatch batch=new MessageBatch(buf.size()).dest(null).sender(sender).clusterName(cluster_name).multicast(true);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            // Don't include DUMMY and OOB_DELIVERED messages in the removed set
            buf.removeMany(remove_msgs, 0, no_dummy_and_no_oob_delivered_msgs_and_no_dont_loopback_msgs,
                           batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty())
            deliverBatch(batch);
    }
    // Keep draining until no other thread registered more work while we were delivering
    while(adders.decrementAndGet() != 0);
    if(rebroadcasting)
        checkForRebroadcasts();
}
MessageBatch oob_batch=new MessageBatch(local_addr, batch.sender(), batch.clusterName(), batch.multicast(), MessageBatch.Mode.OOB, len); for(LongTuple<Message> tuple: list) { long seq=tuple.getVal1();
handleMessages(batch.dest(), batch.sender(), msgs, batch.mode() == MessageBatch.Mode.OOB, batch.clusterName());