/**
 * Drains deliverable messages from the given sender's table and passes them up the stack in batches.
 * Uses the table's "adders" counter as a combined ticket/lock: only the thread that increments the
 * counter from 0 becomes the remover; all other concurrent callers merely bump the counter and return,
 * and the remover loops until the counter drops back to 0, guaranteeing their messages are delivered too.
 */
protected void removeAndDeliver(Table<Message> win, Address sender) {
    AtomicInteger adders=win.getAdders();
    // Another thread is already removing/delivering: the increment signals it to run one more
    // round of the do/while loop below, which will pick up our messages in order
    if(adders.getAndIncrement() != 0)
        return;
    // reusable batch; reset() below clears it between rounds instead of reallocating
    final MessageBatch batch=new MessageBatch(win.size()).dest(local_addr).sender(sender).multicast(false);
    Supplier<MessageBatch> batch_creator=() -> batch;
    do {
        try {
            batch.reset();
            win.removeMany(true, 0, null, batch_creator, BATCH_ACCUMULATOR);
        }
        catch(Throwable t) {
            // log and keep looping: the counter must still be decremented to release the remover role
            log.error("failed removing messages from table for " + sender, t);
        }
        if(!batch.isEmpty()) {
            // batch is guaranteed to NOT contain any OOB messages as the drop_oob_msgs_filter removed them
            deliverBatch(batch);
        }
    }
    // another thread incremented the counter while we were delivering -> run another round
    while(adders.decrementAndGet() != 0);
}
// NOTE(review): truncated snippet — braces are unbalanced, `len` and `win` are not declared in the
// visible text, and `oob_batch.add(msg)` inside the tuple loop looks like it should be
// `oob_batch.add(tuple.getVal2())`. Confirm against the full source before relying on this.
protected void handleBatchFromSelf(MessageBatch batch, Entry entry) {
    List<LongTuple<Message>> list=new ArrayList<>(batch.size());
    for(Iterator<Message> it=batch.iterator(); it.hasNext();) {
        Message msg=it.next();
        UnicastHeader3 hdr;
        log.trace("%s <-- DATA(%s: %s)", local_addr, batch.sender(), printMessageList(list));
        if(batch.mode() == MessageBatch.Mode.OOB) {
            // re-wraps the collected messages as an OOB batch addressed to ourselves
            MessageBatch oob_batch=new MessageBatch(local_addr, batch.sender(), batch.clusterName(),
                                                    batch.multicast(), MessageBatch.Mode.OOB, len);
            for(LongTuple<Message> tuple: list) {
                long seq=tuple.getVal1();
                oob_batch.add(msg);
    // drain and deliver whatever became deliverable in the sender's table
    removeAndDeliver(win, batch.sender());
    // anything left in the batch (no header / not handled above) continues up the stack
    if(!batch.isEmpty())
        up_prot.up(batch);
/**
 * Returns the entry associated with the given sender, creating and registering a new one on
 * first access. Safe under concurrent callers: a lost putIfAbsent race returns the winner's entry.
 */
protected Entry get(final Address dest, final Address sender) {
    Entry existing=map.get(sender);
    if(existing != null)
        return existing;
    // first contact with this sender: install a factory that builds message batches
    // pre-wired with destination, cluster name, sender and multicast flag
    IntFunction<MessageBatch> creator_func=cap -> new MessageBatch(cap)
      .dest(dest)
      .clusterName(tp.getClusterNameAscii())
      .sender(sender)
      .multicast(dest == null);
    Entry created=new Entry(creator_func);
    Entry winner=map.putIfAbsent(sender, created);
    // if another thread beat us to the insert, use its entry
    return winner != null? winner : created;
}
/**
 * Transfers messages from other to this batch, replacing this batch's current contents.
 * Optionally clears the other batch after the transfer.
 * @param other the other batch
 * @param clear If true, the transferred messages are removed from the other batch
 * @return the number of transferred messages (may be 0 if the other batch was empty)
 */
public int transferFrom(MessageBatch other, boolean clear) {
    // no-op on null or self-transfer
    if(other == null || this == other)
        return 0;
    int capacity=messages.length, other_size=other.size();
    if(other_size == 0)
        return 0;
    // grow the backing array if the other batch's contents don't fit
    if(capacity < other_size)
        messages=new Message[other_size];
    // NOTE(review): copies the first other_size slots of other.messages; if size() counts
    // non-null messages while removals leave null gaps before other's insertion index, this
    // could copy nulls and miss trailing messages — confirm the size()/index invariants
    System.arraycopy(other.messages, 0, this.messages, 0, other_size);
    // null out leftover references from the previous contents so they can be GC'ed
    if(this.index > other_size)
        for(int i=other_size; i < this.index; i++)
            messages[i]=null;
    this.index=other_size;
    if(clear)
        other.clear();
    return other_size;
}
// NOTE(review): truncated snippet from a fork-protocol batch demultiplexer — braces are unbalanced
// and the `continue` has lost its enclosing loop. Visible intent: messages carrying a ForkHeader
// are removed from the main batch, grouped per fork-stack id, then each group is passed up its
// fork stack as a fresh batch; header-less messages continue up the main stack.
ForkHeader hdr=msg.getHeader(id);
if(hdr != null) {
    // handled by the matching fork stack below, not by the main stack
    batch.remove(msg);
    List<Message> list=map.computeIfAbsent(hdr.fork_stack_id, k -> new ArrayList<>());
    list.add(msg);
    continue;
// new batch with the same envelope (dest/sender/cluster/multicast) but only this fork's messages
MessageBatch mb=new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
try {
    bottom_prot.up(mb);
// remaining messages (no ForkHeader) continue up the main stack
if(!batch.isEmpty())
    up_prot.up(batch);
// NOTE(review): heavily truncated/garbled snippet — braces are unbalanced, `entry` is declared
// twice in what would be one scope, and several statements (the `hdr` extraction, the else branch)
// are missing their surrounding context. Do not treat the control flow below as authoritative;
// reformatted verbatim for readability only.
public void up(MessageBatch batch) {
    if(batch.dest() == null) { // not a unicast batch
        up_prot.up(batch);
        return;
    // looped-back batch from ourselves: route through our own send-table entry, if any
    if(local_addr == null || local_addr.equals(batch.sender())) {
        Entry entry=local_addr != null? send_table.get(local_addr) : null;
        if(entry != null)
    int size=batch.size();
    // groups data messages by connection id, preserving arrival order
    Map<Short,List<LongTuple<Message>>> msgs=new LinkedHashMap<>();
    ReceiverEntry entry=recv_table.get(batch.sender());
    for(Iterator<Message> it=batch.iterator(); it.hasNext();) {
        Message msg=it.next();
        UnicastHeader3 hdr;
    entry=getReceiverEntry(batch.sender(), hdr.seqno(), hdr.first, hdr.connId());
    // no receiver entry: ask the sender for its first seqno to (re)establish the connection
    sendRequestForFirstSeqno(batch.sender());
    else {
        // drop groups whose conn-id doesn't match the current connection; if anything was
        // dropped, request a resync of the first seqno
        if(msgs.keySet().retainAll(Collections.singletonList(entry.connId()))) // remove all conn-ids that don't match
            sendRequestForFirstSeqno(batch.sender());
        List<LongTuple<Message>> list=msgs.get(entry.connId());
        if(list != null && !list.isEmpty())
            handleBatchReceived(entry, batch.sender(), list, batch.mode() == MessageBatch.Mode.OOB);
    // non-data messages (acks etc.) left in the batch continue up the stack
    if(!batch.isEmpty())
        up_prot.up(batch);
/**
 * Passes a non-empty batch of messages up the stack, optionally emitting a trace line with the
 * seqno range (taken from the first and last message's headers). Any throwable raised during
 * delivery is logged and swallowed, so delivery failures never propagate to the caller.
 */
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch == null || batch.isEmpty())
            return;
        if(is_trace) {
            Message head=batch.first(), tail=batch.last();
            StringBuilder trace=new StringBuilder(local_addr + ": delivering " + batch.sender());
            if(head != null && tail != null) {
                NakAckHeader2 first_hdr=head.getHeader(id), last_hdr=tail.getHeader(id);
                trace.append("#").append(first_hdr.seqno).append("-").append(last_hdr.seqno);
            }
            trace.append(" (" + batch.size()).append(" messages)");
            log.trace(trace);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        // never let a delivery failure escape; just record it
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
public void up(MessageBatch batch) { StableHeader hdr; for(Message msg: batch) { // remove and handle messages with flow control headers (STABLE_GOSSIP, STABILITY) if((hdr=msg.getHeader(id)) != null) { batch.remove(msg); handleUpEvent(hdr, batch.sender(), readDigest(msg.getRawBuffer(), msg.getOffset(), msg.getLength())); } } // only if message counting is on, and only for multicast messages (http://jira.jboss.com/jira/browse/JGRP-233) if(max_bytes > 0 && batch.dest() == null && !batch.isEmpty()) { boolean send_stable_msg=false; received.lock(); try { num_bytes_received+=batch.length(); if(num_bytes_received >= max_bytes) { log.trace("max_bytes has been reached (%s, bytes received=%s): triggers stable msg", max_bytes, num_bytes_received); num_bytes_received=0; send_stable_msg=true; } } finally { received.unlock(); } if(send_stable_msg) sendStableMessage(true); } if(!batch.isEmpty()) up_prot.up(batch); }
/**
 * Processes the queued batch: drops it when null or when its unicast destination doesn't match
 * this node, records receive statistics (per mode) when stats are enabled, then passes it up.
 */
public void run() {
    // discard batches addressed to someone else (e.g. received after the local address changed)
    if(batch == null || (!batch.multicast() && tp.unicastDestMismatch(batch.dest())))
        return;
    if(tp.statsEnabled()) {
        int num_msgs=batch.size();
        MsgStats stats=tp.getMessageStats();
        switch(batch.getMode()) {
            case OOB:
                stats.incrNumOOBMsgsReceived(num_msgs);
                break;
            case INTERNAL:
                stats.incrNumInternalMsgsReceived(num_msgs);
                break;
            default: // regular messages
                stats.incrNumMsgsReceived(num_msgs);
                break;
        }
        stats.incrNumBatchesReceived(1);
        stats.incrNumBytesReceived(batch.length());
        tp.avgBatchSize().add(num_msgs);
    }
    passBatchUp();
}
public void up(MessageBatch batch) { // let unicast message batches pass if(batch.dest() != null && (batch.mode() == MessageBatch.Mode.OOB && batch.mode() == MessageBatch.Mode.INTERNAL) || holes.contains(batch.sender())) { up_prot.up(batch); return; } if(barrier_closed.get()) { final Map<Address,Message> map=batch.dest() == null? mcast_queue : ucast_queue; map.put(batch.sender(), batch.last().putHeader(transport.getId(),new TpHeader(batch.clusterName()))); return; // queue the last message of the batch and drop the batch } Thread current_thread=Thread.currentThread(); in_flight_threads.put(current_thread, NULL); try { up_prot.up(batch); } finally { unblock(current_thread); } }
public void passBatchUp(MessageBatch batch, boolean perform_cluster_name_matching, boolean discard_own_mcast) { if(is_trace) log.trace("%s: received message batch of %d messages from %s", local_addr, batch.size(), batch.sender()); if(up_prot == null) return; // Discard if message's cluster name is not the same as our cluster name if(perform_cluster_name_matching && cluster_name != null && !cluster_name.equals(batch.clusterName())) { if(log_discard_msgs && log.isWarnEnabled()) { Address sender=batch.sender(); if(suppress_log_different_cluster != null) suppress_log_different_cluster.log(SuppressLog.Level.warn, sender, suppress_time_different_cluster_warnings, batch.clusterName(),cluster_name, sender); else log.warn(Util.getMessage("BatchDroppedDiffCluster"), batch.clusterName(),cluster_name, sender); } return; } if(batch.multicast() && discard_own_mcast && local_addr != null && local_addr.equals(batch.sender())) return; up_prot.up(batch); }
public void up(MessageBatch batch) { if(secret_key == null) { log.trace("%s: discarded %s batch from %s as secret key is null", local_addr, batch.dest() == null? "mcast" : "unicast", batch.sender()); return; } BlockingQueue<Cipher> cipherQueue = decoding_ciphers; try { Cipher cipher=cipherQueue.take(); try { BiConsumer<Message,MessageBatch> decrypter=new Decrypter(cipher); batch.forEach(decrypter); } finally { cipherQueue.offer(cipher); } } catch(InterruptedException e) { log.error("%s: failed processing batch; discarding batch", local_addr, e); // we need to drop the batch if we for example have a failure fetching a cipher, or else other messages // in the batch might make it up the stack, bypassing decryption! This is not an issue because encryption // is below NAKACK2 or UNICAST3, so messages will get retransmitted return; } if(!batch.isEmpty()) up_prot.up(batch); }
// NOTE(review): truncated snippet — `oob_batch.add(msg)` adds the same outer `msg` on every
// iteration and is immediately followed by `oob_batch.add(tuple.getVal2())`; one of the two adds
// is almost certainly leftover/wrong — confirm against the full source. Visible intent: on
// loopback, wrap the collected (seqno, message) tuples into an OOB batch for local delivery.
MessageBatch oob_batch=new MessageBatch(dest, sender, null, dest == null, MessageBatch.Mode.OOB, msgs.size());
if(loopback) {
    for(LongTuple<Message> tuple: msgs) {
        oob_batch.add(msg);
        oob_batch.add(tuple.getVal2());
public void up(MessageBatch batch) { Collection<Message> msgs=batch.getMatchingMessages(id, true); boolean updated=false; if(msgs != null) { for(Message msg: msgs) { FdHeader hdr=msg.getHeader(id); // header is not null at this point if(hdr.type == FdHeader.HEARTBEAT_ACK) updated=true; else up(msg); // SUSPECT and HEARTBEAT } } if(updated || (msg_counts_as_heartbeat && batch.sender() != null)) updateTimestamp(batch.sender()); if(!batch.isEmpty()) up_prot.up(batch); }
// NOTE(review): spliced snippet — `batch` is redeclared four times, so these statements come from
// different branches/scopes of a larger method. Each branch wraps one sender's message list in a
// batch covering one (dest, mode) combination: mcast/OOB, ucast/OOB, mcast/REG, ucast/REG.
MessageBatch batch=new MessageBatch(null, entry.getKey(), cluster_name, true, entry.getValue()).mode(MessageBatch.Mode.OOB);
batches.add(batch);
MessageBatch batch=new MessageBatch(local_addr, entry.getKey(), cluster_name, false, entry.getValue()).mode(MessageBatch.Mode.OOB);
batches.add(batch);
MessageBatch batch=new MessageBatch(null, entry.getKey(), cluster_name, true, entry.getValue()).mode(MessageBatch.Mode.REG);
batches.add(batch);
MessageBatch batch=new MessageBatch(local_addr, entry.getKey(), cluster_name, false, entry.getValue()).mode(MessageBatch.Mode.REG);
batches.add(batch);
// only non-empty batches are passed up the stack
batches.stream().filter(batch -> !batch.isEmpty()).forEach(batch -> up_prot.up(batch));
/**
 * Adds a message to this batch, delegating to {@code add(Message,boolean)} with resizing enabled.
 * @param msg the message to append
 * @return this batch, enabling call chaining
 */
public MessageBatch add(final Message msg) {
    add(msg, true); // true: grow the underlying array if the batch is full
    return this;
}
/**
 * Removes messages with flags DONT_BUNDLE and OOB set and executes them in the oob or internal
 * thread pool (JGRP-1737). Messages lacking either flag stay in the batch untouched.
 */
protected void removeAndDispatchNonBundledMessages(MessageBatch oob_batch) {
    if(oob_batch == null)
        return;
    AsciiString cluster=oob_batch.clusterName();
    byte[] cname=cluster == null? null : cluster.chars();
    Iterator<Message> it=oob_batch.iterator();
    while(it.hasNext()) {
        Message msg=it.next();
        // only messages carrying BOTH flags are pulled out and dispatched individually
        if(!msg.isFlagSet(Message.Flag.DONT_BUNDLE) || !msg.isFlagSet(Message.Flag.OOB))
            continue;
        boolean internal=msg.isFlagSet(Message.Flag.INTERNAL);
        it.remove();
        if(tp.statsEnabled())
            tp.getMessageStats().incrNumOOBMsgsReceived(1);
        tp.submitToThreadPool(new SingleMessageHandlerWithClusterName(msg, cname), internal);
    }
}