public void up(MessageBatch batch) {
    boolean copy=(copy_multicast_msgs || copy_unicast_msgs) && incoming_copies > 0;
    if(copy) {
        List<Message> copies=new ArrayList<>();
        for(Message msg: batch) {
            Address dest=msg.getDest();
            boolean multicast=dest == null;
            if((multicast && copy_multicast_msgs) || (!multicast && copy_unicast_msgs)) {
                for(int i=0; i < incoming_copies; i++)
                    copies.add(msg.copy(true));
            }
        }
        copies.forEach(batch::add);
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
/**
 * Sends up multiple messages in a {@link MessageBatch}. The sender of the batch is always the same, and so is the
 * destination (null == multicast messages). Messages in a batch can be OOB messages, regular messages, or mixed
 * messages, although the transport itself will create initial MessageBatches that contain only either OOB or
 * regular messages.<p/>
 * The default processing below sends messages up the stack individually, based on a matching criterion
 * (calling {@link #accept(org.jgroups.Message)}), and - if true - calls {@link #up(org.jgroups.Event)}
 * for that message and removes the message. If the batch is not empty, it is passed up, or else it is dropped.<p/>
 * Subclasses should check if there are any messages destined for them (e.g. using
 * {@link MessageBatch#getMatchingMessages(short,boolean)}), then possibly remove and process them and finally pass
 * the batch up to the next protocol. Protocols can also modify messages in place, e.g. ENCRYPT could decrypt all
 * encrypted messages in the batch, not remove them, and pass the batch up when done.
 * @param batch The message batch
 */
public void up(MessageBatch batch) {
    for(Iterator<Message> it=batch.iterator(); it.hasNext();) {
        Message msg=it.next();
        if(msg != null && accept(msg)) {
            it.remove();
            try {
                up(msg);
            }
            catch(Throwable t) {
                log.error(Util.getMessage("PassUpFailure"), t);
            }
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
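As a concrete illustration of the subclass contract described in the Javadoc above, an override might follow the pattern below: remove and process only the messages carrying this protocol's own header, then pass the remainder up. This is a minimal sketch, not JGroups source; the class name MyProtocol and the handle() helper are hypothetical, while id, up_prot and the iterator-based removal come from the snippets on this page.

import java.util.Iterator;
import org.jgroups.Message;
import org.jgroups.stack.Protocol;
import org.jgroups.util.MessageBatch;

public class MyProtocol extends Protocol { // hypothetical protocol, for illustration only
    @Override
    public void up(MessageBatch batch) {
        for(Iterator<Message> it=batch.iterator(); it.hasNext();) {
            Message msg=it.next();
            if(msg != null && msg.getHeader(id) != null) {
                it.remove();  // destined for this protocol: remove so it isn't delivered twice
                handle(msg);  // hypothetical per-message processing
            }
        }
        if(!batch.isEmpty())
            up_prot.up(batch); // hand the remaining messages to the next protocol up the stack
    }

    protected void handle(Message msg) {
        // process a message addressed to this protocol
    }
}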
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch.isEmpty())
            return;
        if(is_trace) {
            Message first=batch.first(), last=batch.last();
            StringBuilder sb=new StringBuilder(local_addr + ": delivering");
            if(first != null && last != null) {
                UnicastHeader3 hdr1=first.getHeader(id), hdr2=last.getHeader(id);
                sb.append(" #").append(hdr1.seqno).append(" - #").append(hdr2.seqno);
            }
            sb.append(" (" + batch.size()).append(" messages)");
            log.trace(sb);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        log.warn(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch.isEmpty())
            return;
        if(log.isTraceEnabled()) {
            Message first=batch.first(), last=batch.last();
            StringBuilder sb=new StringBuilder(local_addr + ": delivering");
            if(first != null && last != null) {
                SequencerHeader hdr1=first.getHeader(id), hdr2=last.getHeader(id);
                sb.append(" #").append(hdr1.seqno).append(" - #").append(hdr2.seqno);
            }
            sb.append(" (" + batch.size()).append(" messages)");
            log.trace(sb);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
public void up(MessageBatch batch) {
    if(bypass) {
        up_prot.up(batch);
        return;
    }
    for(Message msg: batch) {
        if(msg.getHeader(id) != null) {
            batch.remove(msg);
            up(msg); // let the existing code handle this
        }
        else {
            if(msg.getDest() != null) { // skip unicast messages, process them right away
                batch.remove(msg);
                up_prot.up(msg);
            }
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
protected void deliverBatch(MessageBatch batch) {
    try {
        if(batch == null || batch.isEmpty())
            return;
        if(is_trace) {
            Message first=batch.first(), last=batch.last();
            StringBuilder sb=new StringBuilder(local_addr + ": delivering " + batch.sender());
            if(first != null && last != null) {
                NakAckHeader2 hdr1=first.getHeader(id), hdr2=last.getHeader(id);
                sb.append("#").append(hdr1.seqno).append("-").append(hdr2.seqno);
            }
            sb.append(" (" + batch.size()).append(" messages)");
            log.trace(sb);
        }
        up_prot.up(batch);
    }
    catch(Throwable t) {
        log.error(Util.getMessage("FailedToDeliverMsg"), local_addr, "batch", batch, t);
    }
}
public void up(MessageBatch batch) {
    for(Message msg: batch) {
        FragHeader hdr=msg.getHeader(this.id);
        if(hdr != null) { // needs to be defragmented
            Message assembled_msg=unfragment(msg, hdr);
            if(assembled_msg != null)
                // the reassembled msg has to be added in the right place (https://issues.jboss.org/browse/JGRP-1648),
                // and cannot be added to the tail of the batch!
                batch.replace(msg, assembled_msg);
            else
                batch.remove(msg);
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}
public void up(MessageBatch batch) {
    for(Message msg: batch) {
        StompHeader hdr=msg.getHeader(id);
        if(hdr != null || forward_non_client_generated_msgs) {
            try {
                batch.remove(msg);
                up(msg);
            }
            catch(Throwable t) {
                log.error(Util.getMessage("FailedPassingUpMessage"), t);
            }
        }
    }
    if(!batch.isEmpty())
        up_prot.up(batch);
}