Refine search
/**
 * Marshals the given exception and sends it back to the state requester, so the requester
 * doesn't block waiting for state that will never arrive.
 * @param requester the member that asked for the state
 * @param exception the exception to marshal into the message buffer
 */
protected void sendException(Address requester, Throwable exception) {
    try {
        Message ex_msg=new Message(requester).setBuffer(Util.exceptionToBuffer(exception))
          .putHeader(getId(), new StateHeader(StateHeader.STATE_EX));
        down(ex_msg);
    }
    catch(Throwable t) {
        // best effort: if even the exception message cannot be sent, just log it.
        // Pass the exception itself (not exception.toString()): %s formats via toString() anyway,
        // matching the other sendException() variant in this file.
        log.error("%s: failed sending exception %s to %s", local_addr, exception, requester);
    }
}
protected void sendJoinRejectionMessage(Address dest, String error_msg) { if(dest == null) return; JoinRsp joinRes=new JoinRsp(error_msg); // specify the error message on the JoinRsp Message msg = new Message(dest).putHeader(GMS_ID, new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP)) .setBuffer(GMS.marshal(joinRes)); if(this.authenticate_coord) msg.putHeader(this.id, new AuthHeader(this.auth_token)); down_prot.down(msg); }
protected void sendJoinRejectionMessage(Address dest, String error_msg) { if (dest == null) return; JoinRsp joinRes = new JoinRsp(error_msg); // specify the error message on the JoinRsp Message msg = new Message(dest).putHeader(GMS_ID, new GmsHeader(GmsHeader.JOIN_RSP)).setBuffer( GMS.marshal(joinRes)); down_prot.down(msg); }
protected void sendMessage(byte[] b, int off, int len) throws IOException { Message m=new Message(stateRequester).putHeader(id, new StateHeader(StateHeader.STATE_PART)); // we're copying the buffer passed from the state provider here: if a BufferedOutputStream is used, the // buffer (b) will always be the same and can be modified after it has been set in the message ! // Fix for https://issues.jboss.org/browse/JGRP-1598 byte[] data=new byte[len]; System.arraycopy(b, off, data, 0, len); // m.setBuffer(b, off, len); m.setBuffer(data); bytesWrittenCounter+=len; if(Thread.interrupted()) throw interrupted((int)bytesWrittenCounter); down_prot.down(m); if(log.isTraceEnabled()) log.trace("%s: sent chunk of %s to %s",local_addr,Util.printBytes(len),stateRequester); }
/**
 * Marshals the given exception and sends it to the state requester, so the requester
 * doesn't block waiting for state that will never arrive.
 * @param requester the member that asked for the state
 * @param exception the exception to marshal into the message buffer
 */
protected void sendException(Address requester, Throwable exception) {
    try {
        Message msg=new Message(requester)
          .setBuffer(Util.exceptionToBuffer(exception))
          .putHeader(getId(), new StateHeader(StateHeader.STATE_EX));
        down(msg);
    }
    catch(Throwable t) {
        // best effort: if even the exception message cannot be sent, just log it
        log.error("%s: failed sending exception %s to %s", local_addr, exception, requester);
    }
}
@Override public void members(List<PingData> mbrs) { PhysicalAddress own_physical_addr=(PhysicalAddress)down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); PingData data=new PingData(local_addr, false, org.jgroups.util.NameCache.get(local_addr), own_physical_addr); PingHeader hdr=new PingHeader(PingHeader.GET_MBRS_REQ).clusterName(cluster_name); Set<PhysicalAddress> physical_addrs=mbrs.stream().filter(ping_data -> ping_data != null && ping_data.getPhysicalAddr() != null) .map(PingData::getPhysicalAddr).collect(Collectors.toSet()); for(PhysicalAddress physical_addr: physical_addrs) { if(physical_addr != null && own_physical_addr.equals(physical_addr)) // no need to send the request to myself continue; // the message needs to be DONT_BUNDLE, see explanation above final Message msg=new Message(physical_addr).setFlag(Message.Flag.INTERNAL, Message.Flag.DONT_BUNDLE, Message.Flag.OOB) .putHeader(this.id, hdr).setBuffer(marshal(data)); log.trace("%s: sending discovery request to %s", local_addr, msg.getDest()); down_prot.down(msg); } }
/**
 * Tells every flush participant to abort the flush for the given view id.
 * @param participants the members participating in the flush; no-op when null,
 *                     null elements are skipped
 * @param viewId       the view id the aborted flush was started for
 */
private void rejectFlush(Collection<? extends Address> participants, long viewId) {
    if(participants == null)
        return;
    for(Address member: participants) {
        if(member == null)
            continue;
        Message abort=new Message(member).src(localAddress)
          .setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
          .putHeader(this.id, new FlushHeader(FlushHeader.ABORT_FLUSH, viewId))
          .setBuffer(marshal(participants, null));
        down_prot.down(abort);
    }
}
/**
 * Starts the flush protocol
 * @param members List of participants in the flush protocol. Guaranteed to be non-null
 */
private void onSuspend(final List<Address> members) {
    Message msg = null;
    Collection<Address> participantsInFlush = null;
    synchronized (sharedLock) {
        // we become the flush coordinator
        flushCoordinator = localAddress;

        // start FLUSH only on group members that we need to flush
        // NOTE(review): retainAll mutates the caller-supplied 'members' list in place —
        // callers appear to rely on passing a private copy; confirm before changing.
        participantsInFlush = members;
        participantsInFlush.retainAll(currentView.getMembers());
        // rebuild the flush-member set: participants minus already-suspected members
        flushMembers.clear();
        flushMembers.addAll(participantsInFlush);
        flushMembers.removeAll(suspected);
        // START_FLUSH is a multicast (dest == null) carrying the participant list
        msg = new Message(null).src(localAddress).setBuffer(marshal(participantsInFlush, null))
          .putHeader(this.id, new FlushHeader(FlushHeader.START_FLUSH, currentViewId()));
    }
    if (participantsInFlush.isEmpty()) {
        // nobody to flush: complete the promise immediately instead of sending the message
        flush_promise.setResult(SUCCESS_START_FLUSH);
    } else {
        down_prot.down(msg);
        if (log.isDebugEnabled())
            log.debug(localAddress + ": flush coordinator "
                + " is starting FLUSH with participants " + participantsInFlush);
    }
}
protected void retransmit(SeqnoList missing_msgs, final Address sender, boolean multicast_xmit_request) { Address dest=(multicast_xmit_request || this.use_mcast_xmit_req)? null : sender; // to whom do we send the XMIT request ? if(xmit_from_random_member && !local_addr.equals(sender)) { Address random_member=Util.pickRandomElement(members); if(random_member != null && !local_addr.equals(random_member)) dest=random_member; } Message retransmit_msg=new Message(dest).setBuffer(Util.streamableToBuffer(missing_msgs)) .setFlag(Message.Flag.OOB, Message.Flag.INTERNAL) .putHeader(this.id, NakAckHeader2.createXmitRequestHeader(sender)); log.trace("%s: sending XMIT_REQ (%s) to %s", local_addr, missing_msgs, dest); down_prot.down(retransmit_msg); if(stats) xmit_reqs_sent.add(missing_msgs.size()); }
/**
 * Fetches the digests from all members and installs them again. Used only for diagnosis and
 * support; don't use this otherwise !
 */
protected void fixDigests() {
    Digest digest=fetchDigestsFromAllMembersInSubPartition(gms.view, null);
    Message install_msg=new Message()
      .putHeader(gms.getId(), new GMS.GmsHeader(GMS.GmsHeader.INSTALL_DIGEST))
      .setBuffer(GMS.marshal(null, digest));
    gms.getDownProtocol().down(install_msg);
}
protected void encryptAndSend(Message msg) throws Exception { EncryptHeader hdr=new EncryptHeader(EncryptHeader.ENCRYPT, symVersion()); // copy neeeded because same message (object) may be retransmitted -> prevent double encryption Message msgEncrypted=msg.copy(false).putHeader(this.id, hdr); if(msg.getLength() > 0) msgEncrypted.setBuffer(code(msg.getRawBuffer(),msg.getOffset(),msg.getLength(),false)); else { // length is 0 byte[] payload=msg.getRawBuffer(); if(payload != null) // we don't encrypt empty buffers (https://issues.jboss.org/browse/JGRP-2153) msgEncrypted.setBuffer(payload, msg.getOffset(), msg.getLength()); } down_prot.down(msgEncrypted); }
// NOTE(review): presumably invoked for messages arriving on a fork stack with no matching
// fork channel — confirm against the enclosing anonymous class (its opening is outside this view).
private Object handle(Message message) {
    Header header = (Header) message.getHeader(this.id);
    // If this is a request expecting a response, don't leave the requester hanging - send an identifiable response on which it can filter
    if ((header != null) && (header.type == Header.REQ) && header.rspExpected()) {
        // reply inherits the request's flags, minus RSVP/INTERNAL
        Message response = message.makeReply().setFlag(message.getFlags()).clearFlag(Message.Flag.RSVP, Message.Flag.INTERNAL);
        // echo the FORK header so the reply is routed back through the same fork stack
        response.putHeader(FORK.ID, message.getHeader(FORK.ID));
        response.putHeader(this.id, new Header(Header.RSP, header.req_id, header.corrId));
        // sentinel payload identifying this as an "unknown fork" response
        response.setBuffer(UNKNOWN_FORK_RESPONSE.array());
        fork.getProtocolStack().getChannel().down(response);
    }
    return null;
} }); // closes the enclosing anonymous class/expression (declared before this chunk)
/** Send back a response containing view and digest to sender */
protected void sendMergeResponse(Address sender, View view, Digest digest, MergeId merge_id) {
    GMS.GmsHeader hdr=new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP).mergeId(merge_id);
    Message rsp=new Message(sender)
      .setBuffer(GMS.marshal(view, digest))
      .setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(gms.getId(), hdr);
    gms.getDownProtocol().down(rsp);
}
/** Sends a retransmit request to the given sender */
protected void retransmit(SeqnoList missing, Address sender) {
    Message req=new Message(sender)
      .setBuffer(Util.streamableToBuffer(missing))
      .setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(id, UnicastHeader3.createXmitReqHeader());
    if(is_trace)
        log.trace("%s: sending XMIT_REQ (%s) to %s", local_addr, missing, sender);
    down_prot.down(req);
    xmit_reqs_sent.add(missing.size());
}
/** * Sends a MERGE_REQ to all coords and populates a list of MergeData (in merge_rsps). Returns after coords.size() * response have been received, or timeout msecs have elapsed (whichever is first).<p> * If a subgroup coordinator rejects the MERGE_REQ (e.g. because of participation in a different merge), * <em>that member will be removed from coords !</em> * @param coords A map of coordinatgor addresses and associated membership lists * @param new_merge_id The new merge id * @param timeout Max number of msecs to wait for the merge responses from the subgroup coords */ protected boolean getMergeDataFromSubgroupCoordinators(Map<Address,Collection<Address>> coords, MergeId new_merge_id, long timeout) { boolean gotAllResponses; long start=System.currentTimeMillis(); merge_rsps.reset(coords.keySet()); log.trace("%s: sending MERGE_REQ to %s", gms.local_addr, coords.keySet()); for(Map.Entry<Address,Collection<Address>> entry: coords.entrySet()) { Address coord=entry.getKey(); Collection<Address> mbrs=entry.getValue(); Message msg=new Message(coord).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL) .putHeader(gms.getId(), new GMS.GmsHeader(GMS.GmsHeader.MERGE_REQ).mbr(gms.local_addr).mergeId(new_merge_id)) .setBuffer(GMS.marshal(mbrs)); gms.getDownProtocol().down(msg); } // wait until num_rsps_expected >= num_rsps or timeout elapsed merge_rsps.waitForAllResponses(timeout); gotAllResponses=merge_rsps.hasAllResponses(); long time=System.currentTimeMillis() - start; log.trace("%s: collected %d merge response(s) in %d ms", gms.local_addr, merge_rsps.numberOfValidResponses(), time); return gotAllResponses; }