private void createJChannel() throws Exception {
    System.out.println("Creating Channel");
    receiver=new MyReceiver(numberOfNodes, this);
    jChannel=new JChannel(config);
    jChannel.setReceiver(receiver);
    jChannel.connect(CLUSTER);
    receiver.waitUntilClusterIsFormed();
    Util.registerChannel(jChannel, JMX_DOMAIN);
    members.addAll(jChannel.getView().getMembers());
}
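/* A minimal sketch of a receiver like the MyReceiver used above (the class and
   its members are assumptions for illustration, not JGroups API): it counts
   down a latch once the view reaches the expected size, which is what
   waitUntilClusterIsFormed() blocks on. Needs org.jgroups.* and
   java.util.concurrent.CountDownLatch. */
public class MyReceiver extends ReceiverAdapter {
    private final CountDownLatch clusterFormed=new CountDownLatch(1);
    private final int numberOfNodes;

    public MyReceiver(int numberOfNodes, Object test) { // 2nd arg unused in this sketch
        this.numberOfNodes=numberOfNodes;
    }

    @Override
    public void viewAccepted(View view) {
        if(view.size() >= numberOfNodes)
            clusterFormed.countDown();
    }

    public void waitUntilClusterIsFormed() throws InterruptedException {
        clusterFormed.await();
    }
}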
public Object up(Message msg) {
    LockingHeader hdr=msg.getHeader(id);
    if(hdr == null)
        return up_prot.up(msg);

    Request req=null;
    try {
        req=Util.streamableFromBuffer(Request::new, msg.getRawBuffer(), msg.getOffset(), msg.getLength())
          .sender(msg.src());
    }
    catch(Exception ex) {
        log.error("%s: failed deserializing request", local_addr, ex);
        return null;
    }
    if(req.type != Type.LOCK_INFO_REQ && req.type != Type.LOCK_INFO_RSP && req.type != Type.LOCK_REVOKED
      && null != view && !view.containsMember(msg.getSrc())) {
        log.error("%s: received request from '%s' but member is not present in the current view - ignoring request",
                  local_addr, msg.src());
        return null;
    }
    requestReceived(req);
    return null;
}
protected Object installView(byte[] buf, int offset, int length) {
    try {
        ViewData data=Util.streamableFromByteBuffer(ViewData::new, buf, offset, length);
        if(data.uuids != null)
            NameCache.add(data.uuids);
        remote_view=data.remote_view;
        if(global_view == null || (data.global_view != null && !global_view.equals(data.global_view))) {
            global_view=data.global_view;
            if(data.global_view != null) { // guard against an NPE when only the remote view was shipped
                synchronized(this) {
                    if(data.global_view.getViewId().getId() > global_view_id)
                        global_view_id=data.global_view.getViewId().getId();
                }
                if(present_global_views)
                    return up_prot.up(new Event(Event.VIEW_CHANGE, global_view));
            }
        }
    }
    catch(Exception e) {
        log.error(Util.getMessage("FailedInstallingView"), e);
    }
    return null;
}
/** Checks if two views have the same members regardless of order. E.g. {A,B,C} and {B,A,C} returns true */
public static boolean sameMembers(View v1, View v2) {
    if(v1 == v2)
        return true;
    if(v1.size() != v2.size())
        return false;
    Address[][] diff=diff(v1, v2);
    return diff[0].length == 0 && diff[1].length == 0;
}
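/* Usage sketch for sameMembers(): two views over the same members, listed in a
   different order and with different creators/ids, compare as equal. The views
   are fabricated with View.create() and org.jgroups.util.UUID addresses. */
Address a=UUID.randomUUID(), b=UUID.randomUUID(), c=UUID.randomUUID();
View v1=View.create(a, 1, a, b, c); // {A,B,C}
View v2=View.create(b, 2, b, a, c); // {B,A,C}
assert sameMembers(v1, v2);         // true: same members, order ignored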
private boolean onViewChange(View view) {
    boolean coordinatorLeft=false;
    View oldView;
    synchronized(sharedLock) {
        suspected.retainAll(view.getMembers());
        oldView=currentView;
        currentView=view;
        coordinatorLeft=!oldView.getMembers().isEmpty() && !view.getMembers().isEmpty()
          && !view.containsMember(oldView.getCreator());
    }
    if(log.isDebugEnabled())
        log.debug(localAddress + ": installing view " + view);
    return coordinatorLeft;
}
protected void handleView(View view) {
    view_size=view.size();
    Address tmp=Util.pickNext(view.getMembers(), local_addr);
    if(tmp != null && !tmp.equals(local_addr)) {
        next=tmp;
        if(log.isDebugEnabled())
            log.debug("next=" + next);
    }
}
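/* A quick sketch of Util.pickNext()'s ring semantics, which the method above
   relies on (the addresses are fabricated with org.jgroups.util.UUID for
   illustration): the last member's "next" wraps around to the first. */
Address a=UUID.randomUUID(), b=UUID.randomUUID(), c=UUID.randomUUID();
List<Address> mbrs=Arrays.asList(a, b, c);
assert Util.pickNext(mbrs, a).equals(b);
assert Util.pickNext(mbrs, c).equals(a); // wraps around to the first member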
public Object up(Message msg) {
    GmsHeader hdr=msg.getHeader(this.id);
    if(hdr == null)
        return up_prot.up(msg);
    switch(hdr.type) {
        case GmsHeader.VIEW: {
            View new_view=hdr.view; // assumption: the view travels in the header, as in older GMS versions
            if(new_view == null)
                return null;
            // discard the view if we've already installed it (or a more recent one)
            ViewId my_view_id=this.view != null? this.view.getViewId() : null;
            if(my_view_id != null && my_view_id.compareToIDs(new_view.getViewId()) >= 0)
                return null;
            if(new_view instanceof DeltaView) {
                try {
                    log.trace("%s: received delta view %s", local_addr, new_view);
                    new_view=createViewFromDeltaView(view, (DeltaView)new_view);
                }
                catch(Throwable t) {
                    log.warn("%s: failed to create view from delta-view; dropping view: %s", local_addr, t.toString());
                    // fall back to asking the sender for the full view
                    log.trace("%s: sending request for full view to %s", local_addr, msg.src());
                    Message full_view_req=new Message(msg.src())
                      .putHeader(id, new GmsHeader(GmsHeader.GET_CURRENT_VIEW));
                    down_prot.down(full_view_req);
                    return null;
                }
            }
            impl.handleViewChange(new_view, hdr.my_digest);
            return null;
        }
        case GmsHeader.GET_CURRENT_VIEW: {
            ViewId view_id=readViewId(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); // assumption: the requester's view-id travels in the payload
            if(view_id != null) {
                ViewId my_view_id=this.view != null? this.view.getViewId() : null;
                if(my_view_id != null && my_view_id.compareToIDs(view_id) <= 0)
                    return null; // my view-id doesn't differ from sender's view-id; no need to send view
            }
            sendFullView(msg.src()); // hypothetical helper: unicasts the current full view to the requester
            return null;
        }
        default:
            if(log.isErrorEnabled())
                log.error(Util.getMessage("GmsHeaderWithType"), hdr.type);
            return null;
    }
}
protected MutableDigest fetchDigestsFromAllMembersInSubPartition(View view, MergeId merge_id) {
    final List<Address> current_mbrs=view.getMembers();
    // if we're the only member, there is nobody else to fetch digests from
    if(current_mbrs.size() <= 1)
        return new MutableDigest(view.getMembersRaw())
          .set((Digest)gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr)));

    Message get_digest_req=new Message().setFlag(Message.Flag.OOB, Message.Flag.INTERNAL)
      .putHeader(gms.getId(), new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ).mergeId(merge_id));
    digest_collector.reset(current_mbrs); // expect a response from every current member
    gms.getDownProtocol().down(get_digest_req);

    // add our own digest directly; the request above is not looped back to us
    Digest digest=(Digest)gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));
    digest_collector.add(gms.local_addr, digest);
    digest_collector.waitForAllResponses(max_wait_time);
    if(log.isTraceEnabled()) {
        if(digest_collector.hasAllResponses())
            log.trace("%s: fetched all digests for %s", gms.local_addr, current_mbrs);
        else
            log.trace("%s: fetched incomplete digests (after timeout of %d ms) for %s",
                      gms.local_addr, max_wait_time, current_mbrs);
    }
    // merge whatever digests were collected into the return value
    MutableDigest retval=new MutableDigest(view.getMembersRaw());
    digest_collector.getResults().values().stream().filter(Objects::nonNull).forEach(retval::set);
    return retval;
}
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.VIEW_CHANGE: {
            Object retval=down_prot.down(evt); // pass the new view down first
            View view=evt.getArg();
            members.clear();
            members.addAll(view.getMembers());
            bcast_task.adjustSuspectedMembers(members);
            computePingDest(null);
            if(view.size() <= 1) // we're alone; nothing to monitor
                stopMonitor();
            else if(!isMonitorRunning())
                startMonitor(); // assumption: counterpart of stopMonitor()
            return retval;
        }
        case Event.UNSUSPECT: {
            // rescind a previous suspicion by broadcasting an UNSUSPECT message
            FdHeader hdr=new FdHeader(FdHeader.UNSUSPECT);
            hdr.mbrs=new ArrayList<>();
            hdr.mbrs.add(evt.getArg());
            hdr.from=local_addr;
            Message unsuspect_msg=new Message().setFlag(Message.Flag.INTERNAL).putHeader(id, hdr);
            log.trace("%s: broadcasting UNSUSPECT message (mbrs=%s)", local_addr, hdr.mbrs);
            down_prot.down(unsuspect_msg);
            break;
        }
    }
    return down_prot.down(evt);
}
public Object up(Message msg) {
    PingHeader hdr=msg.getHeader(this.id);
    if(hdr == null)
        return up_prot.up(msg);

    PingData data=readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength());
    Address logical_addr=data != null? data.getAddress() : msg.src();

    switch(hdr.type) {
        case PingHeader.GET_MBRS_REQ:
            if(cluster_name == null || hdr.cluster_name == null) {
                log.warn("cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " +
                           "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src());
            }
            else if(!cluster_name.equals(hdr.cluster_name)) {
                log.warn("%s: discarding discovery request for cluster '%s' from %s; " +
                           "our cluster name is '%s'. Please separate your clusters properly",
                         logical_addr, hdr.cluster_name, msg.src(), cluster_name);
                return null;
            }
            // reply with the local cache: one response per known member that is in the current view
            Map<Address,PhysicalAddress> cache=(Map<Address,PhysicalAddress>)down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS));
            if(cache != null) {
                for(Map.Entry<Address,PhysicalAddress> entry: cache.entrySet()) {
                    Address addr=entry.getKey();
                    if(addr.equals(local_addr) || (view != null && view.containsMember(addr))) {
                        PhysicalAddress physical_addr=entry.getValue();
                        sendDiscoveryResponse(addr, physical_addr, NameCache.get(addr), msg.getSrc(), isCoord(addr));
                    }
                }
            }
            return null;
        case PingHeader.GET_MBRS_RSP:
            log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data);
            handleDiscoveryResponse(data, msg.src());
            return null;
        default:
            return null;
    }
}
public void receive(Message msg) {
    Address sender=msg.getSrc();
    if(bridge.getAddress().equals(sender)) // discard my own messages
        return;
    RelayHeader hdr=msg.getHeader(id);
    switch(hdr.type) {
        case DISSEMINATE: // should not occur here, but we'll ignore it anyway
            break;
        case FORWARD:
            sendOnLocalCluster(msg.getRawBuffer(), msg.getOffset(), msg.getLength());
            break;
        case VIEW:
            try {
                ViewData data=Util.streamableFromByteBuffer(ViewData::new,
                                                            msg.getRawBuffer(), msg.getOffset(), msg.getLength());
                // copy the member list so the remote view can be modified locally
                List<Address> mbrs=new LinkedList<>(data.remote_view.getMembers());
                data.remote_view=new View(data.remote_view.getViewId(), mbrs);
                // ... further processing of data (installing the remote view) not shown ...
            }
            catch(Exception e) {
                log.error(Util.getMessage("FailedUnmarshallingViewFromRemoteCluster"), e);
            }
            break;
    }
}
public Object down(Event evt) {
    switch(evt.getType()) {
        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            List<Address> new_members=((View)evt.getArg()).getMembers();
            synchronized(members) {
                members.clear();
                if(new_members != null && !new_members.isEmpty())
                    members.addAll(new_members);
            }
            return down_prot.down(evt);
    }
    return down_prot.down(evt); // pass on to the layer below us
}
public void start() {
    try {
        channel=new JChannel(props);
        disp=(RpcDispatcher)new RpcDispatcher(channel, this)
          .setMembershipListener(this).setStateListener(this);
        channel.connect(channel_name);
        System.out.println("\nQuote Server started at " + new Date());
        System.out.println("Joined channel '" + channel_name + "' (" + channel.getView().size() + " members)");
        channel.getState(null, 0); // fetch state from the coordinator; 0 means no timeout
        System.out.println("Ready to serve requests");
    }
    catch(Exception e) {
        log.error("failed starting the quote server", e);
        System.exit(-1);
    }
}
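/* A hypothetical client-side counterpart (the "getQuote" method name, its
   arguments and the Float result type are assumptions, not taken from the
   server above): it invokes a method on all servers in the channel via
   RpcDispatcher and collects the responses. */
RspList<Float> rsps=disp.callRemoteMethods(null, "getQuote",
                                           new Object[]{"IBM"}, new Class[]{String.class},
                                           new RequestOptions(ResponseMode.GET_ALL, 5000));
rsps.getResults().forEach(quote -> System.out.println("quote: " + quote));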
/**
 * Starts the flush protocol
 * @param members List of participants in the flush protocol. Guaranteed to be non-null
 */
private void onSuspend(final List<Address> members) {
    Message msg=null;
    Collection<Address> participantsInFlush=null;
    synchronized(sharedLock) {
        flushCoordinator=localAddress;
        // start FLUSH only on group members that we need to flush
        participantsInFlush=members;
        participantsInFlush.retainAll(currentView.getMembers());
        flushMembers.clear();
        flushMembers.addAll(participantsInFlush);
        flushMembers.removeAll(suspected);
        msg=new Message(null).src(localAddress).setBuffer(marshal(participantsInFlush, null))
          .putHeader(this.id, new FlushHeader(FlushHeader.START_FLUSH, currentViewId()));
    }
    if(participantsInFlush.isEmpty()) {
        flush_promise.setResult(SUCCESS_START_FLUSH);
    }
    else {
        down_prot.down(msg);
        if(log.isDebugEnabled())
            log.debug(localAddress + ": flush coordinator is starting FLUSH with participants " + participantsInFlush);
    }
}
protected void start(String props, String name) throws Exception {
    channel=new JChannel(props).name(name);
    // TP transport=channel.getProtocolStack().getTransport();
    // transport.setThreadPool(new DirectExecutor());
    disp=new RpcDispatcher(channel, this).setMembershipListener(this);
    disp.setMethodLookup(ignored -> requestMethod);
    channel.connect("rt");
    View view=channel.getView();
    if(view.size() > 2)
        System.err.printf("More than 2 members found (%s); terminating\n", view);
    else
        loop();
    Util.close(channel, disp);
}
protected void sendDiscoveryResponse(Address logical_addr, PhysicalAddress physical_addr,
                                     String logical_name, final Address sender, boolean coord) {
    final PingData data=new PingData(logical_addr, is_server, logical_name, physical_addr).coord(coord);
    final Message rsp_msg=new Message(sender).setFlag(Message.Flag.INTERNAL, Message.Flag.OOB, Message.Flag.DONT_BUNDLE)
      .putHeader(this.id, new PingHeader(PingHeader.GET_MBRS_RSP)).setBuffer(marshal(data));

    if(stagger_timeout > 0) {
        int view_size=view != null? view.size() : 10;
        int rank=Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
        long sleep_time=rank == 0? Util.random(stagger_timeout)
          : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
        timer.schedule(() -> {
            log.trace("%s: received GET_MBRS_REQ from %s, sending staggered response %s", local_addr, sender, data);
            down_prot.down(rsp_msg);
        }, sleep_time, TimeUnit.MILLISECONDS, sends_can_block);
        return;
    }
    log.trace("%s: received GET_MBRS_REQ from %s, sending response %s", local_addr, sender, data);
    down_prot.down(rsp_msg);
}
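/* A worked example of the stagger computation above (the concrete values are
   made up): with stagger_timeout=3000 ms and view_size=10, rank 0 (the
   coordinator) sleeps a random time in [0..3000) ms, rank 4 sleeps
   3000*4/10 - 3000/10 = 1200 - 300 = 900 ms, and rank 9 sleeps
   2700 - 300 = 2400 ms. Responses are thus spread over the stagger window
   instead of hitting the requester all at once. */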
protected void castViewChange(View new_view, Digest digest) { // signature reconstructed from usage
    members=new_view.getMembers();
    log.trace("mcasting view {" + new_view + "} (" + new_view.size() + " mbrs)\n");
    Message view_change_msg=new Message(); // bcast to all members
    GmsHeader hdr=new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest=digest;
    view_change_msg.putHeader(name, hdr);
    // send a TMP_VIEW down first, so lower layers (e.g. NAKACK) already know the
    // new membership when the view multicast below arrives
    down_prot.down(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.MSG, view_change_msg));
}
protected void handleView(View v) { // signature and local declarations reconstructed from usage
    Address old_key_server=key_server_addr;
    boolean left_mbrs=change_key_on_leave && this.view != null && !v.containsMembers(this.view.getMembersRaw());
    boolean create_new_key=secret_key == null || left_mbrs;
    super.handleView(v);
    if(key_requesters != null)
        key_requesters.retainAll(v.getMembers());
    key_server_addr=v.getCoord(); // the coordinator is the keyserver
    if(Objects.equals(key_server_addr, local_addr)) {
        if(!Objects.equals(key_server_addr, old_key_server))
            log.debug("%s: I'm the new key server", local_addr);
        if(create_new_key) {
            createNewKey();
            // restart the task that announces the new keyserver, until all members have responded
            if(key_requesters != null)
                key_requesters.stop();
            List<Address> targets=new ArrayList<>(v.getMembers());
            targets.remove(local_addr);
            key_requesters=new ResponseCollectorTask<Boolean>(targets)
              .setPeriodicTask(c -> {
                  Message msg=new Message(null).setTransientFlag(Message.TransientFlag.DONT_LOOPBACK)
                    .putHeader(id, new EncryptHeader(EncryptHeader.NEW_KEYSERVER, sym_version));
                  down_prot.down(msg);
              })
              .start(getTransport().getTimer(), 0, key_server_interval);
        }
    }
}
/** Typically received by the coord, which sends its cache contents to the sender (new joiner). However, we don't
 * send one large message, but rather N messages (1 per cluster member). The reason is that we don't know where in
 * the stack NAMING will be running and therefore cannot assume fragmentation of large messages. */
protected void handleCacheRequest(Address sender) {
    int view_size=view != null? view.size() : 0;
    if(view_size == 0)
        return;
    for(Address addr: view.getMembersRaw()) {
        if(Objects.equals(addr, sender))
            continue;
        String logical_name=NameCache.get(addr);
        if(logical_name == null)
            continue;
        Header hdr=new Header(Type.CACHE_RSP, addr, logical_name);
        Message msg=new Message(sender).putHeader(id, hdr);
        if(log.isTraceEnabled())
            log.trace("%s: sending %s to %s", local_addr, hdr, sender);
        try {
            down_prot.down(msg);
        }
        catch(Throwable t) {
            log.error("failed sending CACHE_RSP", t);
        }
    }
}
boolean successfulFlush=false;
boolean validView=new_view != null && new_view.size() > 0;
if(validView && flushProtocolInStack) {
    int attemptCount=0;
    while(attemptCount < maxAttempts) {
        if(attemptCount > 0)
            Util.sleepRandom(randomFloor, randomCeiling); // random back-off before retrying
        try {
            up_prot.up(new Event(Event.SUSPEND, new ArrayList<>(new_view.getMembers())));
            successfulFlush=true;
            break;
        }
        catch(Exception e) {
            attemptCount++;
        }
    }
    if(successfulFlush) {
        if(log.isTraceEnabled())
            log.trace(local_addr + ": successful GMS flush by coordinator");
    }
    else {
        if(log.isWarnEnabled())
            log.warn(local_addr + ": GMS flush by coordinator failed");
        // resume message traffic so the cluster isn't left suspended after a failed flush
        up(new Event(Event.RESUME, new ArrayList<>(new_view.getMembers())));
    }
}