/** Records the given condition in the execution history, prefixed with the current wall-clock time. */
public void addCondition(String cond) {
    String entry=new Date() + ": " + cond;
    executions.add(entry);
}
/** Adds {@code obj} unless it is null or already contained; returns true if it was added. */
public boolean addIfAbsent(T obj) {
    if(obj == null || contains(obj))
        return false;
    return add(obj);
}
protected void suspect(Set<Address> suspects) { if(suspects == null) return; suspects.remove(local_addr); suspects.forEach(suspect -> suspect_history.add(String.format("%s: %s", new Date(), suspect))); suspected_mbrs.addAll(suspects); List<Address> eligible_mbrs=new ArrayList<>(this.members); eligible_mbrs.removeAll(suspected_mbrs); // Check if we're coord, then send up the stack if(local_addr != null && !eligible_mbrs.isEmpty() && local_addr.equals(eligible_mbrs.get(0))) { log.debug("%s: suspecting %s", local_addr, suspected_mbrs); up_prot.up(new Event(Event.SUSPECT, suspected_mbrs)); down_prot.down(new Event(Event.SUSPECT, suspected_mbrs)); } }
/**
 * Adds a single request to the queue (unless already present or the queue is suspended) and
 * decides whether the caller should start processing. Returns true only when this caller was the
 * last concurrent adder (count dropped back to 0), nobody is currently processing, and this
 * caller won the processing flag via setProcessing(true).
 */
protected boolean _add(R req) {
    if(req == null || suspended.get()) {
        log().trace("%s: queue is suspended; request %s is discarded", gms.getLocalAddress(), req);
        return false;
    }
    // Timestamped history entry; local var named 'log' does not clash with the logger, which is accessed via log()
    String log=new Date() + ": " + req.toString();
    count.incrementAndGet(); // count concurrent adders; deliberately incremented BEFORE taking the lock
    lock.lock();
    try {
        if(!requests.contains(req)) {
            requests.add(req);
            history.add(log);
        }
        // Last adder out (count back to 0) with no active processor claims processing rights
        return count.decrementAndGet() == 0 && !processing && setProcessing(true);
    }
    finally {
        lock.unlock();
    }
}
/**
 * Adds every request not already queued (unless the queue is suspended) and returns true when
 * the caller should start processing the queue.
 */
protected boolean _add(Collection<R> reqs) {
    if(reqs == null || reqs.isEmpty() || suspended.get()) {
        log().trace("%s: queue is suspended; requests are discarded", gms.getLocalAddress());
        return false;
    }
    count.incrementAndGet();
    lock.lock();
    try {
        for(R request: reqs) {
            if(requests.contains(request))
                continue;
            requests.add(request);
            history.add(new Date() + ": " + request.toString());
        }
        // last concurrent adder with nobody processing claims the processing flag
        return count.decrementAndGet() == 0 && !processing && setProcessing(true);
    }
    finally {
        lock.unlock();
    }
}
/**
 * Varargs variant: adds every request not already queued (unless the queue is suspended) and
 * returns true when the caller should start processing the queue.
 */
@SuppressWarnings("unchecked")
protected boolean _add(R ... reqs) {
    if(reqs == null || reqs.length == 0 || suspended.get()) {
        log().trace("%s: queue is suspended; requests are discarded", gms.getLocalAddress());
        return false;
    }
    count.incrementAndGet();
    lock.lock();
    try {
        for(R request: reqs) {
            if(requests.contains(request))
                continue;
            requests.add(request);
            history.add(new Date() + ": " + request.toString());
        }
        // last concurrent adder with nobody processing claims the processing flag
        return count.decrementAndGet() == 0 && !processing && setProcessing(true);
    }
    finally {
        lock.unlock();
    }
}
/** Called by ping task; will result in all members of host getting suspected */ protected void suspect(InetAddress host) { List<Address> suspects; suspect_history.add(new Tuple<>(host, System.currentTimeMillis())); // we need wall clock time here synchronized(hosts) { List<Address> tmp=hosts.get(host); suspects=tmp != null? new ArrayList<>(tmp) : null; } if(suspects != null) { log.debug("%s: suspecting host %s; suspected members: %s", local_addr, host, Util.printListWithDelimiter(suspects, ",")); suspect(suspects); } }
protected void suspect(List<Address> suspects) { if(suspects == null || suspects.isEmpty()) return; num_suspect_events+=suspects.size(); final List<Address> eligible_mbrs=new ArrayList<>(); synchronized(this) { for(Address suspect: suspects) { suspect_history.add(new Tuple<>(suspect, System.currentTimeMillis())); suspected_mbrs.add(suspect); } eligible_mbrs.addAll(members); eligible_mbrs.removeAll(suspected_mbrs); has_suspected_mbrs=!suspected_mbrs.isEmpty(); } // Check if we're coord, then send up the stack if(local_addr != null && !eligible_mbrs.isEmpty() && local_addr.equals(eligible_mbrs.get(0))) { log.debug("%s: suspecting %s", local_addr, suspects); up_prot.up(new Event(Event.SUSPECT, suspects)); down_prot.down(new Event(Event.SUSPECT, suspects)); } }
protected void suspect(List<Address> suspects) { if(suspects == null || suspects.isEmpty()) return; num_suspect_events+=suspects.size(); final List<Address> eligible_mbrs; synchronized(this) { for(Address suspect: suspects) { suspect_history.add(new Tuple<>(suspect, System.currentTimeMillis())); // need wall clock time suspected_mbrs.add(suspect); } eligible_mbrs=new ArrayList<>(members); eligible_mbrs.removeAll(suspected_mbrs); has_suspected_mbrs=!suspected_mbrs.isEmpty(); } // Check if we're coord, then send up the stack if(local_addr != null && !eligible_mbrs.isEmpty() && local_addr.equals(eligible_mbrs.get(0))) { log.debug("%s: suspecting %s", local_addr, suspects); up_prot.up(new Event(Event.SUSPECT, suspects)); down_prot.down(new Event(Event.SUSPECT, suspects)); } }
// Record the assembled digest summary in the history and mirror it to the debug log
digest_history.add(sb.toString());
if(log.isDebugEnabled())
    log.debug(sb.toString());
/** Queues a message received before this member became a server, or discards it when no queue exists. */
protected void queueMessage(Message msg, long seqno) {
    if(become_server_queue == null) {
        log.trace("%s: message %s::%d was discarded (not yet server)", local_addr, msg.getSrc(), seqno);
        return;
    }
    become_server_queue.add(msg);
    log.trace("%s: message %s::%d was added to queue (not yet server)", local_addr, msg.getSrc(), seqno);
}
// Append the new seqno for this member to the digest summary. The former "if(sb != null)"
// guard was dead code: sb had already been dereferenced by the append() call above it, so a
// null sb would have thrown before the check was ever reached. Chained appends also avoid
// building an intermediate concatenated String.
sb.append("\nnew seqno for ").append(local_addr).append(": ").append(seqno);
digest_history.add(sb.toString());
if(log.isDebugEnabled())
    log.debug(sb.toString());
// Fragment: when stats collection is enabled, count the suspect event and record the
// timestamped destination in the suspect history (closing braces lie outside this chunk)
if(stats) { num_suspect_events++; suspect_history.add(String.format("%s: %s", new Date(), dest));
/** * Sends a SUSPECT message to all group members. Only the coordinator (or the next member in line if the coord * itself is suspected) will react to this message by installing a new view. To overcome the unreliability * of the SUSPECT message (it may be lost because we are not above any retransmission layer), the following scheme * is used: after sending the SUSPECT message, it is also added to the broadcast task, which will periodically * re-send the SUSPECT until a view is received in which the suspected process is not a member anymore. The reason is * that - at one point - either the coordinator or another participant taking over for a crashed coordinator, will * react to the SUSPECT message and issue a new view, at which point the broadcast task stops. */ protected void broadcastSuspectMessage(Address suspected_mbr) { if(suspected_mbr == null) return; log.debug("%s: broadcasting suspect(%s)", local_addr, suspected_mbr); // 1. Send a SUSPECT message right away; the broadcast task will take some time to send it (sleeps first) FdHeader hdr=new FdHeader(FdHeader.SUSPECT).mbrs(Collections.singleton(suspected_mbr)); Message suspect_msg=new Message().setFlag(Message.Flag.INTERNAL).putHeader(this.id, hdr); down_prot.down(suspect_msg); // 2. Add to broadcast task and start latter (if not yet running). The task will end when // suspected members are removed from the membership bcast_task.addSuspectedMember(suspected_mbr); if(stats) { num_suspect_events++; suspect_history.add(String.format("%s: %s", new Date(), suspected_mbr)); } }
public synchronized boolean setMergeId(MergeId expected, MergeId new_value) { boolean match=Util.match(this.merge_id, expected); if(match) { if(new_value != null && merge_id_history.contains(new_value)) return false; else merge_id_history.add(new_value); this.merge_id=new_value; if(this.merge_id != null) { // Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the MERGE gms.getViewHandler().suspend(); gms.getDownProtocol().down(new Event(Event.SUSPEND_STABLE, 20000)); startMergeKiller(); } } return match; }
// Record the stability digest in the history of stability messages
stability_msgs.add(digest.toString());
/** Callback: a previously-missing message arrived; updates stats and the receive history. */
public void missingMessageReceived(long seqno, Message msg) {
    if(!stats)
        return;
    missing_msgs_received++;
    updateStats(received, msg.getSrc(), 0, 0, 1);
    receive_history.add(new MissingMessage(msg.getSrc(), seqno));
}
/* ------------------- End of Interface NakReceiverWindow.Listener ------------------- */
/** Queues a message received before this member became a server; discards it when no queue exists. */
protected void queueMessage(Message msg, long seqno) {
    final boolean queueing=become_server_queue != null;
    if(queueing)
        become_server_queue.add(msg);
    String fmt=queueing? "%s: message %s::%d was added to queue (not yet server)"
      : "%s: message %s::%d was discarded (not yet server)";
    log.trace(fmt, local_addr, msg.getSrc(), seqno);
}
/** Sends an OOB SUSPECT message for the given member down the stack and records the event. */
void suspect(Address mbr) {
    Header hdr=new Header(Header.SUSPECT, mbr);
    Message suspect_msg=new Message();
    suspect_msg.setFlag(Message.OOB);
    suspect_msg.putHeader(name, hdr);
    down_prot.down(new Event(Event.MSG, suspect_msg));
    num_suspect_events++;
    suspect_history.add(mbr);
}