@Override
public void onTaskConfigUpdate(Collection<ConnectorTaskId> tasks) {
    // Task configs changed in the backing store: re-read the snapshot under the
    // herder lock so all readers of configState observe a consistent view.
    synchronized (StandaloneHerder.this) {
        ClusterConfigState refreshed = configBackingStore.snapshot();
        configState = refreshed;
    }
}
@Override public void onConnectorConfigUpdate(String connector) { // TODO: move connector configuration update handling here to be consistent with // the semantics of the config backing store synchronized (StandaloneHerder.this) { configState = configBackingStore.snapshot(); } }
@Override
public void onConnectorConfigRemove(String connector) {
    // A connector config was removed from the backing store; refresh the cached
    // snapshot under the herder lock so configState no longer reflects it.
    synchronized (StandaloneHerder.this) {
        ClusterConfigState refreshed = configBackingStore.snapshot();
        configState = refreshed;
    }
}
@Override
public void onConnectorTargetStateChange(String connector) {
    synchronized (StandaloneHerder.this) {
        // Pick up the new target state from a fresh snapshot, then push it to the worker.
        configState = configBackingStore.snapshot();
        TargetState newState = configState.targetState(connector);
        worker.setTargetState(connector, newState);
        // Resuming a connector may require (re)creating its tasks.
        if (TargetState.STARTED == newState) {
            updateConnectorTasks(connector);
        }
    }
}
}
// Sweep the status backing store and drop status entries for connectors that no
// longer exist in the current configuration snapshot.
private void updateDeletedConnectorStatus() {
    ClusterConfigState snapshot = configBackingStore.snapshot();
    Set<String> configuredConnectors = snapshot.connectors();
    for (String candidate : statusBackingStore.connectors()) {
        if (configuredConnectors.contains(candidate))
            continue;
        log.debug("Cleaning status information for connector {}", candidate);
        onDeletion(candidate);
    }
}
@Override
public List<ProtocolMetadata> metadata() {
    // Advertise this worker's REST URL and current config offset as the group
    // protocol metadata for the (single) Connect subprotocol.
    configSnapshot = configStorage.snapshot();
    ConnectProtocol.WorkerState workerState =
            new ConnectProtocol.WorkerState(restUrl, configSnapshot.offset());
    ByteBuffer serialized = ConnectProtocol.serializeMetadata(workerState);
    ProtocolMetadata entry = new ProtocolMetadata(DEFAULT_SUBPROTOCOL, serialized);
    return Collections.singletonList(entry);
}
private Long ensureLeaderConfig(long maxOffset) { // If this leader is behind some other members, we can't do assignment if (configSnapshot.offset() < maxOffset) { // We might be able to take a new snapshot to catch up immediately and avoid another round of syncing here. // Alternatively, if this node has already passed the maximum reported by any other member of the group, it // is also safe to use this newer state. ClusterConfigState updatedSnapshot = configStorage.snapshot(); if (updatedSnapshot.offset() < maxOffset) { log.info("Was selected to perform assignments, but do not have latest config found in sync request. " + "Returning an empty configuration to trigger re-sync."); return null; } else { configSnapshot = updatedSnapshot; return configSnapshot.offset(); } } return maxOffset; }
/** * Try to read to the end of the config log within the given timeout * @param timeoutMs maximum time to wait to sync to the end of the log * @return true if successful, false if timed out */ private boolean readConfigToEnd(long timeoutMs) { log.info("Current config state offset {} is behind group assignment {}, reading to end of config log", configState.offset(), assignment.offset()); try { configBackingStore.refresh(timeoutMs, TimeUnit.MILLISECONDS); configState = configBackingStore.snapshot(); log.info("Finished reading to end of log and updated config snapshot, new config log offset: {}", configState.offset()); return true; } catch (TimeoutException e) { // in case reading the log takes too long, leave the group to ensure a quick rebalance (although by default we should be out of the group already) // and back off to avoid a tight loop of rejoin-attempt-to-catch-up-leave log.warn("Didn't reach end of config log quickly enough", e); member.maybeLeaveGroup(); backoff(workerUnsyncBackoffMs); return false; } }
// Refresh the locally cached cluster config view from the backing store.
// NOTE(review): orphan fragment — the enclosing method/definition is not visible
// in this chunk; confirm against the full file where this statement belongs.
configState = configBackingStore.snapshot();