/**
 * Safety net: if this alert was never explicitly emitted, emit it when the
 * object is garbage collected so the alert is not silently lost.
 */
@Override
protected void finalize() throws Throwable
{
  if (emitted) {
    return;
  }
  logIt("Alert not emitted, emitting. %s: %s");
  super.emit();
}
// Emits this alert: logs it locally, then marks it emitted BEFORE delegating to the
// superclass so the finalize() safety net will not re-emit it later.
@Override public void emit() { logIt("%s: %s"); emitted = true; super.emit(); }
/**
 * Periodic sync callback: attempts one sync from storage, alerting on failure
 * while still active, and tells the scheduler to repeat while active.
 */
@Override
public ScheduledExecutors.Signal call()
{
  try {
    syncFromStorage();
  }
  catch (Exception ex) {
    // Only alert while active; failures during shutdown are expected noise.
    if (active) {
      log.makeAlert(ex, "Failed to sync with storage").emit();
    }
  }
  return active ? ScheduledExecutors.Signal.REPEAT : ScheduledExecutors.Signal.STOP;
}
}
/**
 * Returns the servers of {@code tier} that satisfy {@code predicate}.
 * Emits an alert and returns an empty list when the tier has no servers at all.
 */
private static List<ServerHolder> getFilteredHolders(
    final String tier,
    final DruidCluster druidCluster,
    final Predicate<ServerHolder> predicate
)
{
  final NavigableSet<ServerHolder> tierServers = druidCluster.getHistoricalsByTier(tier);
  if (tierServers != null) {
    return tierServers.stream().filter(predicate).collect(Collectors.toList());
  }
  // A tier with no servers points at a cluster misconfiguration; surface it loudly.
  log.makeAlert("Tier[%s] has no servers! Check your cluster configuration!", tier).emit();
  return Collections.emptyList();
}
/**
 * Invoked when the background lookup manager future completes exceptionally.
 * Releases the exit latch, then distinguishes deliberate cancellation from an
 * unexpected crash.
 */
@Override
public void onFailure(Throwable t)
{
  // Signal any waiters that the background manager has exited.
  backgroundManagerExitedLatch.countDown();
  if (!backgroundManagerFuture.isCancelled()) {
    // Unexpected death, not a deliberate shutdown: raise an alert.
    LOG.makeAlert(t, "Background lookup manager exited with error!").emit();
  } else {
    LOG.debug("Exited background lookup manager due to cancellation.");
  }
}
}
/**
 * Returns the available data segments ordered by {@code SEGMENT_COMPARATOR}.
 * Segments with a negative size are still included, but an alert is emitted
 * for each so an operator can investigate.
 */
public Set<DataSegment> getOrderedAvailableDataSegments()
{
  final Set<DataSegment> orderedSegments = Sets.newTreeSet(SEGMENT_COMPARATOR);
  for (DataSegment segment : getAvailableDataSegments()) {
    if (segment.getSize() < 0) {
      // A negative size should never happen; flag the offending segment.
      log.makeAlert("No size on Segment, wtf?")
         .addData("segment", segment)
         .emit();
    }
    orderedSegments.add(segment);
  }
  return orderedSegments;
}
/**
 * Picks the default server, alerting and throwing when none can be found.
 *
 * @throws ISE if no default server exists
 */
public Server pickDefaultServer()
{
  final Server chosen = findDefaultServer();
  if (chosen != null) {
    return chosen;
  }
  // No server at all means the request cannot be routed anywhere.
  log.makeAlert(
      "Catastrophic failure! No servers found at all! Failing request!"
  ).emit();
  throw new ISE("No default server found!");
}
/**
 * Best-effort deletion of {@code target} if it exists. Failures are reported
 * via an alert (with dataSource and file context) instead of being thrown.
 */
private void removeDirectory(final File target)
{
  if (!target.exists()) {
    return;
  }
  try {
    log.info("Deleting Index File[%s]", target);
    FileUtils.deleteDirectory(target);
  }
  catch (Exception ex) {
    // A failed delete leaves stale index data on disk; alert so it can be cleaned up.
    log.makeAlert(ex, "Failed to remove directory[%s]", schema.getDataSource())
       .addData("file", target)
       .emit();
  }
}
}
/**
 * Applies a user-map cache update pushed for the given authenticator:
 * deserializes the map into the in-memory cache and, when a cache directory is
 * configured, persists the raw bytes to disk. Deserialization failures are
 * alerted rather than propagated.
 */
@Override
public void handleAuthenticatorUpdate(String authenticatorPrefix, byte[] serializedUserMap)
{
  LOG.debug("Received cache update for authenticator [%s].", authenticatorPrefix);
  // Updates must not be applied before the manager has fully started.
  Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));
  try {
    cachedUserMaps.put(
        authenticatorPrefix,
        objectMapper.readValue(serializedUserMap, BasicAuthUtils.AUTHENTICATOR_USER_MAP_TYPE_REFERENCE)
    );
    final boolean persistToDisk = commonCacheConfig.getCacheDirectory() != null;
    if (persistToDisk) {
      writeUserMapToDisk(authenticatorPrefix, serializedUserMap);
    }
  }
  catch (Exception ex) {
    LOG.makeAlert(ex, "WTF? Could not deserialize user map received from coordinator.").emit();
  }
}
/**
 * Leadership-loss callback. Ignores the event when we were never the leader;
 * otherwise clears the flag and notifies the listener, alerting on failure.
 */
@Override
public void notLeader()
{
  try {
    if (leader) {
      leader = false;
      listener.stopBeingLeader();
    } else {
      // Defensive: a spurious not-leader event while we never held leadership.
      log.warn("I'm being asked to stop being leader. But I am not the leader. Ignored event.");
    }
  }
  catch (Exception ex) {
    log.makeAlert(ex, "listener.stopBeingLeader() failed. Unable to stopBeingLeader").emit();
  }
}
},
/**
 * Best-effort deletion of the sink's index directory if it exists. Failures
 * are reported via an alert carrying the dataSource, file, and interval.
 */
private void removeSegment(final Sink sink, final File target)
{
  if (!target.exists()) {
    return;
  }
  try {
    log.info("Deleting Index File[%s]", target);
    FileUtils.deleteDirectory(target);
  }
  catch (Exception ex) {
    log.makeAlert(ex, "Unable to remove file for dataSource[%s]", schema.getDataSource())
       .addData("file", target)
       .addData("interval", sink.getInterval())
       .emit();
  }
}
}
/**
 * Fired when the realtime task exceeds its configured alert timeout: emits an
 * alert identifying the dataSource and the timeout that was blown.
 */
@Override
public void run()
{
  final String dataSource = spec.getDataSchema().getDataSource();
  log.makeAlert(
      "RealtimeIndexTask for dataSource [%s] hasn't finished in configured time [%d] ms.",
      dataSource,
      spec.getTuningConfig().getAlertTimeout()
  ).emit();
}
},
/**
 * Groups the given segment descriptors by the server each segment's selector
 * picks. Segments for which no server can be picked are alerted and skipped.
 */
private SortedMap<DruidServer, List<SegmentDescriptor>> groupSegmentsByServer(Set<ServerToSegment> segments)
{
  final SortedMap<DruidServer, List<SegmentDescriptor>> grouped = Maps.newTreeMap();
  for (ServerToSegment entry : segments) {
    final QueryableDruidServer picked = entry.getServer().pick();
    if (picked != null) {
      grouped.computeIfAbsent(picked.getServer(), ignored -> new ArrayList<>())
             .add(entry.getSegmentDescriptor());
    } else {
      // A segment with no pickable server is unexpected; alert and drop it.
      log.makeAlert(
          "No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!",
          entry.getSegmentDescriptor(),
          query.getDataSource()
      ).emit();
    }
  }
  return grouped;
}
/**
 * Decrements the remaining lifetime of every in-flight segment move for the
 * tier, alerting on any move that has exhausted its lifetime (i.e. is stuck).
 */
protected void reduceLifetimes(String tier)
{
  for (BalancerSegmentHolder holder : currentlyMovingSegments.get(tier).values()) {
    holder.reduceLifetime();
    if (holder.getLifetime() > 0) {
      continue;
    }
    // The move has been pending for too long — flag the stuck segment.
    log.makeAlert("[%s]: Balancer move segments queue has a segment stuck", tier)
       .addData("segment", holder.getSegment().getIdentifier())
       .addData("server", holder.getFromServer().getMetadata())
       .emit();
  }
}
/**
 * Picks a server for the given query, alerting and throwing when none exists.
 *
 * @throws ISE if no server can be found for the query
 */
public <T> Server pickServer(Query<T> query)
{
  final Server chosen = findServer(query);
  if (chosen == null) {
    // No server at all means the query cannot be routed anywhere.
    log.makeAlert(
        "Catastrophic failure! No servers found at all! Failing request!"
    ).emit();
    throw new ISE("No server found for query[%s]", query);
  }
  log.debug("Selected [%s]", chosen.getHost());
  return chosen;
}
/**
 * Drops the segment (if still scheduled for deletion) and removes its info
 * cache file, all under the delete lock. Failures are alerted because they
 * can leak disk/memory resources.
 */
@Override
public void run()
{
  try {
    synchronized (segmentDeleteLock) {
      // Only act if this segment is still scheduled for deletion.
      if (segmentsToDelete.remove(segment)) {
        segmentManager.dropSegment(segment);
        final File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
        if (!segmentInfoCacheFile.delete()) {
          log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
        }
      }
    }
  }
  catch (Exception ex) {
    log.makeAlert(ex, "Failed to remove segment! Possible resource leak!")
       .addData("segment", segment)
       .emit();
  }
}
};
/**
 * Routes an Avatica request via the connection balancer, alerting and throwing
 * when no server is available for the connection.
 *
 * @throws ISE if the balancer yields no server
 */
public Server findServerAvatica(String connectionId)
{
  final Server chosen = avaticaConnectionBalancer.pickServer(getAllServers(), connectionId);
  if (chosen == null) {
    log.makeAlert(
        "Catastrophic failure! No servers found at all! Failing request!"
    ).emit();
    throw new ISE("No server found for Avatica request with connectionId[%s]", connectionId);
  }
  log.debug(
      "Balancer class [%s] sending request with connectionId[%s] to server: %s",
      avaticaConnectionBalancer.getClass(),
      connectionId,
      chosen.getHost()
  );
  return chosen;
}
@Override public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException { log.info("Performing action for task[%s]: %s", task.getId(), taskAction); if (taskAction.isAudited()) { // Add audit log try { storage.addAuditLog(task, taskAction); } catch (Exception e) { final String actionClass = taskAction.getClass().getName(); log.makeAlert(e, "Failed to record action in audit log") .addData("task", task.getId()) .addData("actionClass", actionClass) .emit(); throw new ISE(e, "Failed to record action [%s] in audit log", actionClass); } } return taskAction.perform(task, toolbox); } }
/**
 * Registers a new sink: indexes it by interval start, updates the sink-count
 * metric, adds it to the timeline, and announces its segment. Announcement
 * failures are alerted rather than propagated.
 */
private void addSink(final Sink sink)
{
  sinks.put(sink.getInterval().getStartMillis(), sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(sink.getInterval(), sink.getVersion(), new SingleElementPartitionChunk<Sink>(sink));
  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  }
  catch (IOException ex) {
    // An unannounced segment will not be discoverable; flag it with its interval.
    log.makeAlert(ex, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }
}
/**
 * Invoked when the task's future fails: emits an alert carrying the task's
 * identifying metadata, then reports the failure status.
 */
@Override
public void onFailure(final Throwable t)
{
  log.makeAlert(t, "Failed to run task")
     .addData("task", task.getId())
     .addData("type", task.getType())
     .addData("dataSource", task.getDataSource())
     .emit();
  handleStatus(TaskStatus.failure(task.getId()));
}