private void handleAuthorizationCheckError( String errorMsg, HttpServletRequest servletRequest, HttpServletResponse servletResponse ) { // Send out an alert so there's a centralized collection point for seeing errors of this nature log.makeAlert(errorMsg) .addData("uri", servletRequest.getRequestURI()) .addData("method", servletRequest.getMethod()) .addData("remoteAddr", servletRequest.getRemoteAddr()) .addData("remoteHost", servletRequest.getRemoteHost()) .emit(); if (servletResponse.isCommitted()) { throw new ISE(errorMsg); } else { try { servletResponse.sendError(Response.SC_FORBIDDEN); } catch (Exception e) { throw new RuntimeException(e); } } }
/**
 * Static factory: builds an AlertBuilder whose description is the formatted message and
 * whose emit() will go through the supplied emitter.
 */
public static AlertBuilder createEmittable(ServiceEmitter emitter, String descriptionFormat, Object... objects)
{
  final String description = StringUtils.format(descriptionFormat, objects);
  return new AlertBuilder(description, emitter);
}
/**
 * Static factory: builds an AlertBuilder with no emitter attached.
 * Delegates to {@link #createEmittable} with a null emitter.
 */
public static AlertBuilder create(String descriptionFormat, Object... objects)
{
  return AlertBuilder.createEmittable(null, descriptionFormat, objects);
}
/**
 * Safety net: if this alert was built but never emitted, emit it when the builder is
 * garbage-collected so alerts are not silently lost.
 * NOTE(review): relying on finalization is inherently non-deterministic; presumably this is
 * a deliberate best-effort guard — confirm before relying on it.
 */
@Override
protected void finalize() throws Throwable
{
  try {
    if (!emitted) {
      logIt("Alert not emitted, emitting. %s: %s");
      super.emit();
    }
  }
  finally {
    // The finalize() contract requires invoking super.finalize(); the original omitted this.
    super.finalize();
  }
}
/**
 * Builds an alert for the given throwable and formatted message, attaching the logger's class
 * plus the exception's type, message and full stack trace as alert data.
 *
 * @throws ISE if no emitter has been registered via registerEmitter()
 */
public AlertBuilder makeAlert(Throwable t, String message, Object... objects)
{
  if (emitter == null) {
    final String errorMessage = StringUtils.format(
        "Emitter not initialized! Cannot alert. Please make sure to call %s.registerEmitter()",
        this.getClass()
    );
    error(errorMessage);
    throw new ISE(errorMessage);
  }

  final AlertBuilder alertBuilder =
      new EmittingAlertBuilder(t, StringUtils.format(message, objects), emitter)
          .addData("class", className);

  if (t != null) {
    // Render the stack trace into a string so it can travel with the alert payload.
    final StringWriter traceWriter = new StringWriter();
    t.printStackTrace(new PrintWriter(traceWriter));
    alertBuilder.addData("exceptionType", t.getClass());
    alertBuilder.addData("exceptionMessage", t.getMessage());
    alertBuilder.addData("exceptionStackTrace", traceWriter.toString());
  }

  return alertBuilder;
}
// Logs the alert locally, marks it as emitted (so the finalizer safety net does not re-emit
// it), then delegates the actual emission to the superclass.
@Override
public void emit()
{
  logIt("%s: %s");
  // Set before super.emit() so a duplicate cannot be produced at finalization time.
  emitted = true;
  super.emit();
}
/**
 * Returns the available segments ordered by SEGMENT_COMPARATOR. A segment reporting a
 * negative size triggers an alert but is still included in the result.
 */
public Set<DataSegment> getOrderedAvailableDataSegments()
{
  final Set<DataSegment> orderedSegments = Sets.newTreeSet(SEGMENT_COMPARATOR);
  for (DataSegment segment : getAvailableDataSegments()) {
    if (segment.getSize() < 0) {
      // Negative size indicates broken segment metadata; surface it via alert.
      log.makeAlert("No size on Segment, wtf?")
         .addData("segment", segment)
         .emit();
    }
    orderedSegments.add(segment);
  }
  return orderedSegments;
}
/**
 * Runs one sync-from-storage pass. Failures are alerted only while active (during shutdown
 * a failed sync is expected noise). Returns REPEAT while active, STOP once deactivated.
 */
@Override
public ScheduledExecutors.Signal call()
{
  try {
    syncFromStorage();
  }
  catch (Exception e) {
    if (active) {
      log.makeAlert(e, "Failed to sync with storage").emit();
    }
  }
  // Keep rescheduling until deactivated.
  return active ? ScheduledExecutors.Signal.REPEAT : ScheduledExecutors.Signal.STOP;
}
}
/**
 * Notifies every registered listener, on its own executor, that the given task moved to a new
 * location. A failure to dispatch to one listener is alerted and does not stop the others.
 *
 * @param listeners pairs of (listener, executor to notify it on)
 * @param taskId    the task whose location changed
 * @param location  the task's new location
 */
public static void notifyLocationChanged(
    final Iterable<Pair<TaskRunnerListener, Executor>> listeners,
    final String taskId,
    final TaskLocation location
)
{
  log.info("Task [%s] location changed to [%s].", taskId, location);
  for (final Pair<TaskRunnerListener, Executor> listener : listeners) {
    try {
      // Lambda replaces the verbose anonymous Runnable; this file already uses lambda syntax.
      listener.rhs.execute(() -> listener.lhs.locationChanged(taskId, location));
    }
    catch (Exception e) {
      log.makeAlert(e, "Unable to notify task listener")
         .addData("taskId", taskId)
         .addData("taskLocation", location)
         .addData("listener", listener.toString())
         .emit();
    }
  }
}
/**
 * Returns the servers of the given tier that match the predicate. If the tier is unknown,
 * alerts (misconfiguration) and returns an empty list.
 */
private static List<ServerHolder> getFilteredHolders(
    final String tier,
    final DruidCluster druidCluster,
    final Predicate<ServerHolder> predicate
)
{
  final NavigableSet<ServerHolder> tierServers = druidCluster.getHistoricalsByTier(tier);
  if (tierServers == null) {
    log.makeAlert("Tier[%s] has no servers! Check your cluster configuration!", tier).emit();
    return Collections.emptyList();
  }
  return tierServers.stream().filter(predicate).collect(Collectors.toList());
}
/**
 * Best-effort removal of a sink's on-disk index directory. A deletion failure is alerted
 * (with file and interval context) rather than propagated.
 */
private void removeSegment(final Sink sink, final File target)
{
  if (!target.exists()) {
    return;
  }
  try {
    log.info("Deleting Index File[%s]", target);
    FileUtils.deleteDirectory(target);
  }
  catch (Exception e) {
    log.makeAlert(e, "Unable to remove file for dataSource[%s]", schema.getDataSource())
       .addData("file", target)
       .addData("interval", sink.getInterval())
       .emit();
  }
}
}
@Override
public void run()
{
  // poll() is synchronized together with start(), stop() and isStarted() to ensure that when
  // stop() exits, poll() won't actually run anymore after that (it could only enter the
  // synchronized section and exit immediately because localStartOrder doesn't match the new
  // currentStartOrder). It's needed to avoid flakiness in SQLMetadataSegmentManagerTest.
  // See https://github.com/apache/incubator-druid/issues/6028
  readLock.lock();
  try {
    // Skip the poll if stop()/start() changed the generation since this task was scheduled.
    if (localStartOrder == currentStartOrder) {
      poll();
    }
  }
  catch (Exception e) {
    log.makeAlert(e, "uncaught exception in segment manager polling thread").emit();
  }
  finally {
    readLock.unlock();
  }
}
},
/**
 * Called when the background lookup manager future fails. Cancellation is the normal
 * shutdown path and only logged at debug; any other exit raises an alert.
 */
@Override
public void onFailure(Throwable t)
{
  backgroundManagerExitedLatch.countDown();
  if (!backgroundManagerFuture.isCancelled()) {
    LOG.makeAlert(t, "Background lookup manager exited with error!").emit();
  } else {
    LOG.debug("Exited background lookup manager due to cancellation.");
  }
}
}
@Override public Iterator<ServerHolder> pickServersToDrop(DataSegment toDrop, NavigableSet<ServerHolder> serverHolders) { List<ListenableFuture<Pair<Double, ServerHolder>>> futures = Lists.newArrayList(); for (final ServerHolder server : serverHolders) { futures.add( exec.submit( () -> Pair.of(computeCost(toDrop, server, true), server) ) ); } final ListenableFuture<List<Pair<Double, ServerHolder>>> resultsFuture = Futures.allAsList(futures); try { // results is an un-ordered list of a pair consisting of the 'cost' of a segment being on a server and the server List<Pair<Double, ServerHolder>> results = resultsFuture.get(); return results.stream() // Comparator.comapringDouble will order by lowest cost... // reverse it because we want to drop from the highest cost servers first .sorted(Comparator.comparingDouble((Pair<Double, ServerHolder> o) -> o.lhs).reversed()) .map(x -> x.rhs).collect(Collectors.toList()) .iterator(); } catch (Exception e) { log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit(); } return Collections.emptyIterator(); }
/**
 * Notice-processing loop: takes notices off the queue and handles them until interrupted.
 * A failing notice is alerted (with its class name) and the loop continues; interruption
 * exits the loop.
 */
@Override
public void run()
{
  try {
    while (!Thread.currentThread().isInterrupted()) {
      final Notice notice = notices.take();
      try {
        notice.handle();
      }
      catch (Throwable e) {
        log.makeAlert(e, "KafkaSupervisor[%s] failed to handle notice", dataSource)
           .addData("noticeClass", notice.getClass().getSimpleName())
           .emit();
      }
    }
  }
  catch (InterruptedException e) {
    // Restore the interrupt status before the thread exits (the original dropped it).
    Thread.currentThread().interrupt();
    log.info("KafkaSupervisor[%s] interrupted, exiting", dataSource);
  }
}
}
// Emit an alert (rather than rethrow) when the multithreaded cost computation fails.
log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit();
private void mainLoop() { try { while (!Thread.currentThread().isInterrupted()) { final Notice notice = notices.take(); try { notice.handle(); } catch (InterruptedException e) { // Will be caught and logged in the outer try block throw e; } catch (Exception e) { log.makeAlert(e, "Failed to handle notice") .addData("noticeClass", notice.getClass().getSimpleName()) .addData("noticeTaskId", notice.getTaskId()) .emit(); } } } catch (InterruptedException e) { log.info("WorkerTaskMonitor interrupted, exiting."); } finally { doneStopping.countDown(); } }
/**
 * Picks the default server. If none can be found, emits a high-priority alert and throws ISE,
 * failing the request.
 */
public Server pickDefaultServer()
{
  final Server server = findDefaultServer();
  if (server != null) {
    return server;
  }
  log.makeAlert(
      "Catastrophic failure! No servers found at all! Failing request!"
  ).emit();
  throw new ISE("No default server found!");
}