private void handleAuthorizationCheckError(
    String errorMsg,
    HttpServletRequest servletRequest,
    HttpServletResponse servletResponse
)
{
  // Send out an alert so there's a centralized collection point for seeing errors of this nature
  log.makeAlert(errorMsg)
     .addData("uri", servletRequest.getRequestURI())
     .addData("method", servletRequest.getMethod())
     .addData("remoteAddr", servletRequest.getRemoteAddr())
     .addData("remoteHost", servletRequest.getRemoteHost())
     .emit();

  if (servletResponse.isCommitted()) {
    throw new ISE(errorMsg);
  } else {
    try {
      servletResponse.sendError(Response.SC_FORBIDDEN);
    }
    catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
public AlertBuilder makeAlert(Throwable t, String message, Object... objects)
{
  if (emitter == null) {
    final String errorMessage = StringUtils.format(
        "Emitter not initialized! Cannot alert. Please make sure to call %s.registerEmitter()",
        this.getClass()
    );
    error(errorMessage);
    throw new ISE(errorMessage);
  }

  final AlertBuilder retVal = new EmittingAlertBuilder(t, StringUtils.format(message, objects), emitter)
      .addData("class", className);

  if (t != null) {
    // Capture the full stack trace so the emitted alert is self-contained for centralized triage.
    final StringWriter trace = new StringWriter();
    final PrintWriter pw = new PrintWriter(trace);
    t.printStackTrace(pw);
    retVal.addData("exceptionType", t.getClass());
    retVal.addData("exceptionMessage", t.getMessage());
    retVal.addData("exceptionStackTrace", trace.toString());
  }

  return retVal;
}
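// A minimal wiring sketch for the guard above: makeAlert() refuses to run until an
// emitter has been registered. The package paths and the static registerEmitter()
// signature are assumptions based on the error message and common Druid versions,
// not taken from this listing; AlertWiringExample and its methods are hypothetical.
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.java.util.emitter.service.ServiceEmitter;

class AlertWiringExample
{
  private static final EmittingLogger log = new EmittingLogger(AlertWiringExample.class);

  static void startUp(ServiceEmitter emitter)
  {
    // Register once at process startup; afterwards any call site may emit alerts.
    EmittingLogger.registerEmitter(emitter);
  }

  static void onFailure(Exception e, String dataSource)
  {
    // The recurring call-site shape throughout this listing: build, attach context, emit.
    log.makeAlert(e, "Failed to process dataSource[%s]", dataSource)
       .addData("dataSource", dataSource)
       .emit();
  }
}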
private void removeSegment(final Sink sink, final File target)
{
  if (target.exists()) {
    try {
      log.info("Deleting Index File[%s]", target);
      FileUtils.deleteDirectory(target);
    }
    catch (Exception e) {
      log.makeAlert(e, "Unable to remove file for dataSource[%s]", schema.getDataSource())
         .addData("file", target)
         .addData("interval", sink.getInterval())
         .emit();
    }
  }
}
private void mainLoop()
{
  try {
    while (!Thread.currentThread().isInterrupted()) {
      final Notice notice = notices.take();

      try {
        notice.handle();
      }
      catch (InterruptedException e) {
        // Will be caught and logged in the outer try block
        throw e;
      }
      catch (Exception e) {
        log.makeAlert(e, "Failed to handle notice")
           .addData("noticeClass", notice.getClass().getSimpleName())
           .addData("noticeTaskId", notice.getTaskId())
           .emit();
      }
    }
  }
  catch (InterruptedException e) {
    log.info("WorkerTaskMonitor interrupted, exiting.");
  }
  finally {
    doneStopping.countDown();
  }
}
public Set<DataSegment> getOrderedAvailableDataSegments()
{
  Set<DataSegment> availableSegments = Sets.newTreeSet(SEGMENT_COMPARATOR);

  Iterable<DataSegment> dataSegments = getAvailableDataSegments();

  for (DataSegment dataSegment : dataSegments) {
    if (dataSegment.getSize() < 0) {
      // Alert on the anomaly, but still include the segment so it stays available.
      log.makeAlert("No size on Segment, wtf?")
         .addData("segment", dataSegment)
         .emit();
    }
    availableSegments.add(dataSegment);
  }

  return availableSegments;
}
@Override
public void run()
{
  try {
    while (!Thread.currentThread().isInterrupted()) {
      final Notice notice = notices.take();
      try {
        notice.handle();
      }
      catch (Throwable e) {
        // Alert and keep the supervisor loop alive; one bad notice must not kill it.
        log.makeAlert(e, "KafkaSupervisor[%s] failed to handle notice", dataSource)
           .addData("noticeClass", notice.getClass().getSimpleName())
           .emit();
      }
    }
  }
  catch (InterruptedException e) {
    log.info("KafkaSupervisor[%s] interrupted, exiting", dataSource);
  }
}
protected void reduceLifetimes(String tier)
{
  for (BalancerSegmentHolder holder : currentlyMovingSegments.get(tier).values()) {
    holder.reduceLifetime();
    if (holder.getLifetime() <= 0) {
      log.makeAlert("[%s]: Balancer move segments queue has a segment stuck", tier)
         .addData("segment", holder.getSegment().getIdentifier())
         .addData("server", holder.getFromServer().getMetadata())
         .emit();
    }
  }
}
private void removeDirectory(final File target)
{
  if (target.exists()) {
    try {
      log.info("Deleting Index File[%s]", target);
      FileUtils.deleteDirectory(target);
    }
    catch (Exception e) {
      log.makeAlert(e, "Failed to remove directory[%s]", schema.getDataSource())
         .addData("file", target)
         .emit();
    }
  }
}
@Override
public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException
{
  log.info("Performing action for task[%s]: %s", task.getId(), taskAction);

  if (taskAction.isAudited()) {
    // Add audit log
    try {
      storage.addAuditLog(task, taskAction);
    }
    catch (Exception e) {
      final String actionClass = taskAction.getClass().getName();
      // Emit the alert, then rethrow: audit failures must not be silently swallowed.
      log.makeAlert(e, "Failed to record action in audit log")
         .addData("task", task.getId())
         .addData("actionClass", actionClass)
         .emit();
      throw new ISE(e, "Failed to record action [%s] in audit log", actionClass);
    }
  }

  return taskAction.perform(task, toolbox);
}
@Override
public void run()
{
  while (true) {
    try {
      manage();
      break;
    }
    catch (InterruptedException e) {
      log.info("Interrupted, exiting!");
      break;
    }
    catch (Exception e) {
      final long restartDelay = config.getRestartDelay().getMillis();
      // Alert, back off for the configured delay, then retry the management loop.
      log.makeAlert(e, "Failed to manage").addData("restartDelay", restartDelay).emit();
      try {
        Thread.sleep(restartDelay);
      }
      catch (InterruptedException e2) {
        log.info("Interrupted, exiting!");
        break;
      }
    }
  }
}
@Override
public Void apply(Throwable throwable)
{
  final List<String> segmentIdentifierStrings = Lists.transform(
      segmentsToPush,
      new Function<SegmentIdentifier, String>()
      {
        @Override
        public String apply(SegmentIdentifier input)
        {
          return input.getIdentifierAsString();
        }
      }
  );

  log.makeAlert(throwable, "Failed to publish merged indexes[%s]", schema.getDataSource())
     .addData("segments", segmentIdentifierStrings)
     .emit();

  if (shuttingDown) {
    // We're trying to shut down, and these segments failed to push. Let's just get rid of them.
    // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
    cleanShutdown = false;
    for (SegmentIdentifier identifier : segmentsToPush) {
      dropSegment(identifier);
    }
  }

  return null;
}
@Override
public void run()
{
  try {
    synchronized (segmentDeleteLock) {
      if (segmentsToDelete.remove(segment)) {
        segmentManager.dropSegment(segment);

        File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
        if (!segmentInfoCacheFile.delete()) {
          log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
        }
      }
    }
  }
  catch (Exception e) {
    log.makeAlert(e, "Failed to remove segment! Possible resource leak!")
       .addData("segment", segment)
       .emit();
  }
}
private CoordinatorStats assign(
    final Set<ServerHolder> serverHolders,
    final DataSegment segment
)
{
  final CoordinatorStats stats = new CoordinatorStats();
  stats.addToGlobalStat(LoadRule.ASSIGNED_COUNT, 0);

  for (ServerHolder holder : serverHolders) {
    if (segment.getSize() > holder.getAvailableSize()) {
      log.makeAlert("Failed to broadcast segment for [%s]", segment.getDataSource())
         .addData("segmentId", segment.getIdentifier())
         .addData("segmentSize", segment.getSize())
         .addData("hostName", holder.getServer().getHost())
         .addData("availableSize", holder.getAvailableSize())
         .emit();
    } else {
      if (!holder.isLoadingSegment(segment)) {
        holder.getPeon().loadSegment(segment, null);
        stats.addToGlobalStat(LoadRule.ASSIGNED_COUNT, 1);
      }
    }
  }

  return stats;
}
@Override
public void unannounce(DruidNode service)
{
  final String serviceName = CuratorServiceUtils.makeCanonicalServiceName(service.getServiceName());
  final ServiceInstance<Void> instance;

  synchronized (monitor) {
    instance = instanceMap.get(serviceName);
    if (instance == null) {
      log.warn("Ignoring request to unannounce service[%s]", service);
      return;
    }
  }

  log.info("Unannouncing service[%s]", service);
  try {
    discovery.unregisterService(instance);
  }
  catch (Exception e) {
    log.makeAlert(e, "Failed to unannounce service[%s], zombie znode perhaps in existence.", serviceName)
       .addData("service", service)
       .emit();
  }
  finally {
    synchronized (monitor) {
      instanceMap.remove(serviceName);
    }
  }
}
private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query)
{
  QueryRunner<T> queryRunner = null;
  final String queryDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());

  for (final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem : ImmutableList.copyOf(runningItems)) {
    final Task task = taskRunnerWorkItem.getTask();
    if (task.getDataSource().equals(queryDataSource)) {
      final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);

      if (taskQueryRunner != null) {
        if (queryRunner == null) {
          queryRunner = taskQueryRunner;
        } else {
          log.makeAlert("Found too many query runners for datasource")
             .addData("dataSource", queryDataSource)
             .emit();
        }
      }
    }
  }

  return new SetAndVerifyContextQueryRunner(
      serverConfig,
      queryRunner == null ? new NoopQueryRunner<T>() : queryRunner
  );
}
private void addSegment(final SegmentIdentifier identifier)
{
  segments.put(identifier.getInterval().getStartMillis(), identifier);
  try {
    segmentAnnouncer.announceSegment(
        new DataSegment(
            identifier.getDataSource(),
            identifier.getInterval(),
            identifier.getVersion(),
            ImmutableMap.<String, Object>of(),
            ImmutableList.<String>of(),
            ImmutableList.<String>of(),
            identifier.getShardSpec(),
            null,
            0
        )
    );
  }
  catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", identifier.getDataSource())
       .addData("interval", identifier.getInterval())
       .emit();
  }
}
private void sendResetRequestAndWait(Map<TopicPartition, Long> outOfRangePartitions, TaskToolbox taskToolbox)
    throws IOException
{
  Map<Integer, Long> partitionOffsetMap = Maps.newHashMap();
  for (Map.Entry<TopicPartition, Long> outOfRangePartition : outOfRangePartitions.entrySet()) {
    partitionOffsetMap.put(outOfRangePartition.getKey().partition(), outOfRangePartition.getValue());
  }

  boolean result = taskToolbox.getTaskActionClient()
                              .submit(new ResetDataSourceMetadataAction(
                                  getDataSource(),
                                  new KafkaDataSourceMetadata(new KafkaPartitions(
                                      ioConfig.getStartPartitions().getTopic(),
                                      partitionOffsetMap
                                  ))
                              ));

  if (result) {
    log.makeAlert("Resetting Kafka offsets for datasource [%s]", getDataSource())
       .addData("partitions", partitionOffsetMap.keySet())
       .emit();
    // Wait to be killed by the supervisor.
    requestPause();
  } else {
    log.makeAlert("Failed to send reset request for partitions [%s]", partitionOffsetMap.keySet()).emit();
  }
}
private void update(String tier, ReplicatorSegmentHolder holder, Map<String, Boolean> lookup, String type)
{
  int size = holder.getNumProcessing(tier);
  if (size != 0) {
    log.info(
        "[%s]: Replicant %s queue still has %d segments. Lifetime[%d]. Segments %s",
        tier,
        type,
        size,
        holder.getLifetime(tier),
        holder.getCurrentlyProcessingSegmentsAndHosts(tier)
    );
    holder.reduceLifetime(tier);
    lookup.put(tier, false);

    if (holder.getLifetime(tier) < 0) {
      log.makeAlert("[%s]: Replicant %s queue stuck after %d+ runs!", tier, type, maxLifetime)
         .addData("segments", holder.getCurrentlyProcessingSegmentsAndHosts(tier))
         .emit();
    }
  } else {
    log.info("[%s]: Replicant %s queue is empty.", tier, type);
    lookup.put(tier, true);
    holder.resetLifetime(tier);
  }
}
private void addSink(final Sink sink)
{
  sinks.put(sink.getInterval().getStartMillis(), sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(
      sink.getInterval(),
      sink.getVersion(),
      new SingleElementPartitionChunk<Sink>(sink)
  );
  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  }
  catch (IOException e) {
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }
}
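// A distilled sketch of the two alert-handling shapes that recur above. All names
// here (AlertPatternSketch, doWork, handleRequest, dataSource) are hypothetical
// stand-ins, not code from this listing; the EmittingLogger import path is the same
// assumption as in the wiring sketch earlier. The key per-site decision: long-running
// notice/management loops alert and continue so a single failure cannot kill the
// thread, while request and audit paths alert and then rethrow so the caller still
// observes the error.
import org.apache.druid.java.util.emitter.EmittingLogger;

class AlertPatternSketch
{
  private static final EmittingLogger log = new EmittingLogger(AlertPatternSketch.class);

  // Shape 1: alert-and-continue, used inside worker/supervisor loops.
  void runLoop()
  {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        doWork();
      }
      catch (Exception e) {
        log.makeAlert(e, "Failed to do work").emit();
        // Swallow: stay alive and process the next item.
      }
    }
  }

  // Shape 2: alert-and-rethrow, used where the caller must see the failure.
  void handleRequest(String dataSource)
  {
    try {
      doWork();
    }
    catch (Exception e) {
      log.makeAlert(e, "Failed to handle request for dataSource[%s]", dataSource)
         .addData("dataSource", dataSource)
         .emit();
      throw new RuntimeException(e);
    }
  }

  private void doWork()
  {
    // Hypothetical work that may throw.
  }
}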