// NOTE(review): truncated excerpt — this span mixes a catch-clause throw (presumably from a
// URL-validation try/catch in start()) with emitter initialization; the enclosing method is
// not visible here, so braces are unbalanced. Verify against the full file before editing.
throw new ISE(e, "Bad URL: %s", config.getRecipientBaseUrl());
emittingThread = new EmittingThread(config);
// Batch numbering starts at 1 for the first batch installed into concurrentBatch.
long firstBatchNumber = 1;
concurrentBatch.set(new Batch(this, acquireBuffer(), firstBatchNumber));
@Override public void run() { while (true) { boolean needsToShutdown = needsToShutdown(); try { emitLargeEvents(); emitBatches(); tryEmitOneFailedBuffer(); if (needsToShutdown) { tryEmitAndDrainAllFailedBuffers(); // Make GC life easier drainBuffersToReuse(); return; } } catch (Throwable t) { log.error(t, "Uncaught exception in EmittingThread.run()"); } if (failedBuffers.isEmpty()) { // Waiting for 1/2 of config.getFlushMillis() in order to flush events not more than 50% later than specified. // If nanos=0 parkNanos() doesn't wait at all, then we don't want. long waitNanos = Math.max(TimeUnit.MILLISECONDS.toNanos(config.getFlushMillis()) / 2, 1); LockSupport.parkNanos(HttpPostEmitter.this, waitNanos); } } }
@SuppressWarnings("ArrayEquality") private void emitLargeEvents() { if (largeEventsToEmit.isEmpty()) { return; } // Don't try to emit large events until exhaustion, to avoid starvation of "normal" batches, if large event // posting rate is too high, though it should never happen in practice. largeEventsToEmit.add(LARGE_EVENTS_STOP); for (byte[] largeEvent; (largeEvent = largeEventsToEmit.poll()) != LARGE_EVENTS_STOP; ) { emitLargeEvent(largeEvent); approximateBuffersToEmitCount.decrementAndGet(); approximateLargeEventsToEmitCount.decrementAndGet(); approximateEventsToEmitCount.decrementAndGet(); } }
// NOTE(review): heavily truncated excerpt of send(byte[], int) — only scattered statements from
// the timeout check, the GZIP content-encoding branch, and several error-accounting paths
// survive; braces and try/catch structure are incomplete. Do not treat this span as compilable;
// verify every statement against the full file before changing it.
private void send(byte[] buffer, int length) throws Exception
final long timeoutMillis = sendRequestTimeoutMillis(lastFillTimeMillis);
if (timeoutMillis < config.getMinHttpTimeoutMillis()) {
throw timeoutLessThanMinimumException;
switch (contentEncoding) {
case GZIP:
// try-with-resources returns the GZIP stream to its pool (acquire/release pairing assumed —
// TODO confirm acquireGzipOutputStream semantics in the full file).
try (GZIPOutputStream gzipOutputStream = acquireGzipOutputStream(length)) {
gzipOutputStream.write(buffer, 0, length);
accountFailedSending(sendingStartMs);
if (e.getCause() instanceof TimeoutException) {
log.error(
accountFailedSending(sendingStartMs);
throw new ISE(
"Received HTTP status 413 from [%s]. Batch size of [%d] may be too large, "
accountFailedSending(sendingStartMs);
throw new ISE(
"Emissions of events not successful[%d: %s], with message[%s].",
accountSuccessfulSending(sendingStartMs);
// NOTE(review): truncated excerpt — this span mixes a catch-clause throw (presumably from a
// URL-validation try/catch in start()) with emitter initialization; the enclosing method is
// not visible here, so braces are unbalanced. Verify against the full file before editing.
throw new ISE(e, "Bad URL: %s", config.getRecipientBaseUrl());
emittingThread = new EmittingThread(config);
// Batch numbering starts at 1 for the first batch installed into concurrentBatch.
long firstBatchNumber = 1;
concurrentBatch.set(new Batch(this, acquireBuffer(), firstBatchNumber));
// Main loop of the emitting thread. Each iteration drains large events, queued batches,
// and retries one failed buffer; parks between iterations when there is no failed backlog.
@Override
public void run()
{
  while (true) {
    // Read the shutdown flag before draining, so events enqueued before the shutdown request
    // are still flushed on this final pass.
    boolean needsToShutdown = needsToShutdown();
    try {
      emitLargeEvents();
      emitBatches();
      tryEmitOneFailedBuffer();
      if (needsToShutdown) {
        tryEmitAndDrainAllFailedBuffers();
        // Make GC life easier
        drainBuffersToReuse();
        return;
      }
    }
    catch (Throwable t) {
      log.error(t, "Uncaught exception in EmittingThread.run()");
    }
    if (failedBuffers.isEmpty()) {
      // Waiting for 1/2 of config.getFlushMillis() in order to flush events not more than 50% later than specified.
      // parkNanos(0) doesn't wait at all, which we don't want — hence the lower bound of 1ns.
      long waitNanos = Math.max(TimeUnit.MILLISECONDS.toNanos(config.getFlushMillis()) / 2, 1);
      LockSupport.parkNanos(HttpPostEmitter.this, waitNanos);
    }
  }
}
@Override @LifecycleStop public void close() throws IOException { synchronized (startLock) { if (running) { running = false; Object lastBatch = concurrentBatch.getAndSet(null); if (lastBatch instanceof Batch) { flush((Batch) lastBatch); } emittingThread.shuttingDown = true; // EmittingThread is interrupted after the last batch is flushed. emittingThread.interrupt(); } } }
// Posts each queued large event individually, stopping at a sentinel so this drain is bounded
// to the events present on entry. The sentinel is matched by reference identity, which is why
// the ArrayEquality warning is suppressed.
@SuppressWarnings("ArrayEquality")
private void emitLargeEvents()
{
  if (largeEventsToEmit.isEmpty()) {
    return;
  }
  // Don't try to emit large events until exhaustion, to avoid starvation of "normal" batches, if large event
  // posting rate is too high, though it should never happen in practice.
  largeEventsToEmit.add(LARGE_EVENTS_STOP);
  for (byte[] largeEvent; (largeEvent = largeEventsToEmit.poll()) != LARGE_EVENTS_STOP; ) {
    emitLargeEvent(largeEvent);
    // Keep the approximate backlog counters in step with each drained event.
    approximateBuffersToEmitCount.decrementAndGet();
    approximateLargeEventsToEmitCount.decrementAndGet();
    approximateEventsToEmitCount.decrementAndGet();
  }
}
// Blocks until the emitting thread terminates; exposed for tests to await shutdown completion.
@VisibleForTesting
void joinEmitterThread() throws InterruptedException
{
  emittingThread.join();
}
}
/** Sends every batch currently waiting in the emit queue, draining it until empty. */
private void emitBatches()
{
  Batch nextBatch;
  while ((nextBatch = pollBatchFromEmitQueue()) != null) {
    emit(nextBatch);
  }
}
// NOTE(review): heavily truncated excerpt of send(byte[], int) — only scattered statements from
// the timeout check, the GZIP content-encoding branch, and several error-accounting paths
// survive; braces and try/catch structure are incomplete. Do not treat this span as compilable;
// verify every statement against the full file before changing it.
private void send(byte[] buffer, int length) throws Exception
final long timeoutMillis = computeTimeoutForSendRequestInMillis(lastFillTimeMillis);
if (timeoutMillis < config.getMinHttpTimeoutMillis()) {
throw timeoutLessThanMinimumException;
switch (contentEncoding) {
case GZIP:
// try-with-resources returns the GZIP stream to its pool (acquire/release pairing assumed —
// TODO confirm acquireGzipOutputStream semantics in the full file).
try (GZIPOutputStream gzipOutputStream = acquireGzipOutputStream(length)) {
gzipOutputStream.write(buffer, 0, length);
accountFailedSending(sendingStartMs);
if (e.getCause() instanceof TimeoutException) {
log.error(
accountFailedSending(sendingStartMs);
throw new ISE(
"Received HTTP status 413 from [%s]. Batch size of [%d] may be too large, "
accountFailedSending(sendingStartMs);
throw new ISE(
"Emissions of events not successful[%d: %s], with message[%s].",
accountSuccessfulSending(sendingStartMs);
// Flushes the batch currently accepting events and signals the emitting thread to shut down.
// Idempotent: guarded by `running` under startLock, so only the first call has any effect.
@Override
@LifecycleStop
public void close() throws IOException
{
  synchronized (startLock) {
    if (running) {
      running = false;
      // Detach the active batch so no new events are appended, then flush what it holds.
      Object lastBatch = concurrentBatch.getAndSet(null);
      if (lastBatch instanceof Batch) {
        flush((Batch) lastBatch);
      }
      emittingThread.shuttingDown = true;
      // EmittingThread is interrupted after the last batch is flushed.
      emittingThread.interrupt();
    }
  }
}
// Sends every batch currently waiting in the emit queue, draining it until empty.
private void emitBatches()
{
  for (Batch batch; (batch = pollBatchFromEmitQueue()) != null; ) {
    emit(batch);
  }
}
// Blocks until the emitting thread terminates; exposed for tests to await shutdown completion.
@VisibleForTesting
void joinEmitterThread() throws InterruptedException
{
  emittingThread.join();
}
}