/**
 * Keeps the failedBuffers queue within the configured limit by evicting the oldest
 * buffer when full, updating the drop/size counters and logging the eviction.
 */
private void limitFailedBuffersSize()
{
  final int sizeLimit = config.getBatchQueueSizeLimit();
  if (failedBuffers.size() >= sizeLimit) {
    // Evict oldest first: newer failures are more likely to still be relevant.
    failedBuffers.removeFirst();
    approximateFailedBuffersCount.decrementAndGet();
    droppedBuffers.incrementAndGet();
    log.error(
        "failedBuffers queue size reached the limit [%d], dropping the oldest failed buffer",
        sizeLimit
    );
  }
}
// NOTE(review): this span is an incomplete fragment of the HttpPostEmitter constructor —
// braces, the Preconditions.format arguments, and the catch clause around `new URL(...)`
// are missing from the visible text, so the code is left byte-for-byte unchanged.
// NOTE(review): the precondition message says "greater than" while the visible check is
// `>=` — confirm which is intended against the full source before changing either.
public HttpPostEmitter(HttpEmitterConfig config, AsyncHttpClient client, ObjectMapper jsonMapper) batchingStrategy = config.getBatchingStrategy(); final int batchOverhead = batchingStrategy.batchStartLength() + batchingStrategy.batchEndLength(); Preconditions.checkArgument( config.getMaxBatchSize() >= MAX_EVENT_SIZE + batchOverhead, StringUtils.format( "maxBatchSize must be greater than MAX_EVENT_SIZE[%,d] + overhead[%,d].", this.bufferSize = config.getMaxBatchSize(); this.maxBufferWatermark = bufferSize - batchingStrategy.batchEndLength(); this.jsonMapper = jsonMapper; try { this.url = new URL(config.getRecipientBaseUrl()).toString(); throw new ISE(e, "Bad URL: %s", config.getRecipientBaseUrl());
/**
 * Called after appending an event: seals the batch when the configured flush count
 * is reached, or when the flush interval has elapsed since the batch's first event;
 * otherwise simply releases the lock.
 */
private void unlockAndSealIfNeeded()
{
  final long newEventCount = eventCount.incrementAndGet();
  if (newEventCount >= emitter.config.getFlushCount()) {
    unlockAndSeal();
    return;
  }
  final long elapsedSinceFirstEvent = System.currentTimeMillis() - firstEventTimestamp;
  // firstEventTimestamp > 0 means at least one event has already been recorded.
  if (firstEventTimestamp > 0 && elapsedSinceFirstEvent > emitter.config.getFlushMillis()) {
    unlockAndSeal();
  } else {
    unlock();
  }
}
@Test
public void testDefaultsLegacy()
{
  // Only the recipient URL is supplied via legacy properties; every other
  // setting must come from HttpEmitterConfig's defaults.
  final Properties legacyProps = new Properties();
  legacyProps.put("org.apache.druid.java.util.emitter.http.url", "http://example.com/");

  final ObjectMapper mapper = new ObjectMapper();
  final HttpEmitterConfig emitterConfig =
      mapper.convertValue(Emitters.makeHttpMap(legacyProps), HttpEmitterConfig.class);

  Assert.assertEquals(60000, emitterConfig.getFlushMillis());
  Assert.assertEquals(300, emitterConfig.getFlushCount());
  Assert.assertEquals("http://example.com/", emitterConfig.getRecipientBaseUrl());
  Assert.assertNull(emitterConfig.getBasicAuthentication());
  Assert.assertEquals(BatchingStrategy.ARRAY, emitterConfig.getBatchingStrategy());
  // Batch size and queue limit defaults are derived from the JVM's max heap.
  final Pair<Integer, Integer> batchDefaults = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit(
      Runtime.getRuntime().maxMemory()
  );
  Assert.assertEquals(batchDefaults.lhs.intValue(), emitterConfig.getMaxBatchSize());
  Assert.assertEquals(batchDefaults.rhs.intValue(), emitterConfig.getBatchQueueSizeLimit());
  Assert.assertEquals(Long.MAX_VALUE, emitterConfig.getFlushTimeOut());
  Assert.assertEquals(2.0f, emitterConfig.getHttpTimeoutAllowanceFactor(), 0.0f);
  Assert.assertEquals(0, emitterConfig.getMinHttpTimeoutMillis());
}
// NOTE(review): incomplete fragment of the HTTP send path — the switch body, the
// 413-handling branch, and most surrounding braces are cut off in the visible text,
// so the code is left byte-for-byte unchanged.
// NOTE(review): the 413 message says "maxBatchSizeBatch property", which looks like a
// typo for "maxBatchSize" — it is a runtime string, so verify against the full source
// and any log-scraping consumers before fixing it.
if (timeoutMillis < config.getMinHttpTimeoutMillis()) { throw timeoutLessThanMinimumException; byte[] payload; int payloadLength; ContentEncoding contentEncoding = config.getContentEncoding(); if (contentEncoding != null) { switch (contentEncoding) { request.setBody(ByteBuffer.wrap(payload, 0, payloadLength)); if (config.getBasicAuthentication() != null) { final String[] parts = config.getBasicAuthentication().split(":", 2); final String user = parts[0]; final String password = parts.length > 1 ? parts[1] : ""; "Received HTTP status 413 from [%s]. Batch size of [%d] may be too large, " + "try adjusting maxBatchSizeBatch property", config.getRecipientBaseUrl(), config.getMaxBatchSize() );
/**
 * Seals this batch if the configured flush interval has elapsed since its first event.
 * A non-positive firstEventTimestamp means no event has been recorded yet, so nothing
 * is sealed in that case.
 */
void sealIfFlushNeeded()
{
  final boolean hasEvents = firstEventTimestamp > 0;
  final long elapsedMillis = System.currentTimeMillis() - firstEventTimestamp;
  if (hasEvents && elapsedMillis > emitter.config.getFlushMillis()) {
    seal();
  }
}
EmittingThread(HttpEmitterConfig config) { super("HttpPostEmitter-" + instanceCounter.incrementAndGet()); setDaemon(true); timeoutLessThanMinimumException = new TimeoutException( "Timeout less than minimum [" + config.getMinHttpTimeoutMillis() + "] ms." ); // To not showing and writing nonsense and misleading stack trace in logs. timeoutLessThanMinimumException.setStackTrace(new StackTraceElement[]{}); }
private void flush(Batch batch) throws IOException { if (batch == null) { return; } batch.seal(); try { // This check doesn't always awaits for this exact batch to be emitted, because another batch could be dropped // from the queue ahead of this one, in limitBuffersToEmitSize(). But there is no better way currently to wait for // the exact batch, and it's not that important. emittedBatchCounter.awaitCount(batch.batchNumber, config.getFlushTimeOut(), TimeUnit.MILLISECONDS); } catch (TimeoutException e) { String message = StringUtils.format("Timed out after [%d] millis during flushing", config.getFlushTimeOut()); throw new IOException(message, e); } catch (InterruptedException e) { log.debug("Thread Interrupted"); Thread.currentThread().interrupt(); throw new IOException("Thread Interrupted while flushing", e); } }
// NOTE(review): incomplete fragment of an error-handling if/else — the enclosing
// condition and closing braces are cut off in the visible text; left byte-for-byte
// unchanged. Presumably the debug branch covers the pre-built below-minimum-timeout
// exception (which carries no stack trace) — confirm against the full source.
log.debug(e, "Failed to send events to url[%s] with timeout less than minimum", config.getRecipientBaseUrl()); } else { log.error(e, "Failed to send events to url[%s]", config.getRecipientBaseUrl());
/**
 * Materializes an immutable {@link HttpEmitterConfig} from this builder's current state.
 */
public HttpEmitterConfig build()
{
  final HttpEmitterConfig built = new HttpEmitterConfig(this, recipientBaseUrl);
  return built;
}
}
@Test
public void testSettingEverythingLegacy()
{
  // Every legacy property is set explicitly; each value must survive the
  // Emitters.makeHttpMap conversion into HttpEmitterConfig unchanged.
  final Properties legacyProps = new Properties();
  legacyProps.setProperty("org.apache.druid.java.util.emitter.flushMillis", "1");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.flushCount", "2");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.url", "http://example.com/");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.basicAuthentication", "a:b");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.batchingStrategy", "newlines");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.maxBatchSize", "4");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.flushTimeOut", "1000");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.batchQueueSizeLimit", "2500");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.httpTimeoutAllowanceFactor", "3.0");
  legacyProps.setProperty("org.apache.druid.java.util.emitter.http.minHttpTimeoutMillis", "100");

  final ObjectMapper mapper = new ObjectMapper();
  final HttpEmitterConfig emitterConfig =
      mapper.convertValue(Emitters.makeHttpMap(legacyProps), HttpEmitterConfig.class);

  Assert.assertEquals(1, emitterConfig.getFlushMillis());
  Assert.assertEquals(2, emitterConfig.getFlushCount());
  Assert.assertEquals("http://example.com/", emitterConfig.getRecipientBaseUrl());
  Assert.assertEquals("a:b", emitterConfig.getBasicAuthentication());
  Assert.assertEquals(BatchingStrategy.NEWLINES, emitterConfig.getBatchingStrategy());
  Assert.assertEquals(4, emitterConfig.getMaxBatchSize());
  Assert.assertEquals(1000, emitterConfig.getFlushTimeOut());
  Assert.assertEquals(2500, emitterConfig.getBatchQueueSizeLimit());
  Assert.assertEquals(3.0f, emitterConfig.getHttpTimeoutAllowanceFactor(), 0.0f);
  Assert.assertEquals(100, emitterConfig.getMinHttpTimeoutMillis());
}
// NOTE(review): incomplete fragment of the HTTP send path — the switch body, the
// 413-handling branch, and most surrounding braces are cut off in the visible text,
// so the code is left byte-for-byte unchanged.
// NOTE(review): "maxBatchSizeBatch property" in the 413 message looks like a typo for
// "maxBatchSize" — it is a runtime string; verify against the full source before fixing.
if (timeoutMillis < config.getMinHttpTimeoutMillis()) { throw timeoutLessThanMinimumException; byte[] payload; int payloadLength; ContentEncoding contentEncoding = config.getContentEncoding(); if (contentEncoding != null) { switch (contentEncoding) { request.setBody(ByteBuffer.wrap(payload, 0, payloadLength)); if (config.getBasicAuthentication() != null) { final String[] parts = config.getBasicAuthentication().split(":", 2); final String user = parts[0]; final String password = parts.length > 1 ? parts[1] : ""; "Received HTTP status 413 from [%s]. Batch size of [%d] may be too large, " + "try adjusting maxBatchSizeBatch property", config.getRecipientBaseUrl(), config.getMaxBatchSize() );
@Override
public void run()
{
  // Main loop of the emitting thread: each iteration pushes out pending large events,
  // regular batches, and at most one previously-failed buffer, then either shuts down
  // (after attempting to drain all failed buffers) or parks until more work arrives.
  while (true) {
    // Sample the shutdown flag BEFORE emitting, so that events queued prior to the
    // shutdown request are still sent on this final pass.
    boolean needsToShutdown = needsToShutdown();
    try {
      emitLargeEvents();
      emitBatches();
      tryEmitOneFailedBuffer();
      if (needsToShutdown) {
        tryEmitAndDrainAllFailedBuffers();
        // Make GC life easier
        drainBuffersToReuse();
        return;
      }
    }
    catch (Throwable t) {
      // Never let an exception kill the emitting thread; log and keep looping.
      log.error(t, "Uncaught exception in EmittingThread.run()");
    }
    if (failedBuffers.isEmpty()) {
      // Wait for 1/2 of config.getFlushMillis() so events are flushed no more than 50%
      // later than the configured interval. Math.max(..., 1) because parkNanos(0)
      // returns immediately without waiting, which is not what we want here.
      long waitNanos = Math.max(TimeUnit.MILLISECONDS.toNanos(config.getFlushMillis()) / 2, 1);
      LockSupport.parkNanos(HttpPostEmitter.this, waitNanos);
    }
  }
}
// Daemon worker thread that performs the actual HTTP emission in the background.
EmittingThread(HttpEmitterConfig config)
{
  super("HttpPostEmitter-" + instanceCounter.incrementAndGet());
  setDaemon(true);
  // Pre-built, reusable exception thrown whenever the computed timeout falls below
  // the configured minimum.
  timeoutLessThanMinimumException = new TimeoutException(
      "Timeout less than minimum [" + config.getMinHttpTimeoutMillis() + "] ms."
  );
  // Clear the stack trace: it would point at this constructor instead of the failing
  // emission, so showing or writing it in logs would be nonsense and misleading.
  timeoutLessThanMinimumException.setStackTrace(new StackTraceElement[]{});
}
/**
 * Seals the given batch and blocks until it has been emitted, the configured flush
 * timeout elapses, or the thread is interrupted. A null batch is a no-op.
 *
 * @throws IOException on flush timeout or interruption
 */
private void flush(Batch batch) throws IOException
{
  if (batch == null) {
    return;
  }
  batch.seal();
  try {
    // This check doesn't always awaits for this exact batch to be emitted, because another batch could be dropped
    // from the queue ahead of this one, in limitBuffersToEmitSize(). But there is no better way currently to wait for
    // the exact batch, and it's not that important.
    emittedBatchCounter.awaitCount(batch.batchNumber, config.getFlushTimeOut(), TimeUnit.MILLISECONDS);
  }
  catch (TimeoutException e) {
    String message = StringUtils.format("Timed out after [%d] millis during flushing", config.getFlushTimeOut());
    throw new IOException(message, e);
  }
  catch (InterruptedException e) {
    log.debug("Thread Interrupted");
    // Restore the interrupt flag for callers further up the stack.
    Thread.currentThread().interrupt();
    throw new IOException("Thread Interrupted while flushing", e);
  }
}
// NOTE(review): incomplete fragment of an error-handling if/else — the enclosing
// condition and closing braces are cut off in the visible text; left byte-for-byte
// unchanged. Presumably the debug branch covers the pre-built below-minimum-timeout
// exception (which carries no stack trace) — confirm against the full source.
log.debug(e, "Failed to send events to url[%s] with timeout less than minimum", config.getRecipientBaseUrl()); } else { log.error(e, "Failed to send events to url[%s]", config.getRecipientBaseUrl());
/**
 * Combines the shared HTTP-emitting settings with the given per-target base URI
 * into a concrete {@link HttpEmitterConfig}.
 */
public HttpEmitterConfig buildHttpEmitterConfig(String baseUri)
{
  final HttpEmitterConfig emitterConfig = new HttpEmitterConfig(httpEmittingConfig, baseUri);
  return emitterConfig;
}
@Test
public void testDefaults()
{
  // No properties at all: the parametrized-URI config must supply every default
  // when building the per-topic HttpEmitterConfig.
  final Properties emptyProps = new Properties();
  final ObjectMapper mapper = new ObjectMapper();
  final ParametrizedUriEmitterConfig uriConfig =
      mapper.convertValue(Emitters.makeCustomFactoryMap(emptyProps), ParametrizedUriEmitterConfig.class);
  final HttpEmitterConfig emitterConfig = uriConfig.buildHttpEmitterConfig("http://example.com/topic");

  Assert.assertEquals(60000, emitterConfig.getFlushMillis());
  Assert.assertEquals(500, emitterConfig.getFlushCount());
  Assert.assertEquals("http://example.com/topic", emitterConfig.getRecipientBaseUrl());
  Assert.assertNull(emitterConfig.getBasicAuthentication());
  Assert.assertEquals(BatchingStrategy.ARRAY, emitterConfig.getBatchingStrategy());
  // Batch size and queue limit defaults are derived from the JVM's max heap.
  final Pair<Integer, Integer> batchDefaults = BaseHttpEmittingConfig.getDefaultBatchSizeAndLimit(
      Runtime.getRuntime().maxMemory()
  );
  Assert.assertEquals(batchDefaults.lhs.intValue(), emitterConfig.getMaxBatchSize());
  Assert.assertEquals(batchDefaults.rhs.intValue(), emitterConfig.getBatchQueueSizeLimit());
  Assert.assertEquals(Long.MAX_VALUE, emitterConfig.getFlushTimeOut());
}
// NOTE(review): this span is an incomplete fragment of the HttpPostEmitter constructor —
// braces, the Preconditions.format arguments, and the catch clause around `new URL(...)`
// are missing from the visible text, so the code is left byte-for-byte unchanged.
// NOTE(review): the precondition message says "greater than" while the visible check is
// `>=` — confirm which is intended against the full source before changing either.
public HttpPostEmitter(HttpEmitterConfig config, AsyncHttpClient client, ObjectMapper jsonMapper) batchingStrategy = config.getBatchingStrategy(); final int batchOverhead = batchingStrategy.batchStartLength() + batchingStrategy.batchEndLength(); Preconditions.checkArgument( config.getMaxBatchSize() >= MAX_EVENT_SIZE + batchOverhead, StringUtils.format( "maxBatchSize must be greater than MAX_EVENT_SIZE[%,d] + overhead[%,d].", this.bufferSize = config.getMaxBatchSize(); this.maxBufferWatermark = bufferSize - batchingStrategy.batchEndLength(); this.jsonMapper = jsonMapper; try { this.url = new URL(config.getRecipientBaseUrl()).toString(); throw new ISE(e, "Bad URL: %s", config.getRecipientBaseUrl());
// Called after appending an event: seals the batch once the configured flush count is
// reached, or once the flush interval has elapsed since the batch's first event;
// otherwise just releases the lock.
private void unlockAndSealIfNeeded()
{
  if (eventCount.incrementAndGet() >= emitter.config.getFlushCount()) {
    unlockAndSeal();
  } else {
    long timeSinceFirstEvent = System.currentTimeMillis() - firstEventTimestamp;
    // firstEventTimestamp > 0 means at least one event has already been recorded.
    if (firstEventTimestamp > 0 && timeSinceFirstEvent > emitter.config.getFlushMillis()) {
      unlockAndSeal();
    } else {
      unlock();
    }
  }
}