// Avro GenericRecord message: serialize the record with its own schema
GenericRecord record = (GenericRecord) message;
populateAvroHeaders(headers, record.getSchema());
events.add(EventBuilder.withBody(serialize(record, record.getSchema()), headers));

// Message serialized against an explicitly configured Avro schema
events.add(EventBuilder.withBody(serialize(message, schema), headers));

// Plain string message: record the encoding in a header alongside the body
headersWithEncoding.put(Log4jAvroHeaders.MESSAGE_ENCODING.toString(), "UTF8");
events.add(EventBuilder.withBody(msg, Charset.forName("UTF8"), headersWithEncoding));
@Override
public void onMessage(String channel, String message) {
  Event event = EventBuilder.withBody(message, Charset.forName(charset));
  channelProcessor.processEvent(event);
}
private Sink.Status prepareAndSend(Context context, String msg) throws EventDeliveryException {
  Sink kafkaSink = new KafkaSink();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();

  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody(msg.getBytes());
  memoryChannel.put(event);
  tx.commit();
  tx.close();

  return kafkaSink.process();
}
public void append(ThriftFlumeEvent evt) {
  if (evt == null) {
    return;
  }

  Map<String, String> headers = new HashMap<String, String>();
  // extract Flume event headers
  headers.put(HOST, evt.getHost());
  headers.put(TIMESTAMP, Long.toString(evt.getTimestamp()));
  headers.put(PRIORITY, evt.getPriority().toString());
  headers.put(NANOS, Long.toString(evt.getNanos()));
  for (Entry<String, ByteBuffer> entry : evt.getFields().entrySet()) {
    headers.put(entry.getKey().toString(), UTF_8.decode(entry.getValue()).toString());
  }
  headers.put(OG_EVENT, "yes");

  Event event = EventBuilder.withBody(evt.getBody(), headers);
  counterGroup.incrementAndGet("rpc.events");

  try {
    getChannelProcessor().processEvent(event);
  } catch (ChannelException ex) {
    LOG.warn("Failed to process event", ex);
    return;
  }

  counterGroup.incrementAndGet("rpc.successful");
}
ByteBuffer bytes = Charsets.UTF_8.encode(buffer);

// copy the encoded bytes out of the buffer for the event body
byte[] body = new byte[bytes.remaining()];
bytes.get(body);
Event event = EventBuilder.withBody(body);

try {
  source.getChannelProcessor().processEvent(event);
} catch (ChannelException chEx) {
  ex = chEx;
}
public void onStatus(Status status) {
  Record doc = extractRecord("", avroSchema, status);
  if (doc == null) {
    return; // skip
  }
  docs.add(doc);

  if (docs.size() >= maxBatchSize || System.currentTimeMillis() >= batchEndTime) {
    batchEndTime = System.currentTimeMillis() + maxBatchDurationMillis;
    byte[] bytes;
    try {
      bytes = serializeToAvro(avroSchema, docs);
    } catch (IOException e) {
      LOGGER.error("Exception while serializing tweet", e);
      return; // skip
    }
    Event event = EventBuilder.withBody(bytes);
    getChannelProcessor().processEvent(event); // send event to the flume sink
    docs.clear();
  }

  docCount++;
  if ((docCount % REPORT_INTERVAL) == 0) {
    LOGGER.info(String.format("Processed %s docs", numFormatter.format(docCount)));
  }
  if ((docCount % STATS_INTERVAL) == 0) {
    logStats();
  }
}
public static Event withBody(byte[] body) {
  return withBody(body, null);
}
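For orientation, a minimal self-contained sketch of this overload and its header-taking sibling; the class name, header key, and body text below are illustrative, not taken from any of the snippets here:

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;

public class WithBodyBytesExample {
  public static void main(String[] args) {
    // No-headers overload: delegates to withBody(body, null),
    // and the resulting event carries an empty header map.
    Event simple = EventBuilder.withBody("hello".getBytes(StandardCharsets.UTF_8));

    // Header-taking overload: the supplied map is set on the event.
    Map<String, String> headers = new HashMap<String, String>();
    headers.put("timestamp", Long.toString(System.currentTimeMillis()));
    Event withHeaders = EventBuilder.withBody(
        "hello".getBytes(StandardCharsets.UTF_8), headers);

    System.out.println(new String(simple.getBody(), StandardCharsets.UTF_8));
    System.out.println(withHeaders.getHeaders());
  }
}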
public ResultCode Log(List<LogEntry> list) throws TException {
  if (list != null) {
    sourceCounter.addToEventReceivedCount(list.size());
    try {
      List<Event> events = new ArrayList<Event>(list.size());
      for (LogEntry entry : list) {
        Map<String, String> headers = new HashMap<String, String>(1, 1);
        String category = entry.getCategory();
        if (category != null) {
          headers.put(SCRIBE_CATEGORY, category);
        }
        Event event = EventBuilder.withBody(entry.getMessage().getBytes(), headers);
        events.add(event);
      }
      if (events.size() > 0) {
        getChannelProcessor().processEventBatch(events);
      }
      sourceCounter.addToEventAcceptedCount(list.size());
      return ResultCode.OK;
    } catch (Exception e) {
      LOG.warn("Scribe source handling failure", e);
      sourceCounter.incrementEventReadOrChannelFail(e);
    }
  }
  return ResultCode.TRY_LATER;
}
if (batchSize == 1) {
  if (eventsSentTX < totalEvents) {
    getChannelProcessor().processEvent(
        EventBuilder.withBody(String.valueOf(eventsSentTX++).getBytes()));
    sourceCounter.incrementEventAcceptedCount();
  }
} else {
  for (int i = 0; i < batchSize; i++) {
    if (eventsSentTX < totalEvents) {
      batchArrayList.add(i, EventBuilder.withBody(String
          .valueOf(eventsSentTX++).getBytes()));
    }
  }
  getChannelProcessor().processEventBatch(batchArrayList);
  sourceCounter.incrementAppendBatchAcceptedCount();
  sourceCounter.addToEventAcceptedCount(batchArrayList.size());
}
@Override
public void onPMessage(String pattern, String channel, String message) {
  Map<String, String> headers = Maps.newHashMap();
  headers.put("channel", channel);
  Event event = EventBuilder.withBody(message, Charset.forName(charset), headers);
  channelProcessor.processEvent(event);
}
@Test
public void testTopicAndKeyFromHeader() {
  Sink kafkaSink = new KafkaSink();
  Context context = prepareDefaultContext();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();

  String msg = "test-topic-and-key-from-header";
  Map<String, String> headers = new HashMap<String, String>();
  headers.put("topic", TestConstants.CUSTOM_TOPIC);
  headers.put("key", TestConstants.CUSTOM_KEY);

  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  Event event = EventBuilder.withBody(msg.getBytes(), headers);
  memoryChannel.put(event);
  tx.commit();
  tx.close();

  try {
    Sink.Status status = kafkaSink.process();
    if (status == Sink.Status.BACKOFF) {
      fail("Error Occurred");
    }
  } catch (EventDeliveryException ex) {
    // ignore
  }
  checkMessageArrived(msg, TestConstants.CUSTOM_TOPIC);
}
@Override
public Void append(AvroFlumeOGEvent evt) throws AvroRemoteException {
  counterGroup.incrementAndGet("rpc.received");

  Map<String, String> headers = new HashMap<String, String>();
  // extract Flume OG event headers
  headers.put(HOST, evt.getHost().toString());
  headers.put(TIMESTAMP, evt.getTimestamp().toString());
  headers.put(PRIORITY, evt.getPriority().toString());
  headers.put(NANOS, evt.getNanos().toString());
  for (Entry<CharSequence, ByteBuffer> entry : evt.getFields().entrySet()) {
    headers.put(entry.getKey().toString(), entry.getValue().toString());
  }
  headers.put(OG_EVENT, "yes");

  Event event = EventBuilder.withBody(evt.getBody().array(), headers);
  try {
    getChannelProcessor().processEvent(event);
    counterGroup.incrementAndGet("rpc.events");
  } catch (ChannelException ex) {
    return null;
  }

  counterGroup.incrementAndGet("rpc.successful");
  return null;
}
public static Event withBody(String body, Charset charset) {
  return withBody(body, charset, null);
}
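And a small sketch of the String overload, which encodes the body with the supplied charset; the class name and body text are illustrative, and the byte-count comment assumes UTF-8's two-byte encoding of the accented character:

import java.nio.charset.StandardCharsets;

import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;

public class WithBodyStringExample {
  public static void main(String[] args) {
    // "UTF8" as used in the snippets above is the legacy java.io alias
    // for StandardCharsets.UTF_8; both name the same charset.
    Event event = EventBuilder.withBody("caf\u00e9", StandardCharsets.UTF_8);

    // Prints 5: c, a, f, plus the two-byte UTF-8 encoding of '\u00e9'.
    System.out.println(event.getBody().length);
  }
}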
@Override
public Status appendBatch(List<ThriftFlumeEvent> events) throws TException {
  sourceCounter.incrementAppendBatchReceivedCount();
  sourceCounter.addToEventReceivedCount(events.size());

  List<Event> flumeEvents = Lists.newArrayList();
  for (ThriftFlumeEvent event : events) {
    flumeEvents.add(EventBuilder.withBody(event.getBody(), event.getHeaders()));
  }

  try {
    getChannelProcessor().processEventBatch(flumeEvents);
  } catch (ChannelException ex) {
    // SLF4J placeholder; a "%s" here would be logged literally
    logger.warn("Thrift source {} could not append events to the channel.", getName());
    sourceCounter.incrementChannelWriteFail();
    return Status.FAILED;
  }

  sourceCounter.incrementAppendBatchAcceptedCount();
  sourceCounter.addToEventAcceptedCount(events.size());
  return Status.OK;
}
@Test
public void testPropertiesBatchAppend() throws FlumeException, EventDeliveryException {
  int batchSize = 7;
  RpcClient client = null;
  Server server = RpcTestUtils.startServer(new OKAvroHandler());
  try {
    Properties p = new Properties();
    p.put("hosts", "host1");
    p.put("hosts.host1", localhost + ":" + String.valueOf(server.getPort()));
    p.put("batch-size", String.valueOf(batchSize));
    client = RpcClientFactory.getInstance(p);

    List<Event> events = new ArrayList<Event>();
    for (int i = 0; i < batchSize; i++) {
      events.add(EventBuilder.withBody("evt: " + i, Charset.forName("UTF8")));
    }
    client.appendBatch(events);
  } finally {
    RpcTestUtils.stopServer(server);
    if (client != null) {
      client.close();
    }
  }
}
/**
 * Wraps the message and headers in a Flume event and hands it to the
 * channel processor, updating the source counters on the way.
 *
 * @param message the event body
 * @param headers the event headers; the map is cleared after the event is sent
 */
private void send(String message, Map<String, String> headers) {
  ChannelProcessor channelProcessor = getChannelProcessor();
  sourceCounter.addToEventReceivedCount(1);
  sourceCounter.incrementAppendBatchReceivedCount();
  channelProcessor.processEvent(
      EventBuilder.withBody(message, Charset.forName("UTF-8"), headers));
  sourceCounter.addToEventAcceptedCount(1);
  sourceCounter.incrementAppendBatchAcceptedCount();
  headers.clear();
}