@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    batchHelper = new BatchHelper(batchSize, collector);
    openTsdbClient = openTsdbClientBuilder.build();
}
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushTuples();
            batchHelper.ack();
        }
    } catch (Exception e) {
        batchHelper.fail(e);
    }
}
private void flushTuples() {
    List<Document> docs = new LinkedList<>();
    for (Tuple t : batchHelper.getBatchTuples()) {
        Document doc = mapper.toDocument(t);
        docs.add(doc);
    }
    mongoClient.insert(docs, ordered);
}
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            byte[] rowKey = this.mapper.rowKey(tuple);
            ColumnList cols = this.mapper.columns(tuple);
            List<Mutation> mutations = hBaseClient.constructMutationReq(rowKey, cols,
                    writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
            batchMutations.addAll(mutations);
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            this.hBaseClient.batchMutate(batchMutations);
            LOG.debug("acknowledging tuples after batchMutate");
            batchHelper.ack();
            batchMutations.clear();
        }
    } catch (Exception e) {
        batchHelper.fail(e);
        batchMutations.clear();
    }
}
@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    super.prepare(topoConf, context, collector);
    this.batchHelper = new BatchHelper(batchSize, collector);
}
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            final List<OpenTsdbMetricDatapoint> metricDataPoints = getMetricPoints(tuple);
            for (OpenTsdbMetricDatapoint metricDataPoint : metricDataPoints) {
                metricPointsWithTuple.put(metricDataPoint, tuple);
            }
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            LOG.debug("Sending metrics of size [{}]", metricPointsWithTuple.size());
            // The original snippet is truncated here: the batched data points are written via
            // openTsdbClient, and tuples whose points were rejected are collected into failedTuples.
            if (!failedTuples.isEmpty()) {
                for (Tuple batchedTuple : batchHelper.getBatchTuples()) {
                    if (failedTuples.contains(batchedTuple)) {
                        collector.fail(batchedTuple);
                    } else {
                        collector.ack(batchedTuple);
                    }
                }
            } else {
                batchHelper.ack();
            }
            metricPointsWithTuple.clear();
        }
    } catch (Exception e) {
        batchHelper.fail(e);
        metricPointsWithTuple.clear();
    }
}
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            List<String> partitionVals = options.getMapper().mapPartitions(tuple);
            HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
            HiveWriter writer = getOrCreateWriter(endPoint);
            writer.write(options.getMapper().mapRecord(tuple));
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushAllWriters(true);
            LOG.info("acknowledging tuples after writers flushed");
            batchHelper.ack();
        }
        if (TupleUtils.isTick(tuple)) {
            retireIdleWriters();
        }
    } catch (SerializationError se) {
        LOG.info("Serialization exception occurred, tuple is acknowledged but not written to Hive [{}]", tuple);
        this.collector.reportError(se);
        collector.ack(tuple);
    } catch (Exception e) {
        batchHelper.fail(e);
        abortAndCloseWriters();
    }
}
@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    Validate.notEmpty(properties, "Producer properties can not be empty");
    Validate.notNull(selector, "TopicSelector can not be null");
    Validate.notNull(mapper, "TupleToMessageMapper can not be null");

    producer = new DefaultMQProducer();
    producer.setInstanceName(String.valueOf(context.getThisTaskId()));
    RocketMqConfig.buildProducerConfigs(properties, producer);

    try {
        producer.start();
    } catch (MQClientException e) {
        throw new RuntimeException(e);
    }

    this.collector = collector;
    this.batchHelper = new BatchHelper(batchSize, collector);
    this.messages = new LinkedList<>();
}
if (batchHelper.shouldHandle(input)) {
    batchHelper.addBatch(input);
    messages.add(prepareMessage(input));
}
if (batchHelper.shouldFlush()) {
    try {
        producer.send(messages);
        batchHelper.ack();
    } catch (Exception e) {
        batchHelper.fail(e);
    }
    // clear the pending messages whether the batch send succeeded or failed
    messages.clear();
}
@Override
public void prepare(Map<String, Object> conf, TopologyContext topologyContext, OutputCollector collector) {
    try {
        tokenAuthEnabled = HiveUtils.isTokenAuthEnabled(conf);
        try {
            ugi = HiveUtils.authenticate(tokenAuthEnabled, options.getKerberosKeytab(), options.getKerberosPrincipal());
        } catch (HiveUtils.AuthenticationFailed ex) {
            LOG.error("Hive kerberos authentication failed " + ex.getMessage(), ex);
            throw new IllegalArgumentException(ex);
        }

        this.collector = collector;
        this.batchHelper = new BatchHelper(options.getBatchSize(), collector);
        allWriters = new ConcurrentHashMap<HiveEndPoint, HiveWriter>();
        String timeoutName = "hive-bolt-%d";
        this.callTimeoutPool = Executors.newFixedThreadPool(1,
                new ThreadFactoryBuilder().setNameFormat(timeoutName).build());

        sendHeartBeat.set(true);
        heartBeatTimer = new Timer(topologyContext.getThisTaskId() + "-hb-timer", true);
        setupHeartBeatTimer();
    } catch (Exception e) {
        LOG.warn("unable to make connection to hive ", e);
    }
}
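All of the bolts above follow the same BatchHelper contract: shouldHandle() is false for tick tuples (which only trigger a flush), addBatch() accumulates tuples, shouldFlush() signals that the batch is full or a flush was forced, and ack()/fail() resolve the whole batch at once. The following is a minimal, self-contained sketch of that pattern; the class name BatchingSinkBolt, the BATCH_SIZE constant, and the writeBatch() sink are illustrative and not part of any bolt shown above.

// Minimal sketch of the shared BatchHelper pattern, assuming Storm 2.x APIs.
import java.util.List;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.utils.BatchHelper;

public class BatchingSinkBolt extends BaseRichBolt {
    private static final int BATCH_SIZE = 100;   // illustrative batch size
    private transient BatchHelper batchHelper;
    private transient OutputCollector collector;

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        this.batchHelper = new BatchHelper(BATCH_SIZE, collector);
    }

    @Override
    public void execute(Tuple tuple) {
        try {
            // Tick tuples are not added to the batch; they only trigger a flush.
            if (batchHelper.shouldHandle(tuple)) {
                batchHelper.addBatch(tuple);
            }
            // Flush when the batch is full or a flush was forced by a tick tuple.
            if (batchHelper.shouldFlush()) {
                writeBatch(batchHelper.getBatchTuples());
                batchHelper.ack();        // acks every tuple in the batch and resets it
            }
        } catch (Exception e) {
            batchHelper.fail(e);          // reports the error and fails the whole batch
        }
    }

    private void writeBatch(List<Tuple> tuples) {
        // illustrative: write the batched tuples to the external store
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // sink bolt: nothing to declare
    }
}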