// Failure path: fail the batch so it is replayed, and discard the buffered metric points.
batchHelper.fail(e);
metricPointsWithTuple.clear();

@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            byte[] rowKey = this.mapper.rowKey(tuple);
            ColumnList cols = this.mapper.columns(tuple);
            // SYNC_WAL writes and syncs the write-ahead log per mutation;
            // SKIP_WAL trades that durability for throughput.
            List<Mutation> mutations = hBaseClient.constructMutationReq(
                    rowKey, cols, writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
            batchMutations.addAll(mutations);
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            this.hBaseClient.batchMutate(batchMutations);
            LOG.debug("acknowledging tuples after batchMutate");
            batchHelper.ack();
            batchMutations.clear();
        }
    } catch (Exception e) {
        // Fail every tuple in the batch and drop the half-built mutation list.
        batchHelper.fail(e);
        batchMutations.clear();
    }
}
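The batch above is flushed when it reaches the configured size or when a tick tuple forces a flush, so the bolt has to ask Storm for tick tuples in the first place. A minimal sketch of that wiring, assuming a flushIntervalSecs field on the bolt (TupleUtils.putTickFrequencyIntoComponentConfig is the Storm helper for this, and it tolerates a null base configuration):

import java.util.Map;
import org.apache.storm.utils.TupleUtils;

// Ask Storm to send this bolt a tick tuple every flushIntervalSecs seconds so
// a partially filled batch still gets flushed on a schedule. flushIntervalSecs
// is an assumed field on the bolt, not part of BatchHelper.
@Override
public Map<String, Object> getComponentConfiguration() {
    return TupleUtils.putTickFrequencyIntoComponentConfig(
            super.getComponentConfiguration(), flushIntervalSecs);
}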
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushTuples();
            batchHelper.ack();
        }
    } catch (Exception e) {
        batchHelper.fail(e);
    }
}
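This is the pattern at its simplest: the bolt only buffers tuples and defers all store-specific work to its own flushTuples(). A sketch of the two pieces the snippet assumes, where Record, toRecord(...), and store are hypothetical placeholders for the target store's types and client, while BatchHelper's constructor and getBatchTuples() are the actual Storm API:

import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.utils.BatchHelper;

// Wire up the helper with the desired batch size and the bolt's collector.
@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    this.batchHelper = new BatchHelper(batchSize, collector); // batchSize: assumed bolt field
}

// Write out everything buffered since the last flush. getBatchTuples() exposes
// the tuples collected by addBatch(); toRecord(...) and store.insertBulk(...)
// stand in for the target store's mapping and bulk-write calls.
private void flushTuples() {
    List<Record> records = new LinkedList<>();
    for (Tuple t : batchHelper.getBatchTuples()) {
        records.add(toRecord(t));
    }
    store.insertBulk(records);
}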
// Failure path: fail the batch so it is replayed, and discard the buffered messages.
batchHelper.fail(e);
messages.clear();

@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            List<String> partitionVals = options.getMapper().mapPartitions(tuple);
            HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
            HiveWriter writer = getOrCreateWriter(endPoint);
            writer.write(options.getMapper().mapRecord(tuple));
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushAllWriters(true);
            LOG.info("acknowledging tuples after writers flushed");
            batchHelper.ack();
        }
        if (TupleUtils.isTick(tuple)) {
            retireIdleWriters();
        }
    } catch (SerializationError se) {
        // A record that cannot be serialized will fail the same way on every
        // replay, so report the error and ack the tuple instead of failing it.
        LOG.info("Serialization exception occurred, tuple [{}] is acknowledged but not written to Hive.", tuple);
        this.collector.reportError(se);
        collector.ack(tuple);
    } catch (Exception e) {
        batchHelper.fail(e);
        abortAndCloseWriters();
    }
}
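Two details are worth calling out. First, the SerializationError branch deliberately acks rather than fails: failing would replay a poison record forever. Second, execute() leans on a per-partition writer cache: each distinct set of partition values maps to one HiveEndPoint with one long-lived transactional writer. A simplified sketch of that cache, with makeWriter(...) as a hypothetical placeholder for the actual writer construction in storm-hive; field and method names here are illustrative, not the real ones:

import java.util.HashMap;
import java.util.Map;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.storm.hive.common.HiveWriter;

// One writer per Hive endpoint (i.e. per partition). retireIdleWriters() in
// execute() above evicts entries from this map once they sit idle too long.
private final Map<HiveEndPoint, HiveWriter> allWriters = new HashMap<>();

private HiveWriter getOrCreateWriter(HiveEndPoint endPoint) throws Exception {
    HiveWriter writer = allWriters.get(endPoint);
    if (writer == null) {
        writer = makeWriter(endPoint); // placeholder for storm-hive's writer setup
        allWriters.put(endPoint, writer);
    }
    return writer;
}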