private void checkFailures() {
    if (executorFuture != null && executorFuture.isDone()) {
        try {
            executorFuture.get();
        } catch (InterruptedException ex) {
            LOG.error("Got exception ", ex);
            throw new FailedException(ex);
        } catch (ExecutionException ex) {
            LOG.error("Got exception ", ex);
            throw new FailedException(ex.getCause());
        }
    }
}
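For context, a minimal sketch of how this future-check pattern might be wired; the class, executor, and method names below are assumptions for illustration, not part of the original state implementation.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.storm.topology.FailedException;

// Hypothetical wrapper: a background flush task whose failure should fail
// the current batch the next time checkFailures() runs.
public class AsyncFlushExample {
    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    private Future<?> executorFuture;

    /** Kick off an asynchronous flush; any exception it throws surfaces later via get(). */
    public void startAsyncFlush(Runnable flushTask) {
        executorFuture = executor.submit(flushTask);
    }

    /** Call at the start of each batch: rethrow any asynchronous failure as FailedException. */
    public void checkFailures() {
        if (executorFuture != null && executorFuture.isDone()) {
            try {
                executorFuture.get();
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                throw new FailedException(ex);
            } catch (ExecutionException ex) {
                throw new FailedException(ex.getCause());
            }
        }
    }
}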
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(getMessage())
      .append(System.lineSeparator())
      .append("Multiple exceptions encountered: ")
      .append(System.lineSeparator());
    for (Throwable exception : exceptions) {
        sb.append(exception.toString())
          .append(System.lineSeparator());
    }
    // Return the built message rather than the default Throwable string.
    return sb.toString();
}
public void updateState(List<TridentTuple> tuples, TridentCollector tridentCollector) {
    try {
        this.options.execute(tuples);
    } catch (IOException e) {
        LOG.warn("Failing batch due to IOException.", e);
        throw new FailedException(e);
    }
}
@Override
public void updateState(JmsState jmsState, List<TridentTuple> tuples, TridentCollector collector) {
    try {
        jmsState.updateState(tuples, collector);
    } catch (JMSException e) {
        throw new FailedException("failed JMS operation", e);
    }
}
@Override
public void commit(Long txId) {
    try {
        flushAllWriters();
        currentBatchSize = 0;
    } catch (HiveWriter.TxnFailure | InterruptedException
            | HiveWriter.CommitFailure | HiveWriter.TxnBatchFailure ex) {
        LOG.warn("Commit failed. Failing the batch.", ex);
        throw new FailedException(ex);
    }
}
@Override
public void commit(Long txId) {
    try {
        options.doCommit(txId);
    } catch (IOException e) {
        LOG.warn("Commit failed due to IOException. Failing the batch.", e);
        throw new FailedException(e);
    }
}
/**
 * {@inheritDoc}
 */
@Override
public List<Integer> chooseTasks(int taskId, List<Object> values) {
    try {
        int n = Math.abs((int) hashes(getKeyValues(values)) % targetTasks.size());
        return Lists.newArrayList(targetTasks.get(n));
    } catch (IOException e) {
        throw new FailedException(e);
    }
}
@Override
public void execute(Tuple tuple, BasicOutputCollector collector) {
    Integer c1 = tuple.getInteger(0);
    Integer c2 = tuple.getInteger(1);
    Integer exp = expected.get(c1);
    if (exp == null) {
        exp = 0;
    }
    if (c2.intValue() != exp.intValue()) {
        System.out.println(c1 + " " + c2 + " != " + exp);
        throw new FailedException(c1 + " " + c2 + " != " + exp);
    }
    exp = c2 + 1;
    expected.put(c1, exp);
}
public List<T> multiUpdate(List<List<Object>> keys, List<ValueUpdater> updaters) {
    print(keys, updaters);
    if ((updateCount++ % 5) == 0) {
        LOG.error("Throwing FailedException");
        throw new FailedException("Enforced State Update Fail. On retrial should replay the exact same batch.");
    }
    return super.multiUpdate(keys, updaters);
}
@Override
public void execute(TridentTuple tuple, TridentCollector collector) {
    MqttMessage message = this.mapper.toMessage(tuple);
    try {
        this.publisher.publish(message);
    } catch (Exception e) {
        LOG.warn("Error publishing MQTT message. Failing tuple.", e);
        // should we fail the batch or kill the worker?
        throw new FailedException();
    }
}
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    try {
        writeTuples(tuples);
    } catch (Exception e) {
        abortAndCloseWriters();
        LOG.warn("hive streaming failed.", e);
        throw new FailedException(e);
    }
}
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    Preconditions.checkNotNull(options.cqlResultSetValuesMapper,
            "CassandraState.Options should have cqlResultSetValuesMapper");
    List<List<Values>> batchRetrieveResult = new ArrayList<>();
    try {
        for (TridentTuple tridentTuple : tridentTuples) {
            List<Statement> statements = options.cqlStatementTupleMapper.map(conf, session, tridentTuple);
            for (Statement statement : statements) {
                List<List<Values>> values = options.cqlResultSetValuesMapper.map(session, statement, tridentTuple);
                batchRetrieveResult.addAll(values);
            }
        }
    } catch (Exception e) {
        LOG.warn("Batch retrieve operation failed", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
/**
 * Update Mongo state.
 *
 * @param tuples trident tuples
 * @param collector trident collector
 */
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Document> documents = Lists.newArrayList();
    for (TridentTuple tuple : tuples) {
        Document document = options.mapper.toDocument(tuple);
        documents.add(document);
    }
    try {
        this.mongoClient.insert(documents, true);
    } catch (Exception e) {
        LOG.warn("Batch write failed but some requests might have succeeded. Triggering replay.", e);
        throw new FailedException(e);
    }
}
@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    LOG.debug("multiPut writing {} values.", keys.size());
    List<Statement> statements = new ArrayList<>();
    for (int i = 0; i < keys.size(); i++) {
        Values stateValues = options.stateMapper.toValues(values.get(i));
        SimpleTuple tuple = new SimpleTuple(allFields, keys.get(i), stateValues);
        statements.addAll(options.putMapper.map(conf, session, tuple));
    }
    try {
        putResultMapper.map(session, statements, null);
    } catch (Exception e) {
        LOG.warn("Write operation failed: {}", e.getMessage());
        throw new FailedException(e);
    }
}
@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    List<Put> puts = new ArrayList<Put>(keys.size());
    for (int i = 0; i < keys.size(); i++) {
        byte[] hbaseKey = this.options.mapMapper.rowKey(keys.get(i));
        String qualifier = this.options.mapMapper.qualifier(keys.get(i));
        LOG.info("Partition: {}, Key: {}, Value: {}",
                new Object[]{ this.partitionNum, new String(hbaseKey),
                        new String(this.serializer.serialize(values.get(i))) });
        Put put = new Put(hbaseKey);
        T val = values.get(i);
        put.add(this.options.columnFamily.getBytes(), qualifier.getBytes(), this.serializer.serialize(val));
        puts.add(put);
    }
    try {
        this.table.put(puts);
    } catch (InterruptedIOException e) {
        throw new FailedException("Interrupted while writing to HBase", e);
    } catch (RetriesExhaustedWithDetailsException e) {
        throw new FailedException("Retries exhausted while writing to HBase", e);
    } catch (IOException e) {
        throw new FailedException("IOException while writing to HBase", e);
    }
}
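Note that Put.add(family, qualifier, value) is deprecated in newer HBase client releases. A minimal sketch of the same write using addColumn (assuming the HBase 1.x+ client API); the helper class and method names are hypothetical:

import org.apache.hadoop.hbase.client.Put;

// Hypothetical helper showing the non-deprecated addColumn call that
// replaces Put.add(family, qualifier, value) used in the example above.
public final class PutBuilder {
    public static Put buildPut(byte[] rowKey, byte[] family, byte[] qualifier, byte[] value) {
        Put put = new Put(rowKey);
        put.addColumn(family, qualifier, value);
        return put;
    }
}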
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<List<Column>> columnsLists = new ArrayList<List<Column>>();
    for (TridentTuple tuple : tuples) {
        columnsLists.add(options.mapper.getColumns(tuple));
    }
    try {
        if (!StringUtils.isBlank(options.tableName)) {
            jdbcClient.insert(options.tableName, columnsLists);
        } else {
            jdbcClient.executeInsertQuery(options.insertQuery, columnsLists);
        }
    } catch (Exception e) {
        LOG.warn("Batch write failed but some requests might have succeeded. Triggering replay.", e);
        throw new FailedException(e);
    }
}
private void updateIndex(long txId) {
    LOG.debug("Starting index update.");
    final Path tmpPath = tmpFilePath(indexFilePath.toString());
    try (FSDataOutputStream out = this.options.fs.create(tmpPath, true);
         BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out))) {
        TxnRecord txnRecord = new TxnRecord(txId, options.currentFile.toString(), this.options.getCurrentOffset());
        bw.write(txnRecord.toString());
        bw.newLine();
        bw.flush();
        out.close();
        /*
         * In non error scenarios, for the Azure Data Lake Store File System (adl://),
         * the output stream must be closed before the file associated with it is deleted.
         * For ADLFS deleting the file also removes any handles to the file, hence out.close() will fail.
         */
        /*
         * Delete the current index file and rename the tmp file to atomically
         * replace the index file. Orphan .tmp files are handled in getTxnRecord.
         */
        options.fs.delete(this.indexFilePath, false);
        options.fs.rename(tmpPath, this.indexFilePath);
        lastSeenTxn = txnRecord;
        LOG.debug("updateIndex updated lastSeenTxn to [{}]", this.lastSeenTxn);
    } catch (IOException e) {
        LOG.warn("Begin commit failed due to IOException. Failing batch", e);
        throw new FailedException(e);
    }
}
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    List<List<Values>> batchRetrieveResult = Lists.newArrayList();
    try {
        for (TridentTuple tuple : tridentTuples) {
            List<Column> columns = options.jdbcLookupMapper.getColumns(tuple);
            List<List<Column>> rows = jdbcClient.select(options.selectQuery, columns);
            for (List<Column> row : rows) {
                List<Values> values = options.jdbcLookupMapper.toTuple(tuple, row);
                batchRetrieveResult.add(values);
            }
        }
    } catch (Exception e) {
        LOG.warn("Batch get operation failed. Triggering replay.", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
/**
 * Store current state to Elasticsearch.
 *
 * @param tuples list of tuples for storing to ES. Each tuple should have relevant fields
 *               (source, index, type, id) for EsState's tupleMapper to extract the ES document.
 */
public void updateState(List<TridentTuple> tuples) {
    try {
        String bulkRequest = buildRequest(tuples);
        Response response = client.performRequest("post", "_bulk", new HashMap<>(), new StringEntity(bulkRequest));
        BulkIndexResponse bulkResponse = objectMapper.readValue(response.getEntity().getContent(), BulkIndexResponse.class);
        if (bulkResponse.hasErrors()) {
            LOG.warn("failed processing bulk index requests: " + bulkResponse.getFirstError()
                    + ": " + bulkResponse.getFirstResult());
            throw new FailedException();
        }
    } catch (IOException e) {
        LOG.warn("failed processing bulk index requests: " + e.toString());
        throw new FailedException(e);
    }
}