  @Override
  public void close() throws IOException {
    try {
      flushOperations();
    } catch (Exception e) {
      Throwables.propagateIfInstanceOf(e, IOException.class);
      throw new IOException(e);
    }
  }
};
@Override
public List<Split> getSplits() {
  try {
    try {
      return delegate.getSplits();
    } finally {
      flushOperations();
    }
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
  try {
    Map<PartitionKey, RecordWriter<?, ?>> recordWriters = new HashMap<>();
    recordWriters.putAll(this.recordWriters);
    MultipleOutputs.closeRecordWriters(recordWriters, contexts);
    taskContext.flushOperations();
  } catch (Exception e) {
    throw new IOException(e);
  } finally {
    dynamicPartitioner.destroy();
  }
}
}
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
  try {
    // the writer can be null if this writer didn't get any records (split with no data, for instance)
    if (currRecordWriter != null) {
      currRecordWriter.close(currContext);
    }
    taskContext.flushOperations();
  } catch (Exception e) {
    throw new IOException(e);
  } finally {
    dynamicPartitioner.destroy();
  }
}
}
private void commitTx(Transaction transaction) throws IOException {
  try {
    LOG.debug("Committing MapReduce Job transaction: {}", transaction.getWritePointer());

    // "Commit" the data event topic by publishing an empty message.
    // Need to do it with the raw MessagingService.
    taskContext.getMessagingService().publish(
      StoreRequestBuilder
        .of(NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC)))
        .setTransaction(transaction.getWritePointer())
        .build());

    // flush dataset operations (such as from any DatasetOutputCommitters)
    taskContext.flushOperations();

    // no need to roll back changes if the commit fails, as these changes were performed by mapreduce tasks
    // NOTE: can't call afterCommit on datasets in this case: the changes were made by external processes.
    try {
      txClient.commitOrThrow(transaction);
    } catch (TransactionFailureException e) {
      LOG.warn("MapReduce Job transaction {} failed to commit", transaction.getTransactionId());
      throw e;
    }

    taskContext.postTxCommit();
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
try {
  basicMapReduceContext.flushOperations();
} catch (Exception e) {
  throw new IOException("Failed to flush operations at the end of mapper of " + basicMapReduceContext, e);
}
try {
  basicMapReduceContext.flushOperations();
} catch (Exception e) {
  LOG.error("Failed to flush operations at the end of reducer of " + basicMapReduceContext, e);
}