@Override
public <T extends Dataset> T getDataset(String namespace, String name, Map<String, String> arguments,
                                         AccessType accessType) throws DatasetInstantiationException {
  T dataset = super.getDataset(namespace, name, adjustRuntimeArguments(arguments), accessType);
  startDatasetTransaction(dataset);
  return dataset;
}
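// Usage sketch (not from this codebase): how a caller might obtain a dataset through the
// override above. The namespace, dataset name, and empty arguments are illustrative
// assumptions; requires java.util.Collections plus the Dataset/AccessType types used above.
Dataset dataset = getDataset("default", "myDataset",
                             Collections.<String, String>emptyMap(), AccessType.READ_WRITE);
// The returned instance is already enlisted via startDatasetTransaction(...), so its
// operations are buffered in the task transaction until commit.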
@Override
protected void shutDown() throws Exception {
  // Close all the contexts to release resources
  for (BasicMapReduceTaskContext context : taskContexts.asMap().values()) {
    try {
      context.close();
    } catch (Exception e) {
      LOG.warn("Exception when closing context {}", context, e);
    }
  }
}
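// For illustration: taskContexts above is presumably a Guava LoadingCache. A minimal sketch
// of how such a cache could be built; ContextCacheKey and createContext(...) are assumed
// names for this sketch, not confirmed from this codebase.
LoadingCache<ContextCacheKey, BasicMapReduceTaskContext> taskContexts =
  CacheBuilder.newBuilder().build(new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {
    @Override
    public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
      return createContext(key); // hypothetical factory; actual construction appears further below
    }
  });
// shutDown() then iterates taskContexts.asMap().values() to close every cached context.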
@Override
public void discardDataset(Dataset dataset) {
  delegate.discardDataset(dataset);
}
// Reconstructed from flattened fragments of the mapper wrapper's run(Context) method;
// "..." comments mark code elided in the source. MapTaskMetricsWriter is inferred by
// symmetry with the reducer fragment below; "delegate" is an assumed name.
String program = basicMapReduceContext.getProgramName();
MapTaskMetricsWriter mapTaskMetricsWriter =
  new MapTaskMetricsWriter(basicMapReduceContext.getProgramMetrics(), context);

// ... an auto-flushing wrapper around the Hadoop context is created here ...
basicMapReduceContext.setHadoopContext(flushingContext);

InputSplit inputSplit = context.getInputSplit();
if (inputSplit instanceof MultiInputTaggedSplit) {
  basicMapReduceContext.setInputContext(InputContexts.create((MultiInputTaggedSplit) inputSplit));
}

// ... the user's Mapper ("delegate") is instantiated, then its fields are injected ...
try {
  Reflections.visit(delegate, delegate.getClass(),
                    new PropertyFieldSetter(basicMapReduceContext.getSpecification().getProperties()),
                    new MetricsFieldSetter(basicMapReduceContext.getMetrics()),
                    new DataSetFieldSetter(basicMapReduceContext));
} catch (Throwable t) {
  // ... error handling elided in the source ...
}

// ... the delegate mapper runs against the flushing context ...

// The transaction is still open, but operations buffered by datasets must be dispatched
// before the task ends.
try {
  basicMapReduceContext.flushOperations();
} catch (Exception e) {
  throw new IOException("Failed to flush operations at the end of mapper of " + basicMapReduceContext, e);
}
basicMapReduceContext.closeMultiOutputs();
// Reconstructed from flattened fragments of the reducer wrapper's run(Context) method;
// "..." comments mark code elided in the source; "delegate" is an assumed name.
long metricsReportInterval = basicMapReduceContext.getMetricsReportIntervalMillis();
final ReduceTaskMetricsWriter reduceTaskMetricsWriter =
  new ReduceTaskMetricsWriter(basicMapReduceContext.getProgramMetrics(), context);

// ... an auto-flushing wrapper around the Hadoop context is created here ...
basicMapReduceContext.setHadoopContext(flushingContext);

// ... the user's Reducer ("delegate") is instantiated, then its fields are injected ...
try {
  Reflections.visit(delegate, delegate.getClass(),
                    new PropertyFieldSetter(basicMapReduceContext.getSpecification().getProperties()),
                    new MetricsFieldSetter(basicMapReduceContext.getMetrics()),
                    new DataSetFieldSetter(basicMapReduceContext));
} catch (Throwable t) {
  // ... error handling elided in the source ...
}

// ... the delegate reducer runs against the flushing context ...

// Flush remaining buffered dataset operations; unlike the mapper, a failure here is
// logged rather than rethrown.
try {
  basicMapReduceContext.flushOperations();
} catch (Exception e) {
  LOG.error("Failed to flush operations at the end of reducer of " + basicMapReduceContext, e);
}
basicMapReduceContext.closeMultiOutputs();
private void commitTx(Transaction transaction) throws IOException {
  try {
    LOG.debug("Committing MapReduce Job transaction: {}", transaction.getWritePointer());

    // "Commit" the data event topic by publishing an empty message.
    // Need to do it with the raw MessagingService.
    taskContext.getMessagingService().publish(
      StoreRequestBuilder
        .of(NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC)))
        .setTransaction(transaction.getWritePointer())
        .build());

    // Flush dataset operations (such as from any DatasetOutputCommitters).
    taskContext.flushOperations();

    // No need to roll back changes if the commit fails, as these changes were performed by MapReduce tasks.
    // NOTE: can't call afterCommit on datasets in this case: the changes were made by external processes.
    try {
      txClient.commitOrThrow(transaction);
    } catch (TransactionFailureException e) {
      LOG.warn("MapReduce Job transaction {} failed to commit", transaction.getTransactionId());
      throw e;
    }
    taskContext.postTxCommit();
  } catch (Exception e) {
    Throwables.propagateIfInstanceOf(e, IOException.class);
    throw Throwables.propagate(e);
  }
}
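// Lifecycle sketch (an assumption, not this codebase's driver code): commitTx(...) above is
// the commit half of a long-running transaction. startLong() and invalidate(...) are standard
// Tephra TransactionSystemClient calls; runJob(...) is a hypothetical placeholder.
Transaction transaction = txClient.startLong();        // long-running tx spanning the whole job
try {
  runJob(transaction);                                 // hypothetical: tasks write under this tx
  commitTx(transaction);                               // publish data event, flush, then commit
} catch (Exception e) {
  txClient.invalidate(transaction.getTransactionId()); // make the job's partial writes invisible
  throw e;
}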
@Override
public List<Split> getSplits() {
  try {
    try {
      return delegate.getSplits();
    } finally {
      flushOperations();
    }
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
@Override
public ApplicationSpecification getApplicationSpecification() {
  return delegate.getApplicationSpecification();
}

@Override
public Map<String, File> getAllLocalFiles() {
  return delegate.getAllLocalFiles();
}

@Override
public Admin getAdmin() {
  return delegate.getAdmin();
}
TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
NamespaceQueryAdmin namespaceQueryAdmin = injector.getInstance(NamespaceQueryAdmin.class);
return new BasicMapReduceTaskContext(
  program, options, cConf, taskType, taskId, spec, workflowInfo,
  discoveryServiceClient, metricsCollectionService, txClient,
  // ... remaining constructor arguments truncated in the source ...
@Override
public SecureStoreData get(String namespace, String name) throws Exception {
  return delegate.get(namespace, name);
}
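// Usage sketch (assumed namespace and key names): reading a stored secret through the
// delegating SecureStore method above. SecureStoreData.get() returns the raw secret bytes.
SecureStoreData data = get("default", "db.password");
byte[] secret = data.get();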