@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  Configuration conf = context.getConfiguration();
  keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT);
  valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT);
  multiTableMapper =
      conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
  if (multiTableMapper) {
    tables = TABLE_NAMES;
  } else {
    tables = new TableName[] { TABLE_NAMES[0] };
  }
}
@Override
protected void cleanup(Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation>.Context context)
    throws IOException, InterruptedException {
  super.cleanup(context);
}
/** {@inheritDoc} */
@Override
public void run(Context ctx) throws IOException, InterruptedException {
  try {
    super.run(ctx);
  } catch (HadoopTaskCancelledException e) {
    cancelledTasks.incrementAndGet();
    throw e;
  }
}
protected void doMap(KEYIN key, VALUEIN value, Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context context)
    throws IOException, InterruptedException {
  super.map(key, value, context);
}
public void map(LongWritable key, Text value, Context context)
    throws IOException, InterruptedException {
  if (ioEx) {
    throw new IOException();
  }
  if (rtEx) {
    throw new RuntimeException();
  }
  super.map(key, value, context);
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  super.cleanup(context);
  job.workerIterationEnd(metrics);
}
// Wrap the low-level MapContext in the standard Mapper.Context facade before
// driving the mapper. The original raw WrappedMapper triggers an unchecked
// warning; the type parameters spelled out here are assumed to match the
// surrounding mapper's signature.
mapper.run(new WrappedMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>().getMapContext(hadoopCtx));
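// A minimal sketch of how this wrapping is typically assembled, mirroring the
// pattern in Hadoop's own MapTask. The variables conf, taskAttemptId, reader,
// writer, committer, reporter, and split are assumptions, not from the source.
// (Classes: org.apache.hadoop.mapreduce.task.MapContextImpl,
//  org.apache.hadoop.mapreduce.lib.map.WrappedMapper.)
MapContext<LongWritable, Text, Text, IntWritable> rawContext =
    new MapContextImpl<LongWritable, Text, Text, IntWritable>(
        conf, taskAttemptId, reader, writer, committer, reporter, split);
// getMapContext() returns a Mapper.Context view over the raw MapContext.
Mapper<LongWritable, Text, Text, IntWritable>.Context wrappedContext =
    new WrappedMapper<LongWritable, Text, Text, IntWritable>().getMapContext(rawContext);
mapper.run(wrappedContext);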
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  Map<String, String> configMap = Maps.newHashMap();
  SharedResourcesBrokerFactory.addBrokerKeys(configMap, context.getConfiguration());
  this.broker = SharedResourcesBrokerFactory.createDefaultTopLevelBroker(
      ConfigFactory.parseMap(configMap), SimpleScopeType.GLOBAL.defaultScopeInstance());
  super.setup(context);
}
/** {@inheritDoc} */
@Override
protected void cleanup(Context ctx) throws IOException, InterruptedException {
  super.cleanup(ctx);
  HadoopErrorSimulator.instance().onMapCleanup();
}
/**
 * Invokes {@code Mapper#run(Context)} internally.
 * Clients can override this method to implement a customized {@code run} method.
 * @param context current context
 * @throws IOException if the task fails due to an I/O error
 * @throws InterruptedException if task execution is interrupted
 */
protected void runInternal(Context context) throws IOException, InterruptedException {
  super.run(context);
}
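// Illustrative sketch only: a hypothetical subclass using the override point
// described in the javadoc above to time the default record loop. The parent
// class name (BaseMapper) and the counter group/name are assumptions.
public class TimedMapper extends BaseMapper {
  @Override
  protected void runInternal(Context context) throws IOException, InterruptedException {
    long start = System.nanoTime();
    try {
      // Default behavior: delegate to the standard Mapper#run(Context) loop.
      super.runInternal(context);
    } finally {
      long elapsedMs = (System.nanoTime() - start) / 1_000_000L;
      context.getCounter("profile", "runInternalMillis").increment(elapsedMs);
    }
  }
}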
/** {@inheritDoc} */
@Override
protected void setup(Context ctx) throws IOException, InterruptedException {
  super.setup(ctx);
  wasSetUp = true;
  HadoopErrorSimulator.instance().onMapSetup();
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  super.cleanup(context);
  if (connection != null) {
    try {
      connection.close();
    } catch (SQLException e) {
      LOG.error("Error {} while closing connection in the PhoenixIndexMapper class", e.getMessage());
    }
  }
}
@SuppressWarnings("unchecked") public void run(Context context) throws IOException, InterruptedException { setup(context); mapper.run(context); cleanup(context); } }
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  org.apache.hadoop.conf.Configuration hadoopConf = DEFAULT_COMPAT.getContextConfiguration(context);
  ModifiableHadoopConfiguration scanConf =
      ModifiableHadoopConfiguration.of(TitanHadoopConfiguration.MAPRED_NS, hadoopConf);
  job = getJob(scanConf);
  metrics = new HadoopContextScanMetrics(context);
  Configuration graphConf = getTitanConfiguration(context);
  finishSetup(scanConf, graphConf);
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  super.cleanup(context);
  if (connection != null) {
    try {
      processBatch(context);
      connection.close();
      if (outputConn != null) {
        outputConn.close();
      }
    } catch (SQLException e) {
      LOG.error("Error while closing connection in the PhoenixIndexMapper class", e);
      throw new IOException(e);
    }
  }
}
@Override
public void run(Context context) throws IOException, InterruptedException {
  // Find the InputProcessor from the TaggedInputSplit.
  if (delegate == null) {
    TaggedInputSplit inputSplit = (TaggedInputSplit) context.getInputSplit();
    log.info("[profile] Got input split. Going to look at DC.");
    delegate = InstancesDistributor.loadInstance(
        context.getConfiguration(), Mapper.class, inputSplit.getInputProcessorFile(), true);
    log.info("[profile] Finished. Calling run() on delegate.");
  }
  delegate.run(context);
}
@Override
protected void setup(Context context) throws IOException, InterruptedException {
  super.setup(context);
  filter = PcapFilters
      .valueOf(context.getConfiguration().get(PcapFilterConfigurator.PCAP_FILTER_NAME_CONF))
      .create();
  filter.configure(context.getConfiguration());
  start = Long.parseUnsignedLong(context.getConfiguration().get(START_TS_CONF));
  end = Long.parseUnsignedLong(context.getConfiguration().get(END_TS_CONF));
}
// Fragment reconstructed: the enclosing cleanup signature and the elided
// statements before super.cleanup() are assumptions based on the surrounding
// try/catch shape.
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
  try {
    // ... (work that can throw SQLException, not shown in the original fragment)
    super.cleanup(context);
  } catch (SQLException e) {
    LOG.error("Error {} while reading/writing a record", e.getMessage());
  }
}