/** Forwards a status line to the reporter, minus the fixed prefix and surrounding whitespace. */
private void setStatus(String line) {
  String status = line.substring(statusPrefix.length()).trim();
  reporter.setStatus(status);
}
}
/** Strips the status prefix from {@code line}, trims it, and publishes it via the reporter. */
private void setStatus(String line) {
  final String trimmed = line.substring(statusPrefix.length()).trim();
  reporter.setStatus(trimmed);
}
}
/**
 * Creates a reader for one Teradata binary file split.
 *
 * @param split the input split to read (must be a {@link FileSplit})
 * @param job the job configuration
 * @param reporter progress reporter; receives the split description as status
 * @return a {@code TeradataBinaryRecordReader} over the given split
 * @throws IOException if the underlying reader cannot be opened
 */
@Override
public RecordReader<NullWritable, BytesWritable> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  // Surface which split this task is reading so the framework can show progress.
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new TeradataBinaryRecordReader(job, fileSplit);
}
/**
 * Opens an RCFile reader for the given file split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code RCFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
@SuppressWarnings("unchecked")
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
    throws IOException {
  // Report the split first; the cast below fails the same way the original did if misused.
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new RCFileRecordReader(job, fileSplit);
}
/**
 * Builds a flat-file reader producing row containers for the given split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code FlatFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<Void, RowContainer<T>> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new FlatFileRecordReader<T>(job, fileSplit);
}
}
/**
 * Creates a block-merge reader over RCFile key/value buffer wrappers.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code RCFileBlockMergeRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<RCFileKeyBufferWrapper, RCFileValueBufferWrapper> getRecordReader(
    InputSplit split, JobConf job, Reporter reporter) throws IOException {
  // Publish progress context before opening the reader.
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new RCFileBlockMergeRecordReader(job, fileSplit);
}
/**
 * Creates a stripe-merge reader over ORC file key/value wrappers.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code OrcFileStripeMergeRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<OrcFileKeyWrapper, OrcFileValueWrapper> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new OrcFileStripeMergeRecordReader(job, fileSplit);
}
/**
 * Opens a PTF sequence-file reader for the given split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code PTFSequenceFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public org.apache.hadoop.mapred.RecordReader<K, V> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new PTFSequenceFileRecordReader<K, V>(job, fileSplit);
}
}
/**
 * Returns an RCFile reader for the supplied file split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code RCFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
@SuppressWarnings("unchecked")
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
    throws IOException {
  // Status first, then the narrowing cast — preserves the original failure order.
  reporter.setStatus(split.toString());
  return new RCFileRecordReader(job, (FileSplit) split);
}
/**
 * Returns a flat-file reader producing {@code RowContainer<T>} values for the split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code FlatFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<Void, RowContainer<T>> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  return new FlatFileRecordReader<T>(job, (FileSplit) split);
}
}
/**
 * Creates an Avro reader yielding {@code AvroWrapper<T>} keys for the given split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code AvroRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<AvroWrapper<T>, NullWritable> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new AvroRecordReader<>(job, fileSplit);
}
/**
 * Creates a sequence-file reader yielding Avro-wrapped key/value pairs for the split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code SequenceFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<AvroWrapper<Pair<K,V>>, NullWritable> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new SequenceFileRecordReader<>(job, fileSplit);
}
/**
 * Builds a PTF sequence-file reader for the supplied split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code PTFSequenceFileRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public org.apache.hadoop.mapred.RecordReader<K, V> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  // Let the framework display which split is being processed.
  reporter.setStatus(split.toString());
  return new PTFSequenceFileRecordReader<K, V>(job, (FileSplit) split);
}
}
/**
 * Builds a stripe-merge reader over ORC key/value wrappers for the split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code OrcFileStripeMergeRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<OrcFileKeyWrapper, OrcFileValueWrapper> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  return new OrcFileStripeMergeRecordReader(job, (FileSplit) split);
}
/**
 * Creates a reader that exposes Avro records as {@code Text} key/value pairs.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code AvroAsTextRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
    throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new AvroAsTextRecordReader(job, fileSplit);
}
}
/**
 * Creates a line reader yielding UTF-8 wrapped lines for the given split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code Utf8LineRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<AvroWrapper<Utf8>, NullWritable> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new Utf8LineRecordReader(job, fileSplit);
}
/**
 * Creates a tether-protocol reader for the given split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return a {@code TetherRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<TetherData, NullWritable> getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  final FileSplit fileSplit = (FileSplit) split;
  return new TetherRecordReader(job, fileSplit);
}
/**
 * Returns a block-merge reader over RCFile buffer wrappers for the split.
 *
 * @param split the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration
 * @param reporter receives the split description as the task status
 * @return an {@code RCFileBlockMergeRecordReader} over the split
 * @throws IOException if the reader cannot be created
 */
@Override
public RecordReader<RCFileKeyBufferWrapper, RCFileValueBufferWrapper> getRecordReader(
    InputSplit split, JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(split.toString());
  return new RCFileBlockMergeRecordReader(job, (FileSplit) split);
}
public EsInputRecordReader(org.apache.hadoop.mapred.InputSplit split, Configuration job, Reporter reporter) { reporter.setStatus(split.toString()); init((EsInputSplit) split, job, reporter); }
/**
 * Creates a reader that base64-decodes each line of the given split.
 *
 * @param genericSplit the split to read; expected to be a {@link FileSplit}
 * @param job the job configuration, also used to configure the returned reader
 * @param reporter receives the split description as the task status
 * @return a configured {@code Base64LineRecordReader} over the split
 * @throws IOException if the underlying line reader cannot be opened
 */
public RecordReader<LongWritable, BytesWritable> getRecordReader(InputSplit genericSplit,
    JobConf job, Reporter reporter) throws IOException {
  reporter.setStatus(genericSplit.toString());
  // Wrap a plain line reader with base64 decoding, then apply the job config.
  LineRecordReader lineReader = new LineRecordReader(job, (FileSplit) genericSplit);
  Base64LineRecordReader base64Reader = new Base64LineRecordReader(lineReader);
  base64Reader.configure(job);
  return base64Reader;
}