/**
 * Orders {@link TableSplit}s lexicographically by their start row.
 *
 * @param o the split to compare this one against
 * @return a negative, zero, or positive value as this split's start row
 *         sorts before, equal to, or after {@code o}'s start row
 */
@Override
public int compareTo(TableSplit o) {
  final byte[] thisStart = getStartRow();
  final byte[] otherStart = o.getStartRow();
  return Bytes.compareTo(thisStart, otherStart);
}
// Point the record reader at this split's row range, then hand it the table handle.
trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table);
// Orders splits lexicographically by start row via Bytes.compareTo.
// NOTE(review): only the start row participates in the ordering — if equals()
// considers other fields, compareTo is inconsistent with equals; confirm.
// (The trailing brace closes the enclosing class, whose header is outside this view.)
public int compareTo(TableSplit o) { return Bytes.compareTo(getStartRow(), o.getStartRow()); } }
/**
 * Orders {@link TableSplit}s lexicographically by their start row.
 *
 * @param o the split to compare this one against
 * @return a negative, zero, or positive value as this split's start row
 *         sorts before, equal to, or after {@code o}'s start row
 */
@Override
public int compareTo(TableSplit o) {
  final byte[] thisStart = getStartRow();
  final byte[] otherStart = o.getStartRow();
  return Bytes.compareTo(thisStart, otherStart);
}
/**
 * Orders {@link TableSplit}s lexicographically by their start row.
 *
 * @param o the split to compare this one against
 * @return a negative, zero, or positive value as this split's start row
 *         sorts before, equal to, or after {@code o}'s start row
 */
@Override
public int compareTo(TableSplit o) {
  final byte[] thisStart = getStartRow();
  final byte[] otherStart = o.getStartRow();
  return Bytes.compareTo(thisStart, otherStart);
}
/**
 * Orders {@link TableSplit}s lexicographically by their start row.
 *
 * @param o the split to compare this one against
 * @return a negative, zero, or positive value as this split's start row
 *         sorts before, equal to, or after {@code o}'s start row
 */
@Override
public int compareTo(TableSplit o) {
  final byte[] thisStart = getStartRow();
  final byte[] otherStart = o.getStartRow();
  return Bytes.compareTo(thisStart, otherStart);
}
@Override public RecordReader<ImmutableBytesWritable, ResultWritable> getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException { String jobString = job.get(HCatConstants.HCAT_KEY_JOB_INFO); InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(jobString); String tableName = job.get(TableInputFormat.INPUT_TABLE); TableSplit tSplit = (TableSplit) split; HbaseSnapshotRecordReader recordReader = new HbaseSnapshotRecordReader(inputJobInfo, job); inputFormat.setConf(job); Scan inputScan = inputFormat.getScan(); // TODO: Make the caching configurable by the user inputScan.setCaching(200); inputScan.setCacheBlocks(false); Scan sc = new Scan(inputScan); sc.setStartRow(tSplit.getStartRow()); sc.setStopRow(tSplit.getEndRow()); recordReader.setScan(sc); recordReader.setHTable(new HTable(job, tableName)); recordReader.init(); return recordReader; }
// Point the record reader at this split's row range, then hand it the table handle.
trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table);
// Point the record reader at this split's row range, then hand it the table handle.
trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table);
// Point the record reader at this split's row range, then hand it the table handle.
trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table);
/** * Builds a TableRecordReader. If no TableRecordReader was provided, uses * the default. * * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, * JobConf, Reporter) */ public RecordReader<ImmutableBytesWritable, Result> getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException { TableSplit tSplit = (TableSplit) split; TableRecordReader trr = this.tableRecordReader; // if no table record reader was provided use default if (trr == null) { trr = new TableRecordReader(); } trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table); trr.setInputColumns(this.inputColumns); trr.setRowFilter(this.rowFilter); trr.init(); return trr; }