@Override
public void close() throws IOException {
  trr.close();
  closeTable();
}
/**
 * Build the scanner. Not done in constructor to allow for extension.
 *
 * @throws IOException
 */
public void init() throws IOException {
  this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow());
}
public static void initTableMapJob(String table, String columns,
    Class<? extends TableMap> mapper, Class<?> outputKeyClass,
    Class<?> outputValueClass, JobConf job, boolean addDependencyJars) {
  initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job,
      addDependencyJars, TableInputFormat.class);
}
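// Hedged usage sketch (not part of the original source): how a caller might wire a
// mapred job through the overload above. The table name, column spec, and the mapper
// class passed in are assumptions for illustration only.
public static JobConf exampleJobSetup(Class<? extends TableMap> mapperClass) {
  JobConf job = new JobConf(HBaseConfiguration.create());
  TableMapReduceUtil.initTableMapJob(
      "my_table",                     // assumed table name
      "cf:qual",                      // assumed "family:qualifier" column spec
      mapperClass,                    // a project-specific TableMap implementation
      ImmutableBytesWritable.class,   // map output key class
      Result.class,                   // map output value class
      job,
      true);                          // ship HBase dependency jars with the job
  return job;
}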
@Override
protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName,
    String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {
  doTestWithMapReduce(util, tableName, snapshotName, getStartRow(), getEndRow(), tableDir,
      numRegions, numSplitsPerRegion, expectedNumSplits, shutdownCluster);
}
@Override
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
  return trr.next(key, value);
}
};
public long getPos() {
  // This should be the ordinal tuple in the range;
  // not clear how to calculate...
  return this.recordReaderImpl.getPos();
}
/**
 * @param inputColumns the columns to be placed in {@link Result}.
 */
public void setInputColumns(final byte[][] inputColumns) {
  this.recordReaderImpl.setInputColumns(inputColumns);
}
/**
 * @param startRow the first row in the split
 */
public void setStartRow(final byte[] startRow) {
  this.recordReaderImpl.setStartRow(startRow);
}
/**
 * @param key ImmutableBytesWritable as input key.
 * @param value Result as input value
 * @return true if there was more data
 * @throws IOException
 */
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
  return this.recordReaderImpl.next(key, value);
}
}
/**
 * @param htable the {@link org.apache.hadoop.hbase.client.Table} to scan.
 */
public void setHTable(Table htable) {
  this.recordReaderImpl.setHTable(htable);
}
/**
 * @return ImmutableBytesWritable
 *
 * @see org.apache.hadoop.mapred.RecordReader#createKey()
 */
public ImmutableBytesWritable createKey() {
  return this.recordReaderImpl.createKey();
}
/**
 * @return Result
 *
 * @see org.apache.hadoop.mapred.RecordReader#createValue()
 */
public Result createValue() {
  return this.recordReaderImpl.createValue();
}
/**
 * Restart from survivable exceptions by creating a new scanner.
 *
 * @param firstRow the first row to scan from
 * @throws IOException
 */
public void restart(byte[] firstRow) throws IOException {
  this.recordReaderImpl.restart(firstRow);
}
/**
 * @param endRow the last row in the split
 */
public void setEndRow(final byte[] endRow) {
  this.recordReaderImpl.setEndRow(endRow);
}
/**
 * @param rowFilter the {@link Filter} to be used.
 */
public void setRowFilter(Filter rowFilter) {
  this.recordReaderImpl.setRowFilter(rowFilter);
}
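// Hedged lifecycle sketch (not part of the original source): the delegating reader whose
// methods appear above is typically configured, initialized, then drained. The reader type
// is assumed to be the mapred TableRecordReader; the Table instance, column spec, and row
// keys below are made-up examples.
void scanExample(TableRecordReader reader, Table table) throws IOException {
  reader.setHTable(table);
  reader.setInputColumns(new byte[][] { Bytes.toBytes("cf:qual") }); // assumed column spec
  reader.setStartRow(Bytes.toBytes("row-0000"));                     // assumed split start
  reader.setEndRow(Bytes.toBytes("row-9999"));                       // assumed split end
  reader.init();                                                     // builds the scanner
  ImmutableBytesWritable key = reader.createKey();
  Result value = reader.createValue();
  while (reader.next(key, value)) {
    // process one row: key holds the row key, value holds the selected cells
  }
  reader.close();
}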
/**
 * Close the Table and related objects that were initialized via
 * {@link #initializeTable(Connection, TableName)}.
 *
 * @throws IOException
 */
protected void closeTable() throws IOException {
  close(table, connection);
  table = null;
  connection = null;
}
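// Hedged sketch (not part of the original source): closeTable() releases the state set up
// by initializeTable(Connection, TableName). A subclass might pair the two like this; the
// connection creation and table name are illustrative assumptions.
protected void exampleInitialize(Configuration conf, TableName tableName) throws IOException {
  Connection connection = ConnectionFactory.createConnection(conf);
  initializeTable(connection, tableName); // stores both the Connection and the Table
}
// ... later, once no more record readers are needed:
//   closeTable(); // closes the Table and the Connection, then nulls the fields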
@Override
public ImmutableBytesWritable createKey() {
  return trr.createKey();
}
@Override
public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split,
    JobConf job, Reporter reporter) throws IOException {
  return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job);
}
/**
 * Set up a table with two rows and values.
 *
 * @param tableName the name of the table to create
 * @return a Table instance for the created table
 * @throws IOException
 */
public static Table createTable(byte[] tableName) throws IOException {
  return createTable(tableName, new byte[][] { FAMILY });
}
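// Hedged usage sketch (not part of the original source): exercising the helper above from a
// test in the same class. The table name, row key, qualifier, and value are made up; FAMILY
// is the same constant the helper applies.
@Test
public void exampleCreateTableUsage() throws IOException {
  Table table = createTable(Bytes.toBytes("testtable")); // assumed table name
  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  table.put(put);
  Result result = table.get(new Get(Bytes.toBytes("row1")));
  assertArrayEquals(Bytes.toBytes("value"), result.getValue(FAMILY, Bytes.toBytes("qual")));
}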
public float getProgress() {
  // Depends on the total number of tuples and getPos
  return this.recordReaderImpl.getProgress();
}