initializeTable(conn, tableSplit.getTable());
setScan(HiveHBaseInputFormatUtil.getScan(jobConf));
recordReader = createRecordReader(tableSplit, tac);
try {
  recordReader.initialize(tableSplit, tac);
} catch (InterruptedException e) {
  closeTable(); // Free up the HTable connections
  conn.close();
  throw new IOException("Failed to initialize RecordReader", e);
}
IndexPredicateAnalyzer analyzer = newIndexPredicateAnalyzer(keyColName, isKeyComparable, tsColName);
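// For context: newIndexPredicateAnalyzer builds the IndexPredicateAnalyzer that decides
// which predicates may be pushed down to HBase. A minimal sketch, assuming range
// comparisons are only registered when the key's storage format sorts correctly
// (the exact operator registration in Hive may differ):
static IndexPredicateAnalyzer newIndexPredicateAnalyzer(
    String keyColumnName, boolean isKeyComparable, String timestampColumn) {
  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
  if (isKeyComparable) {
    // Binary or UTF8-sortable keys support range scans as well as equality.
    analyzer.addComparisonOp(keyColumnName,
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
  } else {
    // Otherwise only equality is safe to push down.
    analyzer.addComparisonOp(keyColumnName,
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
  }
  if (timestampColumn != null) {
    analyzer.addComparisonOp(timestampColumn,
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan",
        "org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
  }
  return analyzer;
}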
initializeTable(conn, tableName);
Scan scan = createFilterScan(jobConf, iKey, iTimestamp,
    HiveHBaseInputFormatUtil.getStorageFormatOfKey(keyMapping.mappingSpec,
        jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
setScan(scan);
closeTable();
conn.close();
setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
convertFilter(jobConf, scan, null, iKey,
    getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
        jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
setScan(scan);
Job job = new Job(jobConf);
JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
TableSplit tableSplit = hbaseSplit.getTableSplit();
setHTable(HiveHBaseInputFormatUtil.getTable(jobConf));
setScan(HiveHBaseInputFormatUtil.getScan(jobConf));
recordReader = createRecordReader(tableSplit, tac);
try {
  recordReader.initialize(tableSplit, tac);
} catch (InterruptedException e) {
  throw new IOException("Failed to initialize RecordReader", e);
}
IndexPredicateAnalyzer analyzer = newIndexPredicateAnalyzer(colName, colType, isKeyBinary);
byte[] constantVal = getConstantVal(writable, objInspector, isKeyBinary);
String comparisonOp = sc.getComparisonOp();
// Translate the pushed-down comparison into start/stop row bounds for the scan.
if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual".equals(comparisonOp)) {
  startRow = constantVal;
  stopRow = getNextBA(constantVal);
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan".equals(comparisonOp)) {
  stopRow = constantVal;
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan".equals(comparisonOp)) {
  startRow = getNextBA(constantVal);
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan".equals(comparisonOp)) {
  stopRow = getNextBA(constantVal);
} else {
  throw new IOException(comparisonOp + " is not a supported comparison operator");
}
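// getNextBA supplies the "next" byte array so that an inclusive bound can be expressed
// with HBase's exclusive stopRow (and an exclusive lower bound with the inclusive
// startRow). A minimal sketch of such a helper: appending a trailing 0x00 byte yields
// the smallest byte array that sorts strictly after the current one.
private byte[] getNextBA(byte[] current) {
  byte[] next = new byte[current.length + 1];
  System.arraycopy(current, 0, next, 0, current.length);
  return next;
}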
setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
Scan scan = createFilterScan(jobConf, iKey, iTimestamp,
    HiveHBaseInputFormatUtil.getStorageFormatOfKey(keyMapping.mappingSpec,
        jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
setScan(scan);
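// HiveHBaseInputFormatUtil.getStorageFormatOfKey resolves whether the row key is stored
// in binary or string form, falling back to the table-level default. A hedged sketch of
// that resolution, assuming the mapping spec uses the ":key#b" / ":key#s" suffix
// convention (the exact parsing in Hive may differ):
public static boolean getStorageFormatOfKey(String spec, String defaultFormat) throws IOException {
  String[] mapInfo = spec.split("#");
  boolean tblLevelDefault = "binary".equalsIgnoreCase(defaultFormat);
  switch (mapInfo.length) {
    case 1:
      // No per-column override; use the table-level default storage type.
      return tblLevelDefault;
    case 2:
      String storageType = mapInfo[1];
      if (storageType.equals("-")) {
        return tblLevelDefault;
      } else if ("string".startsWith(storageType)) {
        return false;
      } else if ("binary".startsWith(storageType)) {
        return true;
      }
      // Unrecognized suffix: fall through and report the spec as malformed.
    default:
      throw new IOException("Malformed string: " + spec);
  }
}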
IndexPredicateAnalyzer analyzer =
    newIndexPredicateAnalyzer(keyColName, isKeyComparable, tsColName);
List<IndexSearchCondition> keyConditions = split.get(keyColName);
if (keyConditions != null && !keyConditions.isEmpty()) {
  setupKeyRange(scan, keyConditions, isKeyBinary);
}
setupTimeRange(scan, tsConditions);
byte[] constantVal = getConstantVal(writable, objInspector, isBinary);
String comparisonOp = sc.getComparisonOp();
if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual".equals(comparisonOp)) {
  startRow = constantVal;
  stopRow = getNextBA(constantVal);
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan".equals(comparisonOp)) {
  stopRow = constantVal;
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan".equals(comparisonOp)) {
  startRow = getNextBA(constantVal);
} else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan".equals(comparisonOp)) {
  stopRow = getNextBA(constantVal);
} else {
  throw new IOException(comparisonOp + " is not a supported comparison operator");
}
@Override
public InputSplit[] getSplits(final JobConf jobConf, final int numSplits) throws IOException {
  synchronized (HBASE_TABLE_MONITOR) {
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    if (ugi == null) {
      return getSplitsInternal(jobConf, numSplits);
    }
    try {
      return ugi.doAs(new PrivilegedExceptionAction<InputSplit[]>() {
        @Override
        public InputSplit[] run() throws IOException {
          return getSplitsInternal(jobConf, numSplits);
        }
      });
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
}
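// The monitor serializes access to the shared table/connection state across concurrent
// getSplits and getRecordReader calls; a plausible declaration (hypothetical, named to
// match the usage above):
private static final Object HBASE_TABLE_MONITOR = new Object();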
@Override
protected void finalize() throws Throwable {
  try {
    closeTable();
  } finally {
    super.finalize();
  }
}
}
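// closeTable releases the handle acquired by initializeTable/setHTable so the finalizer
// above cannot leak connections. A minimal sketch, assuming the handle lives in a
// 'table' field (hypothetical field name):
protected void closeTable() throws IOException {
  if (table != null) {
    table.close();
    table = null;
  }
}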
TableSplit tableSplit = hbaseSplit.getSplit();
String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);
tableSplit = convertFilter(jobConf, scan, tableSplit, iKey,
    getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
        jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
setScan(scan);
Job job = new Job(jobConf);
TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
    job.getConfiguration(), reporter);
recordReader = createRecordReader(tableSplit, tac);
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
  synchronized (hbaseTableMonitor) {
    return getSplitsInternal(jobConf, numSplits);
  }
}
ColumnMapping keyMapping = hBaseSerDe.getHBaseSerdeParam().getKeyColumnMapping();
ColumnMapping tsMapping = hBaseSerDe.getHBaseSerdeParam().getTimestampColumnMapping();
IndexPredicateAnalyzer analyzer = HiveHBaseTableInputFormat.newIndexPredicateAnalyzer(
    keyMapping.columnName, keyMapping.isComparable(),
    tsMapping == null ? null : tsMapping.columnName);
split(",")[keyColPos]; IndexPredicateAnalyzer analyzer = HiveHBaseTableInputFormat.newIndexPredicateAnalyzer(columnNames.get(keyColPos), keyColType, hbaseSerde.getStorageFormatOfCol(keyColPos).get(0)); List<IndexSearchCondition> searchConditions =