@Override
public void checkOutputSpecs(FileSystem ignored, JobConf jc) throws IOException {
  // delegate to the new api
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
  checkOutputSpecs(jobContext);
}
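The ShimLoader.getHadoopShims().newJobContext(...) call that recurs throughout these snippets bridges the old mapred API to the new mapreduce API across Hadoop versions. A minimal sketch of what a Hadoop 2.x shim might do, assuming the concrete JobContextImpl from org.apache.hadoop.mapreduce.task; the class name Hadoop23ShimsSketch and the method placement are illustrative, not taken from the snippets:

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.task.JobContextImpl;

// Illustrative shim: wraps a new-API Job in the new-API JobContext type.
public class Hadoop23ShimsSketch {
  public JobContext newJobContext(Job job) {
    // JobContextImpl(Configuration, JobID) is the concrete
    // mapreduce-API context class in Hadoop 2.x.
    return new JobContextImpl(job.getConfiguration(), job.getJobID());
  }
}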
@Override
public void checkOutputSpecs(FileSystem fs, JobConf jc) throws IOException {
  // obtain delegation tokens for the job
  if (UserGroupInformation.getCurrentUser().hasKerberosCredentials()) {
    TableMapReduceUtil.initCredentials(jc);
  }
  String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
  jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
  try {
    checkOutputSpecs(jobContext);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
Path[] paths = FileInputFormat.getInputPaths(jobContext);
JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
.newJobContext(new Job(jobConf)));
boolean splitByStats = jobConf.getBoolean(
    PhoenixStorageHandlerConstants.SPLIT_BY_STATS, false);
@Override
public void checkOutputSpecs(FileSystem fs, JobConf jc) throws IOException {
  // obtain delegation tokens for the job
  TableMapReduceUtil.initCredentials(jc);
  String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
  jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
  try {
    checkOutputSpecs(jobContext);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
/**
 * Check the output specification of the job: obtain HBase delegation tokens
 * when the current user has Kerberos credentials, propagate the target HBase
 * table name to the new-API TableOutputFormat, and delegate the actual check
 * to the mapreduce-API checkOutputSpecs.
 *
 * @param fs the file system handle (unused)
 * @param jc the job configuration
 * @throws IOException if the output specification cannot be validated
 */
@Override
public void checkOutputSpecs(FileSystem fs, JobConf jc) throws IOException {
  // obtain delegation tokens for the job
  if (UserGroupInformation.getCurrentUser().hasKerberosCredentials()) {
    TableMapReduceUtil.initCredentials(jc);
  }
  String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
  jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
  Job job = new Job(jc);
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
  try {
    checkOutputSpecs(jobContext);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
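For context, a hedged sketch of how a caller might wire this output format into a JobConf so the checkOutputSpecs variants above can find the table name. The class names HBaseSerDe and HiveHBaseTableOutputFormat are inferred from the identifiers in the snippets, and the table name value is an assumption:

import org.apache.hadoop.hive.hbase.HBaseSerDe;
import org.apache.hadoop.hive.hbase.HiveHBaseTableOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class OutputFormatWiringSketch {
  // Hypothetical wiring; "default:example_table" is an assumed table name.
  static JobConf configure() {
    JobConf jc = new JobConf();
    jc.set(HBaseSerDe.HBASE_TABLE_NAME, "default:example_table");
    jc.setOutputFormat(HiveHBaseTableOutputFormat.class);
    // checkOutputSpecs(fs, jc) will copy the table name into
    // TableOutputFormat.OUTPUT_TABLE and delegate to the new-API check.
    return jc;
  }
}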
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
  try (DatasetAccessor datasetAccessor = new DatasetAccessor(jobConf)) {
    try {
      datasetAccessor.initialize();
    } catch (Exception e) {
      throw new IOException("Could not get dataset", e);
    }
    try (RecordScannable recordScannable = datasetAccessor.getDataset()) {
      Job job = new Job(jobConf);
      JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
      Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
      List<Split> dsSplits = recordScannable.getSplits();
      InputSplit[] inputSplits = new InputSplit[dsSplits.size()];
      for (int i = 0; i < dsSplits.size(); i++) {
        inputSplits[i] = new DatasetInputSplit(dsSplits.get(i), tablePaths[0]);
      }
      return inputSplits;
    }
  }
}
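The tablePaths[0] handed to each DatasetInputSplit exists only because Hive expects splits rooted at the table location; the stream snippet at the end of this section says as much ("It is just to satisfy hive. The InputFormat never uses it."). A hedged sketch of what such a wrapper split might look like; the class name, the string serialization of the underlying split, and the field layout are all assumptions rather than the actual DatasetInputSplit implementation:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;

// Hypothetical wrapper: the FileSplit parent carries the Hive table path,
// while the real split information travels as an extra Writable field.
public class DatasetInputSplitSketch extends FileSplit {
  private String serializedSplit; // assumed string-serialized dataset split

  public DatasetInputSplitSketch() {
    // no-arg constructor required for Writable deserialization
  }

  public DatasetInputSplitSketch(String serializedSplit, Path tablePath) {
    // zero start/length: the path exists only to satisfy Hive's expectations
    super(tablePath, 0L, 0L, (String[]) null);
    this.serializedSplit = serializedSplit;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeUTF(serializedSplit);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    serializedSplit = in.readUTF();
  }
}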
JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(new Job(job));
Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
int i = 0;
JobContext context = ShimLoader.getHadoopShims().newJobContext(Job.getInstance(jobConf));
Path[] tablePaths = FileInputFormat.getInputPaths(context);
private StreamInputSplitFinder<InputSplit> getSplitFinder(JobConf conf) throws IOException {
  // first get the context we are in
  ContextManager.Context context = ContextManager.getContext(conf);
  Preconditions.checkNotNull(context);
  StreamConfig streamConfig = context.getStreamConfig(getStreamId(conf));
  // make sure we get the current generation so we don't read events that occurred before a truncate.
  Location streamPath = StreamUtils.createGenerationLocation(streamConfig.getLocation(),
      StreamUtils.getGeneration(streamConfig));
  StreamInputSplitFinder.Builder builder = StreamInputSplitFinder.builder(streamPath.toURI());
  // Get the Hive table path for the InputSplit created. It is just to satisfy hive.
  // The InputFormat never uses it.
  JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(Job.getInstance(conf));
  final Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
  return setupBuilder(conf, streamConfig, builder).build(new StreamInputSplitFactory<InputSplit>() {
    @Override
    public InputSplit createSplit(Path eventPath, Path indexPath, long startTime, long endTime,
                                  long start, long length, @Nullable String[] locations) {
      return new StreamInputSplit(tablePaths[0], eventPath, indexPath, startTime, endTime,
          start, length, locations);
    }
  });
}