/**
 * Configures the mapper side of the copy job on {@code job}.
 * <p>
 * The mapper class depends on the mode: {@code CellImporter} when bulkloading,
 * {@code Importer} otherwise. The input is wired either against a snapshot
 * (restored into a fresh unique temp dir) or against the live table.
 *
 * @param job  the MapReduce job being configured
 * @param scan the scan defining which cells to copy
 * @throws IOException if the snapshot restore directory cannot be created
 */
private void initCopyTableMapperReducerJob(Job job, Scan scan) throws IOException {
  final Class<? extends TableMapper> mapperClass;
  if (bulkload) {
    mapperClass = CellImporter.class;
  } else {
    mapperClass = Importer.class;
  }
  if (readingSnapshot) {
    // Snapshot input: restore into a unique temp dir; addDependencyJars = true.
    TableMapReduceUtil.initTableSnapshotMapperJob(snapshot, scan, mapperClass, null, null, job,
      true, generateUniqTempDir(true));
  } else {
    // Live-table input: output key/value classes left null (set elsewhere or unused).
    TableMapReduceUtil.initTableMapperJob(tableName, scan, mapperClass, null, null, job);
  }
}
/**
 * Initializes a snapshot-backed mapper job for the back-reference-links regression test.
 * The snapshot is restored into {@code tmpTableDir}; addDependencyJars is false since this
 * runs in-process in a test.
 *
 * @param tableName    unused here; part of the overridden contract
 * @param snapshotName snapshot to read from
 * @param tmpTableDir  directory the snapshot is restored into
 */
@Override
public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName,
    String snapshotName, Path tmpTableDir) throws Exception {
  // Job.getInstance is the supported factory; the Job(Configuration) constructor is deprecated.
  Job job = Job.getInstance(UTIL.getConfiguration());
  TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(),
    TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
    tmpTableDir);
}
// NOTE(review): fragment of a larger method (enclosing definition not visible) — left byte-identical.
// Logs the source snapshot and temp dir, wires the Verifier mapper against the source snapshot
// (restored under snapshotTempPath, addDependencyJars = true), then restores the peer cluster's
// snapshot for comparison. Presumably part of VerifyReplication's snapshot mode — TODO confirm.
LOG.info( "Using source snapshot-" + sourceSnapshotName + " with temp dir:" + sourceSnapshotTmpDir); TableMapReduceUtil.initTableSnapshotMapperJob(sourceSnapshotName, scan, Verifier.class, null, null, job, true, snapshotTempPath); restoreSnapshotForPeerCluster(conf, peerQuorumAddress);
// NOTE(review): fragment cut from a larger if/else (both the opening `if` and the closing brace
// are outside this view) — left byte-identical. First branch configures the snapshot mapper job
// with an explicit split algorithm (UniformSplit) and numSplitsPerRegion; the else branch uses
// default splitting. Both restore into tableDir with addDependencyJars = true.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, true, tableDir);
// NOTE(review): truncated call — the remaining arguments (mapper, key/value classes, job,
// addDependencyJars, restore dir) continue beyond this view. Left byte-identical.
TableMapReduceUtil.initTableSnapshotMapperJob( this.snapshotName, scan,
@Test public void testWithMockedMapReduceWithSplitsPerRegion() throws Exception { setupCluster(); String snapshotName = "testWithMockedMapReduceMultiRegion"; final TableName tableName = TableName.valueOf(name.getMethodName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10); Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false); Job job = new Job(conf); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); // test scan with startRow and stopRow Scan scan = new Scan(bbc, yya); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), 5); verifyWithMockedMapReduce(job, 10, 40, bbc, yya); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
// NOTE(review): fragment cut from a larger if/else (the opening `if` and closing brace are
// outside this view) — left byte-identical. First branch uses UniformSplit with
// numSplitsPerRegion; the else branch falls back to default splitting. Both restore into
// tmpTableDir with addDependencyJars = false.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
@Test public void testWithMockedMapReduceWithNoStartRowStopRow() throws Exception { setupCluster(); String snapshotName = "testWithMockedMapReduceMultiRegion"; final TableName tableName = TableName.valueOf(name.getMethodName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10); Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false); Job job = new Job(conf); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); // test scan without startRow and stopRow Scan scan2 = new Scan(); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan2, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), 5); verifyWithMockedMapReduce(job, 10, 50, HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
@Test public void testInitTableSnapshotMapperJobConfig() throws Exception { setupCluster(); final TableName tableName = TableName.valueOf(name.getMethodName()); String snapshotName = "foo"; try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. Assert.assertEquals( "Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); Assert.assertEquals( "Snapshot job should not use BucketCache.", 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
// NOTE(review): single statement from a larger method (enclosing definition not visible) —
// left byte-identical. Configures a snapshot mapper job restored into tmpTableDir with
// default splitting and addDependencyJars = false.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
// NOTE(review): truncated fragment — the restore-dir argument and closing of the call continue
// beyond this view; left byte-identical. Creates the analysis job (already using the
// recommended Job.getInstance factory) and begins wiring AnalyzeMapper against the snapshot
// with addDependencyJars = true.
Job job = Job.getInstance(conf, "Analyze data in snapshot " + table); job.setJarByClass(AnalyzeSnapshotData.class); TableMapReduceUtil.initTableSnapshotMapperJob(snapshot, scan, AnalyzeMapper.class, Text.class, IntWritable.class, job, true,
/**
 * Initializes a snapshot-backed mapper job for the back-reference-links regression test.
 * The snapshot is restored into {@code tmpTableDir}; addDependencyJars is false since this
 * runs in-process in a test.
 *
 * @param tableName    unused here; part of the overridden contract
 * @param snapshotName snapshot to read from
 * @param tmpTableDir  directory the snapshot is restored into
 */
@Override
public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName,
    String snapshotName, Path tmpTableDir) throws Exception {
  // Job.getInstance is the supported factory; the Job(Configuration) constructor is deprecated.
  Job job = Job.getInstance(UTIL.getConfiguration());
  TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(),
    TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
    tmpTableDir);
}
/**
 * Initializes a snapshot-backed mapper job for the back-reference-links regression test.
 * The snapshot is restored into {@code tmpTableDir}; addDependencyJars is false since this
 * runs in-process in a test.
 *
 * @param tableName    unused here; part of the overridden contract
 * @param snapshotName snapshot to read from
 * @param tmpTableDir  directory the snapshot is restored into
 */
@Override
public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName,
    String snapshotName, Path tmpTableDir) throws Exception {
  // Job.getInstance is the supported factory; the Job(Configuration) constructor is deprecated.
  Job job = Job.getInstance(UTIL.getConfiguration());
  TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(),
    TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false,
    tmpTableDir);
}
// NOTE(review): fragment cut from a larger if/else (the opening `if` and closing brace are
// outside this view) — left byte-identical. First branch uses UniformSplit with
// numSplitsPerRegion; the else branch falls back to default splitting. Both restore into
// tableDir with addDependencyJars = true.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, true, tableDir);
// NOTE(review): fragment cut from a larger if/else (the opening `if` and closing brace are
// outside this view) — left byte-identical. First branch uses UniformSplit with
// numSplitsPerRegion; the else branch falls back to default splitting. Both restore into
// tmpTableDir with addDependencyJars = false.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
@Test public void testWithMockedMapReduceWithSplitsPerRegion() throws Exception { setupCluster(); String snapshotName = "testWithMockedMapReduceMultiRegion"; final TableName tableName = TableName.valueOf(name.getMethodName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10); Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false); Job job = new Job(conf); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); // test scan with startRow and stopRow Scan scan = new Scan(bbc, yya); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), 5); verifyWithMockedMapReduce(job, 10, 40, bbc, yya); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
// NOTE(review): fragment cut from a larger if/else (the opening `if` and closing brace are
// outside this view) — left byte-identical. First branch uses UniformSplit with
// numSplitsPerRegion; the else branch falls back to default splitting. Both restore into
// tmpTableDir with addDependencyJars = false.
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
@Test public void testWithMockedMapReduceWithNoStartRowStopRow() throws Exception { setupCluster(); String snapshotName = "testWithMockedMapReduceMultiRegion"; final TableName tableName = TableName.valueOf(name.getMethodName()); try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 10); Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, false); Job job = new Job(conf); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); // test scan without startRow and stopRow Scan scan2 = new Scan(); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan2, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), 5); verifyWithMockedMapReduce(job, 10, 50, HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
@Test public void testInitTableSnapshotMapperJobConfig() throws Exception { setupCluster(); final TableName tableName = TableName.valueOf(name.getMethodName()); String snapshotName = "foo"; try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. Assert.assertEquals( "Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); Assert.assertEquals( "Snapshot job should not use BucketCache.", 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }
@Test public void testInitTableSnapshotMapperJobConfig() throws Exception { setupCluster(); final TableName tableName = TableName.valueOf(name.getMethodName()); String snapshotName = "foo"; try { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. Assert.assertEquals( "Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); Assert.assertEquals( "Snapshot job should not use BucketCache.", 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); tearDownCluster(); } }