/**
 * Sets up the job for reading from a table snapshot. It bypasses HBase servers and reads
 * directly from snapshot files.
 * @param snapshotName The name of the snapshot (of a table) to read from.
 * @param scan The scan instance with the columns, time range etc.
 * @param mapper The mapper class to use.
 * @param outputKeyClass The class of the output key.
 * @param outputValueClass The class of the output value.
 * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
 *          configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
 *          the distributed cache (tmpjars).
 * @param tmpRestoreDir a temporary directory to copy the snapshot files into. The current user
 *          should have write permissions to this directory, and it should not be a subdirectory
 *          of the HBase root directory. After the job is finished, the restore directory can be
 *          deleted.
 * @throws IOException When setting up the details fails.
 * @see TableSnapshotInputFormat
 */
public static void initTableSnapshotMapperJob(String snapshotName, Scan scan,
    Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
    Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
  TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
  initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job,
      addDependencyJars, false, TableSnapshotInputFormat.class);
  resetCacheConfig(job.getConfiguration());
}
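// Usage sketch (illustrative, not part of the HBase source): a minimal driver wiring a
// snapshot scan into a MapReduce job via initTableSnapshotMapperJob. The snapshot name
// "my_snapshot", column family "cf", restore path, and RowCountMapper class below are
// assumed placeholders, not names from this API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanDriver {

  /** Trivial mapper: emits one count per row read from the snapshot. */
  static class RowCountMapper extends TableMapper<Text, LongWritable> {
    private static final LongWritable ONE = new LongWritable(1);
    @Override
    protected void map(ImmutableBytesWritable rowKey, Result result, Context context)
        throws java.io.IOException, InterruptedException {
      context.write(new Text(rowKey.get()), ONE);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-row-count");
    job.setJarByClass(SnapshotScanDriver.class);

    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));  // hypothetical column family
    scan.setCacheBlocks(false);           // block caching is wasted work in a one-pass batch scan

    TableMapReduceUtil.initTableSnapshotMapperJob(
        "my_snapshot",                      // hypothetical snapshot name
        scan,
        RowCountMapper.class,
        Text.class, LongWritable.class,     // mapper output key/value classes
        job,
        true,                               // ship HBase jars via the distributed cache
        new Path("/tmp/snapshot-restore")); // writable dir, not under the HBase rootdir

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}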
/**
 * Sets up the job for reading from a table snapshot. It bypasses HBase servers and reads
 * directly from snapshot files.
 *
 * @param snapshotName The name of the snapshot (of a table) to read from.
 * @param columns The columns to scan.
 * @param mapper The mapper class to use.
 * @param outputKeyClass The class of the output key.
 * @param outputValueClass The class of the output value.
 * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
 *          configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
 *          the distributed cache (tmpjars).
 * @param tmpRestoreDir a temporary directory to copy the snapshot files into. The current user
 *          should have write permissions to this directory, and it should not be a subdirectory
 *          of the HBase root directory. After the job is finished, the restore directory can be
 *          deleted.
 * @throws IOException When setting up the details fails.
 * @see TableSnapshotInputFormat
 */
public static void initTableSnapshotMapJob(String snapshotName, String columns,
    Class<? extends TableMap> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
    JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
  TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
  initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, job,
      addDependencyJars, TableSnapshotInputFormat.class);
  org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.resetCacheConfig(job);
}
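// Usage sketch (illustrative, not part of the HBase source): the same snapshot scan set up
// through the old mapred API via initTableSnapshotMapJob. The snapshot name, columns string,
// restore path, and RowCountMap class below are assumed placeholders; the columns argument is
// a space-separated list of family:qualifier pairs.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class SnapshotScanMapredDriver {

  /** Trivial mapred-style map: emits one count per row read from the snapshot. */
  public static class RowCountMap extends MapReduceBase
      implements TableMap<Text, LongWritable> {
    private static final LongWritable ONE = new LongWritable(1);
    @Override
    public void map(ImmutableBytesWritable rowKey, Result result,
        OutputCollector<Text, LongWritable> collector, Reporter reporter) throws IOException {
      collector.collect(new Text(rowKey.get()), ONE);
    }
  }

  public static void main(String[] args) throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create(), SnapshotScanMapredDriver.class);
    job.setJobName("snapshot-row-count-mapred");

    TableMapReduceUtil.initTableSnapshotMapJob(
        "my_snapshot",                      // hypothetical snapshot name
        "cf:qual1 cf:qual2",                // hypothetical space-separated columns
        RowCountMap.class,
        Text.class, LongWritable.class,     // map output key/value classes
        job,
        true,                               // ship HBase jars via the distributed cache
        new Path("/tmp/snapshot-restore")); // writable dir, not under the HBase rootdir

    JobClient.runJob(job);
  }
}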