@Override
public RecordReader<NullWritable, CompactorInputSplit> getRecordReader(
    InputSplit inputSplit, JobConf entries, Reporter reporter) throws IOException {
  return new CompactorRecordReader((CompactorInputSplit) inputSplit);
}
public void init(AtomicBoolean stop, AtomicBoolean looped) throws Exception {
  setPriority(MIN_PRIORITY);
  setDaemon(true); // this means the process will exit without waiting for this thread
  this.stop = stop;
  this.looped = looped;
}
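// For context, a minimal sketch (assumed, not verbatim source) of the run loop these
// flags would drive: `stop` requests shutdown, and `looped` lets tests observe that a
// full pass has completed. Pre-setting `stop` before run() yields exactly one pass.
@Override
public void run() {
  do {
    // ... one pass of compaction housekeeping ...
    looped.set(true); // signal observers that a full pass finished
  } while (!stop.get());
}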
@Override
public boolean next(NullWritable key, CompactorInputSplit compactorInputSplit) throws IOException {
  // Serve the single split exactly once, then report end of input.
  if (split != null) {
    compactorInputSplit.set(split);
    split = null;
    return true;
  }
  return false;
}
Worker worker = new Worker();
worker.start();
worker.waitUntilReady(); // <- ADDED: block until the worker's handler exists
worker.handler.sendMessage(...);
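// The ADDED line above closes a startup race: if `handler` is constructed on the
// worker's own thread, the caller can reach sendMessage() while `handler` is still
// null. A minimal sketch of waitUntilReady(), assuming an Android Looper/Handler
// worker and java.util.concurrent.CountDownLatch (shapes assumed for illustration):
class Worker extends Thread {
  private final CountDownLatch ready = new CountDownLatch(1);
  volatile Handler handler;

  @Override
  public void run() {
    Looper.prepare();
    handler = new Handler(Looper.myLooper());
    ready.countDown(); // handler is now safe to use from other threads
    Looper.loop();
  }

  void waitUntilReady() throws InterruptedException {
    ready.await(); // blocks until the worker thread has assigned `handler`
  }
}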
@Override
public CompactorInputSplit createValue() {
  return new CompactorInputSplit();
}
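// getRecordReader(), next(), and createValue() above imply a reader that serves its
// one split and then reports end of input. For completeness, a sketch of the rest of
// the org.apache.hadoop.mapred.RecordReader contract (assumed shape, not verbatim):
@Override
public NullWritable createKey() {
  return NullWritable.get();
}

@Override
public long getPos() throws IOException {
  return 0; // a single in-memory split has no meaningful byte offset
}

@Override
public float getProgress() throws IOException {
  return split == null ? 1.0f : 0.0f; // done once the split has been handed out
}

@Override
public void close() throws IOException {
  // nothing to release; the split lives in memory
}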
public static StatsUpdater init(CompactionInfo ci, List<String> columnListForStats,
    HiveConf conf, String userName) {
  return new StatsUpdater(ci, columnListForStats, conf, userName);
}
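// Typical use of the factory above, sketched under the assumption that StatsUpdater
// exposes a gatherStats() entry point (call shown for illustration only):
StatsUpdater su = StatsUpdater.init(ci, columnListForStats, conf, userName);
su.gatherStats(); // recompute column statistics after the compaction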
protected void addDeltaFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords,
    int numBuckets, boolean allBucketsPresent) throws Exception {
  addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, numBuckets, allBucketsPresent);
}
protected void startInitiator() throws Exception {
  startThread('i', true); // 'i' selects the Initiator thread
}
protected Table newTable(String dbName, String tableName, boolean partitioned,
    Map<String, String> parameters) throws TException {
  return newTable(dbName, tableName, partitioned, parameters, null, false);
}
@Override
public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(
    Path path, Options options) throws IOException {
  return new MockRecordWriter(path, options);
}
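// A minimal sketch of what MockRecordWriter could look like (assumed for
// illustration; the real test class may differ). FileSinkOperator.RecordWriter
// requires only write(Writable) and close(boolean):
static class MockRecordWriter implements org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter {
  final List<Writable> rows = new ArrayList<>();

  MockRecordWriter(Path path, Options options) {
    // path and options satisfy the factory signature; an in-memory mock needs neither
  }

  @Override
  public void write(Writable row) throws IOException {
    rows.add(row); // capture rows in memory instead of writing to the filesystem
  }

  @Override
  public void close(boolean abort) throws IOException {
    // nothing to flush for an in-memory mock
  }
}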
protected void burnThroughTransactions(String dbName, String tblName, int num)
    throws MetaException, NoSuchTxnException, TxnAbortedException {
  burnThroughTransactions(dbName, tblName, num, null, null);
}
protected Partition newPartition(Table t, String value) throws Exception {
  return newPartition(t, value, null);
}
protected void addBaseFile(Table t, Partition p, long maxTxn, int numRecords,
    int numBuckets, boolean allBucketsPresent) throws Exception {
  addFile(t, p, 0, maxTxn, numRecords, FileType.BASE, numBuckets, allBucketsPresent);
}
protected void startCleaner() throws Exception {
  startThread('c', true); // 'c' selects the Cleaner thread
}
protected Table newTempTable(String tableName) throws TException {
  return newTable("default", tableName, false, null, null, true);
}
protected void addDeltaFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords)
    throws Exception {
  // Convenience overload: two buckets, all buckets present.
  addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, 2, true);
}
protected void startWorker() throws Exception {
  startThread('w', true); // 'w' selects the Worker thread
}
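// The one-character codes above ('i', 'w', 'c') suggest a dispatcher along these
// lines (assumed shape; the real test helper may differ). Pre-setting `stop` makes
// the thread's do/while loop execute exactly one pass:
private void startThread(char type, boolean stopAfterOne) throws Exception {
  CompactorThread t;
  switch (type) {
    case 'i': t = new Initiator(); break;
    case 'w': t = new Worker(); break;
    case 'c': t = new Cleaner(); break;
    default: throw new IllegalArgumentException("Unknown thread type: " + type);
  }
  stop.set(stopAfterOne);
  t.init(stop, new AtomicBoolean());
  if (stopAfterOne) {
    t.run();   // one pass on the calling thread, so tests can assert immediately after
  } else {
    t.start(); // run continuously in the background
  }
}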
protected void addLengthFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords)
    throws Exception {
  addFile(t, p, minTxn, maxTxn, numRecords, FileType.LENGTH_FILE, 2, true);
}
protected void addBaseFile(Table t, Partition p, long maxTxn, int numRecords) throws Exception {
  // Convenience overload: base files always start at txn 0; two buckets, all present.
  addFile(t, p, 0, maxTxn, numRecords, FileType.BASE, 2, true);
}
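// Putting the helpers together: a sketch of a typical compaction test built from the
// methods above. txnHandler, CompactionRequest, and CompactionType are modeled on the
// Hive metastore API; the exact flow is an assumption, not verbatim source:
@Test
public void minorCompactionRunsOnDeltas() throws Exception {
  Table t = newTable("default", "mcd", false, null);
  addBaseFile(t, null, 20L, 20);      // base file covering txns 0-20
  addDeltaFile(t, null, 21L, 22L, 2); // two small deltas stacked on the base
  addDeltaFile(t, null, 23L, 24L, 2);
  burnThroughTransactions("default", "mcd", 25);

  txnHandler.compact(new CompactionRequest("default", "mcd", CompactionType.MINOR));
  startWorker(); // one synchronous compaction pass

  // ...then assert that a merged delta directory was produced, etc.
}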