// Fragment (loop body continues past this view; closing brace not visible here).
// Builds one CompactorInputSplit per bucket: the base directory is attached only when
// this bucket actually contained a base file (bt.sawBase), otherwise only deltas are
// passed so the compactor reads delta files alone for that bucket.
// NOTE(review): 'entries' is presumably the job/Hadoop configuration handed to the
// split constructor — confirm against the CompactorInputSplit constructor signature.
for (Map.Entry<Integer, BucketTracker> e : splitToBucketMap.entrySet()) { BucketTracker bt = e.getValue(); splits.add(new CompactorInputSplit(entries, e.getKey(), bt.buckets, bt.sawBase ? baseDir : null, deltaDirs));
// Test fragment (the enclosing test method's declaration and tail are outside this view).
// Round-trips a CompactorInputSplit with a non-null base dir through write()/readFields()
// and verifies the length, locations, bucket number, base dir, and delta dirs all survive
// Writable serialization.
// NOTE(review): the leading 'new CompactorMR.CompactorInputSplit(...)' was presumably
// assigned to 'split' in the original line — confirm against the full test source.
new CompactorMR.CompactorInputSplit(conf, 3, files, new Path(basename), deltas); Assert.assertEquals(520L, split.getLength()); String[] locations = split.getLocations(); Assert.assertEquals(1, locations.length); Assert.assertEquals("localhost", locations[0]); split.write(out); split = new CompactorMR.CompactorInputSplit(); DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray())); split.readFields(in); Assert.assertEquals(3, split.getBucket()); Assert.assertEquals(basename, split.getBaseDir().toString()); deltas = split.getDeltaDirs(); Assert.assertEquals(2, deltas.length); Assert.assertEquals(delta1, deltas[0].toString());
// Fragment (loop body continues past this view; closing brace not visible here).
// Builds one CompactorInputSplit per bucket: the base directory is attached only when
// this bucket actually contained a base file (bt.sawBase), otherwise only deltas are
// passed so the compactor reads delta files alone for that bucket.
// NOTE(review): 'entries' is presumably the job/Hadoop configuration handed to the
// split constructor — confirm against the CompactorInputSplit constructor signature.
for (Map.Entry<Integer, BucketTracker> e : splitToBucketMap.entrySet()) { BucketTracker bt = e.getValue(); splits.add(new CompactorInputSplit(entries, e.getKey(), bt.buckets, bt.sawBase ? baseDir : null, deltaDirs));
@Override public void map(WritableComparable key, CompactorInputSplit split, OutputCollector<NullWritable, NullWritable> nullWritableVOutputCollector, Reporter reporter) throws IOException { // This will only get called once, since CompactRecordReader only returns one record, // the input split. // Based on the split we're passed we go instantiate the real reader and then iterate on it // until it finishes. @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidInputFormat<WritableComparable, V> aif = instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME)); ValidTxnList txnList = new ValidReadTxnList(jobConf.get(ValidTxnList.VALID_TXNS_KEY)); boolean isMajor = jobConf.getBoolean(IS_MAJOR, false); AcidInputFormat.RawReader<V> reader = aif.getRawReader(jobConf, isMajor, split.getBucket(), txnList, split.getBaseDir(), split.getDeltaDirs()); RecordIdentifier identifier = reader.createKey(); V value = reader.createValue(); getWriter(reporter, reader.getObjectInspector(), split.getBucket()); while (reader.next(identifier, value)) { if (isMajor && reader.isDelete(value)) continue; writer.write(value); reporter.progress(); } }
/**
 * Supplies an empty value instance for the framework to fill in via
 * {@code next(NullWritable, CompactorInputSplit)}.
 *
 * @return a fresh, uninitialized {@link CompactorInputSplit}
 */
@Override
public CompactorInputSplit createValue() {
  CompactorInputSplit emptySplit = new CompactorInputSplit();
  return emptySplit;
}
/**
 * Delivers the single cached split exactly once.
 *
 * <p>On the first call the cached {@code split} is copied into the caller-supplied
 * value and the cache is cleared; every later call reports end-of-input.
 *
 * @param key                  ignored
 * @param compactorInputSplit  destination populated from the cached split
 * @return {@code true} if a split was delivered, {@code false} when exhausted
 * @throws IOException declared for the RecordReader contract
 */
@Override
public boolean next(NullWritable key, CompactorInputSplit compactorInputSplit)
    throws IOException {
  if (split == null) {
    // Already handed out (or never set) -- nothing left to read.
    return false;
  }
  compactorInputSplit.set(split);
  split = null; // one-shot: clear so the next call signals exhaustion
  return true;
}
// Fragment (starts and ends mid-method; the enclosing map() declaration is outside this view).
// Variant of the compactor map() setup that, besides the regular row writer, also prepares
// a delete-event writer for the same bucket (getDeleteEventWriter), using the ValidTxnList
// based raw-reader API.
// NOTE(review): 'aif.getRawReader(...)' was presumably assigned to 'reader' in the
// original line — confirm against the full method source.
aif.getRawReader(jobConf, isMajor, split.getBucket(), txnList, split.getBaseDir(), split.getDeltaDirs()); RecordIdentifier identifier = reader.createKey(); V value = reader.createValue(); getWriter(reporter, reader.getObjectInspector(), split.getBucket()); getDeleteEventWriter(reporter, reader.getObjectInspector(), split.getBucket());
// Fragment (loop body continues past this view; closing brace not visible here).
// Builds one CompactorInputSplit per bucket: the base directory is attached only when
// this bucket actually contained a base file (bt.sawBase), otherwise only deltas are
// passed so the compactor reads delta files alone for that bucket.
// NOTE(review): 'entries' is presumably the job/Hadoop configuration handed to the
// split constructor — confirm against the CompactorInputSplit constructor signature.
for (Map.Entry<Integer, BucketTracker> e : splitToBucketMap.entrySet()) { BucketTracker bt = e.getValue(); splits.add(new CompactorInputSplit(entries, e.getKey(), bt.buckets, bt.sawBase ? baseDir : null, deltaDirs));
// Fragment (starts and ends mid-method; the enclosing map() declaration is outside this view).
// Same setup as the txnList variant above, but using a write-id list ('writeIdList') for
// the raw reader, reflecting the transactional-table write-id API; it likewise prepares
// both the row writer and the delete-event writer for this bucket.
// NOTE(review): 'aif.getRawReader(...)' was presumably assigned to 'reader' in the
// original line — confirm against the full method source.
aif.getRawReader(jobConf, isMajor, split.getBucket(), writeIdList, split.getBaseDir(), split.getDeltaDirs()); RecordIdentifier identifier = reader.createKey(); V value = reader.createValue(); getWriter(reporter, reader.getObjectInspector(), split.getBucket()); getDeleteEventWriter(reporter, reader.getObjectInspector(), split.getBucket());
// Test fragment (the enclosing test method's declaration and tail are outside this view).
// Round-trips a CompactorInputSplit constructed WITHOUT a base dir through
// write()/readFields() and verifies the bucket survives, getBaseDir() stays null,
// and both delta dirs are preserved.
// NOTE(review): the leading 'new CompactorMR.CompactorInputSplit(...)' was presumably
// assigned to 'split' in the original line — confirm against the full test source.
new CompactorMR.CompactorInputSplit(conf, 3, files, null, deltas); split.write(out); split = new CompactorMR.CompactorInputSplit(); DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray())); split.readFields(in); Assert.assertEquals(3, split.getBucket()); Assert.assertNull(split.getBaseDir()); deltas = split.getDeltaDirs(); Assert.assertEquals(2, deltas.length); Assert.assertEquals(delta1, deltas[0].toString());
/**
 * Supplies an empty value instance for the framework to fill in via
 * {@code next(NullWritable, CompactorInputSplit)}.
 *
 * @return a fresh, uninitialized {@link CompactorInputSplit}
 */
@Override
public CompactorInputSplit createValue() {
  CompactorInputSplit emptySplit = new CompactorInputSplit();
  return emptySplit;
}
/**
 * Delivers the single cached split exactly once.
 *
 * <p>On the first call the cached {@code split} is copied into the caller-supplied
 * value and the cache is cleared; every later call reports end-of-input.
 *
 * @param key                  ignored
 * @param compactorInputSplit  destination populated from the cached split
 * @return {@code true} if a split was delivered, {@code false} when exhausted
 * @throws IOException declared for the RecordReader contract
 */
@Override
public boolean next(NullWritable key, CompactorInputSplit compactorInputSplit)
    throws IOException {
  if (split == null) {
    // Already handed out (or never set) -- nothing left to read.
    return false;
  }
  compactorInputSplit.set(split);
  split = null; // one-shot: clear so the next call signals exhaustion
  return true;
}
/**
 * Supplies an empty value instance for the framework to fill in via
 * {@code next(NullWritable, CompactorInputSplit)}.
 *
 * @return a fresh, uninitialized {@link CompactorInputSplit}
 */
@Override
public CompactorInputSplit createValue() {
  CompactorInputSplit emptySplit = new CompactorInputSplit();
  return emptySplit;
}
/**
 * Delivers the single cached split exactly once.
 *
 * <p>On the first call the cached {@code split} is copied into the caller-supplied
 * value and the cache is cleared; every later call reports end-of-input.
 *
 * @param key                  ignored
 * @param compactorInputSplit  destination populated from the cached split
 * @return {@code true} if a split was delivered, {@code false} when exhausted
 * @throws IOException declared for the RecordReader contract
 */
@Override
public boolean next(NullWritable key, CompactorInputSplit compactorInputSplit)
    throws IOException {
  if (split == null) {
    // Already handed out (or never set) -- nothing left to read.
    return false;
  }
  compactorInputSplit.set(split);
  split = null; // one-shot: clear so the next call signals exhaustion
  return true;
}