/**
 * Inserts {@code record} through the wrapped updater, tagging it with this
 * writer's {@code writeId}.
 *
 * @param record the row object to insert
 * @throws IOException if the underlying updater fails to write
 */
@Override
public void insert(Object record) throws IOException {
  updater.insert(writeId, record);
}
/**
 * Closes the wrapped updater and clears the reference so any later use of
 * this writer fails fast with an NPE rather than writing to a closed updater.
 *
 * @throws IOException if closing the underlying updater fails
 */
@Override
public void close() throws IOException {
  // false presumably means "normal close, not abort" — confirm against the
  // RecordUpdater.close(boolean) contract.
  updater.close(false);
  updater = null;
}
/**
 * Deletes {@code record} through the wrapped updater, tagging the operation
 * with this writer's {@code writeId}.
 *
 * @param record the row object identifying what to delete
 * @throws IOException if the underlying updater fails to write
 */
@Override
public void delete(Object record) throws IOException {
  updater.delete(writeId, record);
}
"2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6"}; for(int i=0; i < values.length; ++i) { ru.insert(0, new BigRow(i, i, values[i], i, i)); ru.close(false); for(int i=0; i < values.length; ++i) { if (values[i] != null) { ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY)); ru.delete(1, new BigRow(9, 0, BUCKET_PROPERTY)); ru.close(false); for(int i=0; i < values.length - 1; ++i) { if (values[i] != null) { ru.update(100, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY)); ru.delete(100, new BigRow(8, 0, BUCKET_PROPERTY)); ru.update(100, new BigRow(7, 7, values[values.length - 1], 7, 7, 2, 1, BUCKET_PROPERTY)); ru.close(false); MyResult[] expected = new MyResult[10]; int k = 0;
for (long j = 0; j < NUM_ROWID_PER_OWID; ++j) { long payload = (i-1) * NUM_ROWID_PER_OWID + j; updater.insert(i, new DummyRow(payload, j, i, bucket)); updater.close(false); for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) { if (j % 2 == 0 && j % 3 != 0) { updater.delete(currTxnId, new DummyRow(-1, j, i, bucket)); updater.close(false); for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) { if (j % 2 != 0 && j % 3 == 0) { updater.delete(currTxnId, new DummyRow(-1, j, i, bucket)); updater.close(false); for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) { if (j % 2 == 0 && j % 3 == 0) { updater.delete(currTxnId, new DummyRow(-1, j, i, bucket)); updater.close(false);
String[][] values = {new String[]{"a", "b", "c", "d", "e"}, new String[]{"f", "g", "h", "i", "j"}}; for(int i=0; i < values[0].length; ++i) { ru.insert(1, new MyRow(values[0][i])); ru.close(false); ru = of.getRecordUpdater(root, options); for(int i=0; i < values[1].length; ++i) { ru.insert(2, new MyRow(values[1][i])); ru.close(false);
rowOutWriters[findWriterOffset(row)].write(recordValue); } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) { fpaths.updaters[findWriterOffset(row)].insert(conf.getTableWriteId(), row); } else { fpaths.updaters[writerOffset].update(conf.getTableWriteId(), row); } else if (conf.getWriteType() == AcidUtils.Operation.DELETE) { fpaths.updaters[writerOffset].delete(conf.getTableWriteId(), row); } else { throw new HiveException("Unknown write type " + conf.getWriteType().toString());
.finalDestination(root); RecordUpdater updater = new OrcRecordUpdater(root, options); updater.insert(11, new MyRow("first")); updater.insert(11, new MyRow("second")); updater.insert(11, new MyRow("third")); updater.flush(); updater.insert(12, new MyRow("fourth")); updater.insert(12, new MyRow("fifth")); updater.flush(); assertEquals(5L, updater.getStats().getRowCount()); updater.insert(20, new MyRow("sixth")); updater.close(false); reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs)); assertEquals(6, reader.getNumberOfRows()); assertEquals(6L, updater.getStats().getRowCount());
.tableProperties(tblProps); RecordUpdater updater = new OrcRecordUpdater(root, options); updater.insert(11, new MyRow("first")); updater.insert(11, new MyRow("second")); updater.insert(11, new MyRow("third")); updater.flush(); updater.insert(12, new MyRow("fourth")); updater.insert(12, new MyRow("fifth")); updater.flush(); assertEquals(true, outDump.contains("Compression size: 2048")); System.setOut(origOut); updater.close(false);
for(int i=0; i < values.length; ++i) { if (values[i] != null) { ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY)); ru.delete(1, new BigRow(9, 0, BUCKET_PROPERTY)); ru.close(false);//this doesn't create a key index presumably because writerOptions are not set on 'options' for(int i=0; i < values.length - 1; ++i) { if (values[i] != null) { ru.update(100, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY)); ru.delete(100, new BigRow(8, 0, BUCKET_PROPERTY)); ru.update(100, new BigRow(7, 7, values[values.length - 1], 7, 7, 2, 1, BUCKET_PROPERTY)); ru.close(false);
.finalDestination(root); RecordUpdater updater = new OrcRecordUpdater(root, options); updater.update(100, new MyRow("update", 30, 10, bucket)); updater.delete(100, new MyRow("", 60, 40, bucket)); assertEquals(-1L, updater.getStats().getRowCount()); updater.close(false); Path bucketPath = AcidUtils.createFilename(root, options);
/**
 * Flushes every assigned {@link RecordUpdater}, skipping slots that are
 * still {@code null}. Any {@link IOException} raised while flushing is
 * rethrown as a {@link StreamingIOFailure} with the original cause attached.
 *
 * @throws StreamingIOFailure if flushing any underlying updater fails
 */
@Override
public void flush() throws StreamingIOFailure {
  try {
    for (RecordUpdater u : updaters) {
      if (u == null) {
        continue; // slot not yet initialized — nothing to flush
      }
      u.flush();
    }
  } catch (IOException e) {
    throw new StreamingIOFailure("Unable to flush recordUpdater", e);
  }
}
stats = prevFsp.updaters[0].getStats();
/**
 * Updates {@code record} through the wrapped updater, tagging the operation
 * with this writer's {@code writeId}.
 *
 * @param record the row object carrying the updated values
 * @throws IOException if the underlying updater fails to write
 */
@Override
public void update(Object record) throws IOException {
  updater.update(writeId, record);
}
options.writingBase(true).maximumWriteId(100)); for(String v: values) { ru.insert(0, new MyRow(v)); ru.close(false); ru.update(200, new MyRow("update 1", 0, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 2", 2, 0, BUCKET_PROPERTY)); ru.update(200, new MyRow("update 3", 3, 0, BUCKET_PROPERTY)); ru.delete(200, new MyRow("", 7, 0, BUCKET_PROPERTY)); ru.delete(200, new MyRow("", 8, 0, BUCKET_PROPERTY)); ru.close(false);
updater.insert(0, new DummyRow(1, 0, 0, bucket)); updater.insert(0, new DummyRow(1, 1, 0, bucket)); updater.insert(0, new DummyRow(2, 2, 0, bucket)); updater.insert(10000001, new DummyRow(3, 0, 10000001, bucket)); updater.close(false); .maximumWriteId(10000004); updater = new OrcRecordUpdater(root, options); updater.delete(options.getMinimumWriteId(), new DummyRow(-1, 0, 0, bucket)); updater.delete(options.getMinimumWriteId(), new DummyRow(-1, 5, 10000003, bucket)); updater.close(false);
rowOutWriters[writerOffset].write(recordValue); } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) { fpaths.updaters[writerOffset].insert(conf.getTransactionId(), row); } else { fpaths.updaters[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset].update(conf.getTransactionId(), row); } else if (conf.getWriteType() == AcidUtils.Operation.DELETE) { fpaths.updaters[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset].delete(conf.getTransactionId(), row); } else { throw new HiveException("Unknown write type " + conf.getWriteType().toString());
String[] values= new String[]{"1", "2", "3", "4", "5"}; for(int i=0; i < values.length; ++i) { ru.insert(0, new MyRow(values[i])); ru.close(false); values = new String[]{"6", "7", "8"}; for(int i=0; i < values.length; ++i) { ru.insert(1, new MyRow(values[i])); ru.flush(); ru.flush(); values = new String[]{"9", "10"}; for(int i=0; i < values.length; ++i) { ru.insert(3, new MyRow(values[i])); ru.flush();