/**
 * Deletes the single-write-id delta file for the given bucket under
 * {@code partitionPath}, if one exists.
 *
 * @param partitionPath root of the partition whose delta should be removed
 * @param writeId       write id used for both the minimum and maximum of the
 *                      delta name (i.e. a {@code delta_<writeId>_<writeId>} file)
 * @param bucketId      bucket number encoded into the file name
 * @throws IOException if the filesystem cannot be reached or the delete fails
 */
private void deleteDeltaIfExists(Path partitionPath, long writeId, int bucketId) throws IOException {
  // Build the canonical ACID file name for this (writeId, bucketId) pair.
  Path deltaPath = AcidUtils.createFilename(partitionPath,
      new AcidOutputFormat.Options(configuration)
          .bucket(bucketId)
          .minimumWriteId(writeId)
          .maximumWriteId(writeId));
  FileSystem fileSystem = deltaPath.getFileSystem(configuration);
  // Call delete() directly instead of exists()+delete(): this avoids the
  // check-then-act race and saves a filesystem round trip. delete() simply
  // returns false when the path does not exist, so no exception is raised.
  if (fileSystem.delete(deltaPath, false)) {
    LOG.info("Deleted existing delta path: {}", deltaPath);
  }
}
/**
 * Opens the mock writer's backing output stream at the ACID file name
 * derived from {@code basedir} and the supplied writer {@code options}.
 *
 * @param basedir directory under which the ACID file name is created
 * @param options ACID output options (supplies the configuration and naming fields)
 * @throws IOException if the target stream cannot be created
 */
MockRecordWriter(Path basedir, AcidOutputFormat.Options options) throws IOException {
  FileSystem fileSystem = FileSystem.get(options.getConfiguration());
  Path target = AcidUtils.createFilename(basedir, options);
  os = fileSystem.create(target);
}
.writingBase(true).isCompressed(false).maximumWriteId(maxTxn).bucket(0).statementId(-1) .visibilityTxnId(compactorTxnId); Path newBaseDir = AcidUtils.createFilename(toPath, options).getParent(); if (!fs.exists(fromPath)) { LOG.info(from + " not found. Assuming 0 splits. Creating " + newBaseDir);
.statementId(-1) .visibilityTxnId(CompactorMap.getCompactorTxnId(conf)); Path newDeltaDir = AcidUtils.createFilename(finalLocation, options).getParent(); LOG.info(context.getJobID() + ": " + tmpLocation + " not found. Assuming 0 splits. Creating " + newDeltaDir);
@Test
public void testCreateFilenameLargeIds() throws Exception {
  Path dir = new Path("/tmp");
  Configuration conf = new Configuration();

  // Pre-ACID ("old style") naming: the bucket number is used verbatim,
  // write ids play no part in the file name.
  AcidOutputFormat.Options options =
      new AcidOutputFormat.Options(conf).setOldStyle(true).bucket(123456789);
  assertEquals("/tmp/123456789_0", AcidUtils.createFilename(dir, options).toString());

  // Large write ids must appear unchanged in the base directory name.
  options.bucket(23)
      .minimumWriteId(1234567880)
      .maximumWriteId(1234567890)
      .writingBase(true)
      .setOldStyle(false);
  assertEquals("/tmp/base_1234567890/bucket_00023",
      AcidUtils.createFilename(dir, options).toString());

  // Same options as a delta: both write ids plus the statement-id suffix.
  options.writingBase(false);
  assertEquals("/tmp/delta_1234567880_1234567890_0000/bucket_00023",
      AcidUtils.createFilename(dir, options).toString());
}
.setOldStyle(true).bucket(1); assertEquals("/tmp/000001_0", AcidUtils.createFilename(p, options).toString()); options.bucket(123); assertEquals("/tmp/000123_0", AcidUtils.createFilename(p, options).toString()); options.bucket(23) .minimumWriteId(100) .setOldStyle(false); assertEquals("/tmp/base_0000200/bucket_00023", AcidUtils.createFilename(p, options).toString()); options.writingBase(false); assertEquals("/tmp/delta_0000100_0000200_0000/bucket_00023", AcidUtils.createFilename(p, options).toString()); options.writingDeleteDelta(true); assertEquals("/tmp/delete_delta_0000100_0000200_0000/bucket_00023", AcidUtils.createFilename(p, options).toString()); options.writingDeleteDelta(false); options.statementId(-1); assertEquals("/tmp/delta_0000100_0000200/bucket_00023", AcidUtils.createFilename(p, options).toString()); options.writingDeleteDelta(true); assertEquals("/tmp/delete_delta_0000100_0000200/bucket_00023", AcidUtils.createFilename(p, options).toString()); options.writingDeleteDelta(false); options.statementId(7); assertEquals("/tmp/delta_0000100_0000200_0007/bucket_00023", AcidUtils.createFilename(p, options).toString());
Path matchingBucket = AcidUtils.createFilename(options.getFinalDestination(), options.clone().statementId(pastStmt)); if(!fs.exists(matchingBucket)) { continue;
.statementId(statementId) .finalDestination(destLocation); return AcidUtils.createFilename(destLocation, options);
this.path = AcidUtils.createFilename(path, options); this.deleteEventWriter = null; this.deleteEventPath = null; this.deleteEventPath = AcidUtils.createFilename(path, optionsCloneForDelta.writingDeleteDelta(true));
@Override public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path, Options options) throws IOException { final Path filename = AcidUtils.createFilename(path, options); final OrcFile.WriterOptions opts = OrcFile.writerOptions(options.getConfiguration());
@Override public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path, Options options) throws IOException { final Path filename = AcidUtils.createFilename(path, options); final OrcFile.WriterOptions opts = OrcFile.writerOptions(options.getTableProperties(), options.getConfiguration());
this.path = AcidUtils.createFilename(partitionRoot, options); this.deleteEventWriter = null; this.deleteEventPath = null; this.deleteEventPath = AcidUtils.createFilename(partitionRoot, deleteOptions);
Path bucketPath = AcidUtils.createFilename(root, options); Path sidePath = OrcAcidUtils.getSideFile(bucketPath); DataInputStream side = fs.open(sidePath);
assertEquals(-1L, updater.getStats().getRowCount()); updater.close(false); Path bucketPath = AcidUtils.createFilename(root, options); bucketPath = AcidUtils.createFilename(root, options); reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs));
this.options = options; this.bucket.set(options.getBucket()); this.path = AcidUtils.createFilename(path, options); FileSystem fs = options.getFilesystem(); if (fs == null) {
@Override public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getRawRecordWriter(Path path, Options options) throws IOException { final Path filename = AcidUtils.createFilename(path, options); final OrcFile.WriterOptions opts = OrcFile.writerOptions(options.getConfiguration());