/**
 * Write the Kafka producer PID and epoch to checkpoint file {@link TransactionalKafkaWriter#openTxFileName}.
 */
private void persistTxState() {
  LOG.info("Committing state to path [{}] by [{}]", openTxFileName.toString(), writerIdTopicId);
  try (FSDataOutputStream outStream = fileSystem.create(openTxFileName)) {
    outStream.writeLong(producerId);
    outStream.writeShort(producerEpoch);
  } catch (Exception e) {
    sendExceptionRef.compareAndSet(null, e);
  }
}
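The checkpoint format above is one long (the producer ID) followed by one short (the producer epoch). A minimal sketch of the matching read-back, assuming the same fileSystem/openTxFileName fields; restoreTxState and the restored* fields are hypothetical names, not part of the original class:

private long restoredProducerId;
private short restoredProducerEpoch;

/** Hypothetical sketch: restore the PID/epoch pair written by persistTxState(). */
private void restoreTxState() throws IOException {
  try (FSDataInputStream inStream = fileSystem.open(openTxFileName)) {
    // Fields must be read back in the order they were written.
    restoredProducerId = inStream.readLong();
    restoredProducerEpoch = inStream.readShort();
  }
}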
private void markOutputDirAsCompleted(DateTime jobStartTime) throws IOException {
  Path completionFilePath = new Path(this.dataset.outputPath(), MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
  try (FSDataOutputStream completionFileStream = this.fs.create(completionFilePath)) {
    completionFileStream.writeLong(jobStartTime.getMillis());
  }
}
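A consumer of this completion marker recovers the job start time with a single readLong. A minimal sketch under the same assumptions (fs field and MRCompactor constant); isOutputDirCompleted is a hypothetical helper name:

/** Hypothetical sketch: check for the marker written by markOutputDirAsCompleted(). */
private boolean isOutputDirCompleted(Path outputPath) throws IOException {
  Path completionFilePath = new Path(outputPath, MRCompactor.COMPACTION_COMPLETE_FILE_NAME);
  if (!this.fs.exists(completionFilePath)) {
    return false;
  }
  try (FSDataInputStream in = this.fs.open(completionFilePath)) {
    // The marker holds one long: the compaction job's start time in epoch millis.
    return in.readLong() > 0;
  }
}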
@Override
public void flush() throws IOException {
  // We only support flushes on files with multiple transactions, because
  // flushes create significant overhead in HDFS. Record updaters with a
  // single transaction should be closed rather than flushed.
  if (flushLengths == null) {
    throw new IllegalStateException("Attempting to flush a RecordUpdater on "
        + path + " with a single transaction.");
  }
  if (writer == null) {
    writer = OrcFile.createWriter(path, writerOptions);
  }
  long len = writer.writeIntermediateFooter();
  flushLengths.writeLong(len);
  OrcInputFormat.SHIMS.hflush(flushLengths);
}
/** @throws Exception If failed. */
@Test
public void testAppend() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "someFile");

  int cnt = 1024;

  FSDataOutputStream out = fs.create(file, true, 1024);

  for (int i = 0; i < cnt; i++)
    out.writeLong(i);

  out.close();

  out = fs.append(file);

  for (int i = cnt; i < cnt * 2; i++)
    out.writeLong(i);

  out.close();

  FSDataInputStream in = fs.open(file, 1024);

  for (int i = 0; i < cnt * 2; i++)
    assertEquals(i, in.readLong());

  in.close();
}
@Override
public void flush() throws IOException {
  initWriter();
  // A streaming ingest writer with a single-transaction batch means the transaction is either
  // committed or aborted. In either case we don't need the flush-length file, but we do need to
  // flush the intermediate footer to reduce memory pressure. Also, with HIVE-19206, the streaming
  // writer does automatic memory management, which requires flushing open files without actually
  // closing them.
  if (flushLengths == null) {
    // transaction batch size = 1 case
    writer.writeIntermediateFooter();
  } else {
    // transaction batch size > 1 case
    long len = writer.writeIntermediateFooter();
    flushLengths.writeLong(len);
    OrcInputFormat.SHIMS.hflush(flushLengths);
  }
  bufferedRows = 0;
  // Multiple transactions only happen for streaming ingest, which only allows inserts.
  assert deleteEventWriter == null : "unexpected delete writer for " + path;
}
/** @throws Exception If failed. */
@Test
public void testOpen() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "someFile");

  int cnt = 2 * 1024;

  try (FSDataOutputStream out = fs.create(file, true, 1024)) {
    for (long i = 0; i < cnt; i++)
      out.writeLong(i);
  }

  assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());

  try (FSDataInputStream in = fs.open(file, 1024)) {
    for (long i = 0; i < cnt; i++)
      assertEquals(i, in.readLong());
  }
}
/** @throws Exception If failed. */
@Test
public void testAppend() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "someFile");

  int cnt = 1024;

  FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));

  for (int i = 0; i < cnt; i++)
    out.writeLong(i);

  out.close();

  out = fs.create(file, EnumSet.of(CreateFlag.APPEND),
      Options.CreateOpts.perms(FsPermission.getDefault()));

  for (int i = cnt; i < cnt * 2; i++)
    out.writeLong(i);

  out.close();

  FSDataInputStream in = fs.open(file, 1024);

  for (int i = 0; i < cnt * 2; i++)
    assertEquals(i, in.readLong());

  in.close();
}
out.writeLong(offsetIndexMeta);
/** @throws Exception If failed. */
@Test
public void testOpen() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "someFile");

  int cnt = 2 * 1024;

  try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()))) {
    for (long i = 0; i < cnt; i++)
      out.writeLong(i);
  }

  assertEquals(getClientFsUser(), fs.getFileStatus(file).getOwner());

  try (FSDataInputStream in = fs.open(file, 1024)) {
    for (long i = 0; i < cnt; i++)
      assertEquals(i, in.readLong());
  }
}
  flushLengths.writeLong(0);
  OrcInputFormat.SHIMS.hflush(flushLengths);
} else {
private static void writeFile(FileSystem fs, Path f, long fill, int parts) throws IOException {
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  DataOutputStream dout = new DataOutputStream(iout);
  for (int i = 0; i < parts; ++i) {
    for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
      dout.writeLong(fill);
    }
  }
  // The checksum is written through the raw stream, so it is excluded from its own CRC.
  out.writeLong(iout.getChecksum().getValue());
  dout.close();
}
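A reader can verify such a file by recomputing the CRC over everything except the trailing checksum long. A minimal sketch, assuming the exact layout produced by writeFile above; verifyFile is a hypothetical name:

/** Hypothetical sketch: recompute the CRC32 and compare it to the trailing checksum. */
private static boolean verifyFile(FileSystem fs, Path f, int parts) throws IOException {
  try (FSDataInputStream raw = fs.open(f)) {
    CheckedInputStream iin = new CheckedInputStream(raw, new CRC32());
    DataInputStream din = new DataInputStream(iin);
    // Re-read exactly the longs that were checksummed on the write side.
    long records = (long) parts * (MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8);
    for (long i = 0; i < records; ++i) {
      din.readLong();
    }
    long expected = iin.getChecksum().getValue();
    // The trailing checksum sits outside the checksummed region, so read it from the raw stream.
    return raw.readLong() == expected;
  }
}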
public static void writeLong(FileSystem fs, String path, long val) {
  // try-with-resources ensures the stream is closed even if writeLong throws.
  try (FSDataOutputStream out = fs.create(new Path(path), true)) {
    out.writeLong(val);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
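The counterpart read helper is symmetric. A minimal sketch in the same error-swallowing style as writeLong above; this readLong is a hypothetical helper, and -1 is an arbitrary failure sentinel:

/** Hypothetical sketch: read back the single long written by writeLong(). */
public static long readLong(FileSystem fs, String path) {
  try (FSDataInputStream in = fs.open(new Path(path))) {
    return in.readLong();
  } catch (IOException e) {
    e.printStackTrace();
    return -1L; // arbitrary sentinel; production code should propagate the error
  }
}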