/**
 * Persists the global dictionary metadata as the V2 index file under
 * {@code dir}, replacing any existing index. The field order written here
 * is the wire format and must stay in sync with the corresponding reader.
 *
 * @param dir      directory that receives the index file
 * @param metadata dictionary metadata to serialize
 * @throws IOException if the index file cannot be created or written
 */
@Override public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    Path target = new Path(dir, V2_INDEX_NAME);
    try (FSDataOutputStream out = fs.create(target, true)) {
        // Fixed-order header: version, id range, value stats, converter class.
        out.writeByte(MINOR_VERSION_V1);
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        // Slice map: entry count, then (key, file name) pairs.
        out.writeInt(metadata.sliceFileMap.size());
        for (Map.Entry<AppendDictSliceKey, String> slice : metadata.sliceFileMap.entrySet()) {
            slice.getKey().write(out);
            out.writeUTF(slice.getValue());
        }
    }
}
/**
 * Writes a terminating {@code CLOSE_EVENT} marker to the log and then
 * closes the underlying output stream.
 *
 * @throws IOException if the marker cannot be written or the close fails
 */
public void close() throws IOException {
    logout.writeByte(CLOSE_EVENT);
    logout.close();
}
/**
 * Create a file with the name <code>file</code> and a length of
 * <code>fileSize</code>. The file is filled with the character 'a'.
 *
 * @param file     path of the file to create (overwritten if it exists)
 * @param fileSize number of bytes to write
 * @throws IOException if the file cannot be created or written
 */
private void genFile(Path file, long fileSize) throws IOException {
    // try-with-resources guarantees the stream is closed even when a write
    // fails part-way; the original leaked the stream on write errors.
    try (FSDataOutputStream out = fs.create(file, true,
            getConf().getInt("io.file.buffer.size", 4096),
            (short) getConf().getInt("dfs.replication", 3),
            fs.getDefaultBlockSize())) {
        // Write in chunks rather than byte-at-a-time to cut per-call overhead;
        // the resulting file content is identical.
        byte[] chunk = new byte[8192];
        java.util.Arrays.fill(chunk, (byte) 'a');
        long remaining = fileSize;
        while (remaining > 0) {
            int n = (int) Math.min(chunk.length, remaining);
            out.write(chunk, 0, n);
            remaining -= n;
        }
    }
}
/**
 * Create a file with a length of <code>fileSize</code>, filled with 'a',
 * while recording the elapsed time and operation counts for the CREATE and
 * WRITE_CLOSE phases in the benchmark accumulators.
 *
 * @param file     path of the file to create (overwritten if it exists)
 * @param fileSize number of bytes to write
 * @throws IOException if the file cannot be created, written, or closed
 */
private void genFile(Path file, long fileSize) throws IOException {
    long startTime = System.currentTimeMillis();
    FSDataOutputStream out = fs.create(file, true,
            getConf().getInt("io.file.buffer.size", 4096),
            (short) getConf().getInt("dfs.replication", 3),
            fs.getDefaultBlockSize());
    executionTime[CREATE] += (System.currentTimeMillis() - startTime);
    totalNumOfOps[CREATE]++;
    boolean closed = false;
    try {
        for (long i = 0; i < fileSize; i++) {
            out.writeByte('a');
        }
        // Time only the normal-path close, matching the original accounting.
        startTime = System.currentTimeMillis();
        out.close();
        closed = true;
        executionTime[WRITE_CLOSE] += (System.currentTimeMillis() - startTime);
        totalNumOfOps[WRITE_CLOSE]++;
    } finally {
        if (!closed) {
            // A write threw: release the stream without polluting the
            // WRITE_CLOSE statistics (the original leaked it here).
            out.close();
        }
    }
}
/**
 * Appends a mutation record to the log: a one-byte {@code MUTATION_EVENT}
 * tag followed by the mutation's own serialized form, then flushes so the
 * record reaches the underlying stream.
 *
 * @param m the mutation to record
 * @throws IOException if writing or flushing the log stream fails
 */
public void log(Mutation m) throws IOException {
    // write event type
    logout.writeByte(MUTATION_EVENT);
    // write event payload via the mutation's own serializer
    m.write(logout);
    logout.flush();
}
/**
 * Serializes the global dictionary metadata into the V2 index file inside
 * {@code dir}, overwriting any existing index. The write order is the wire
 * format and must match the corresponding reader: version byte, id range,
 * value stats, converter class name, then the slice-key to slice-file map.
 *
 * @param dir      directory in which the index file is created
 * @param metadata dictionary metadata to persist
 * @throws IOException if the file cannot be created or written
 */
@Override public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    Path indexFile = new Path(dir, V2_INDEX_NAME);
    try (FSDataOutputStream out = fs.create(indexFile, true)) {
        out.writeByte(MINOR_VERSION_V1);
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        out.writeInt(metadata.sliceFileMap.size());
        // Each entry: the key's own serialized form, then the slice file name.
        for (Map.Entry<AppendDictSliceKey, String> entry : metadata.sliceFileMap.entrySet()) {
            entry.getKey().write(out);
            out.writeUTF(entry.getValue());
        }
    }
}
/** Make sure that the quota is decremented correctly when a block is abandoned */
@Test
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Setting diskspace quota to 3MB
    dfs.setQuota(new Path("/"), FSConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
    // Start writing a file with 2 replicas to ensure each datanode has one.
    // Block Size is 1MB.
    String src = FILE_NAME_PREFIX + "test_quota1";
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
    for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
    }
    // Shutdown one datanode, causing the block abandonment.
    cluster.getDataNodes().get(0).shutdown();
    // Close the file, new block will be allocated with 2MB pending size.
    // Closing must succeed within the 3MB quota; a QuotaExceededException
    // here would mean the abandoned block was not credited back.
    try {
        fout.close();
    } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
    }
}
// closing brace of the enclosing test class (carried in this chunk)
}
@Override public long writeIntermediateFooter() throws IOException { // flush any buffered rows flushStripe(); // write a footer if (stripesAtLastFlush != stripes.size()) { if (callback != null) { callback.preFooterWrite(callbackContext); } int metaLength = writeMetadata(); int footLength = writeFooter(rawWriter.getPos() - metaLength); rawWriter.writeByte(writePostScript(footLength, metaLength)); stripesAtLastFlush = stripes.size(); rawWriter.hflush(); } return rawWriter.getPos(); }
/**
 * Flushes buffered rows and, if new stripes have been written since the
 * last call, appends a fresh metadata/footer/postscript so the file is
 * readable up to this point; the data is hflush()ed to the filesystem.
 *
 * @return the current position (length) of the raw output file
 * @throws IOException if flushing or writing the footer fails
 */
@Override public long writeIntermediateFooter() throws IOException {
    // flush any buffered rows
    flushStripe();
    // write a footer only when new stripes exist since the last flush
    if (stripesAtLastFlush != stripes.size()) {
        if (callback != null) {
            callback.preFooterWrite(callbackContext);
        }
        int metaLength = writeMetadata();
        int footLength = writeFooter(rawWriter.getPos() - metaLength);
        // postscript length goes into the file's final byte
        rawWriter.writeByte(writePostScript(footLength, metaLength));
        stripesAtLastFlush = stripes.size();
        rawWriter.hflush();
    }
    return rawWriter.getPos();
}
@Override public void close() throws IOException { if (callback != null) { callback.preFooterWrite(callbackContext); } // remove us from the memory manager so that we don't get any callbacks memoryManager.removeWriter(path); // actually close the file flushStripe(); int metadataLength = writeMetadata(); int footerLength = writeFooter(rawWriter.getPos() - metadataLength); rawWriter.writeByte(writePostScript(footerLength, metadataLength)); rawWriter.close(); }
/**
 * Finalizes and closes the file: fires the pre-footer callback (if set),
 * deregisters this writer from the memory manager, flushes the final
 * stripe, writes metadata, footer, and postscript, then closes the
 * underlying raw stream.
 *
 * @throws IOException if any trailer write or the stream close fails
 */
@Override public void close() throws IOException {
    if (callback != null) {
        callback.preFooterWrite(callbackContext);
    }
    // remove us from the memory manager so that we don't get any callbacks
    memoryManager.removeWriter(path);
    // actually close the file
    flushStripe();
    int metadataLength = writeMetadata();
    int footerLength = writeFooter(rawWriter.getPos() - metadataLength);
    // the postscript's length is encoded in the file's final byte
    rawWriter.writeByte(writePostScript(footerLength, metadataLength));
    rawWriter.close();
}
@Override public long writePostScript(OrcProto.PostScript.Builder builder) throws IOException { builder.setFooterLength(footerLength); builder.setMetadataLength(metadataLength); OrcProto.PostScript ps = builder.build(); // need to write this uncompressed long startPosn = rawWriter.getPos(); ps.writeTo(rawWriter); long length = rawWriter.getPos() - startPosn; if (length > 255) { throw new IllegalArgumentException("PostScript too large at " + length); } rawWriter.writeByte((int)length); return rawWriter.getPos(); }
/**
 * Thread-safe variant: flushes buffered rows and, if new stripes exist
 * since the last flush, appends a fresh metadata/footer/postscript and
 * hflush()es (via the shim layer) so the file is readable to this point.
 *
 * @return the current position (length) of the raw output file
 * @throws IOException if flushing or writing the footer fails
 */
@Override public synchronized long writeIntermediateFooter() throws IOException {
    // flush any buffered rows
    flushStripe();
    // write a footer only if new stripes were added since the last flush
    if (stripesAtLastFlush != stripes.size()) {
        if (callback != null) {
            callback.preFooterWrite(callbackContext);
        }
        int metaLength = writeMetadata(rawWriter.getPos());
        int footLength = writeFooter(rawWriter.getPos() - metaLength);
        // postscript length is encoded in the file's final byte
        rawWriter.writeByte(writePostScript(footLength, metaLength));
        stripesAtLastFlush = stripes.size();
        // hflush through the shim for cross-Hadoop-version compatibility
        OrcInputFormat.SHIMS.hflush(rawWriter);
    }
    return rawWriter.getPos();
}
/**
 * Finalizes and closes the file. The callback and memory-manager
 * deregistration happen outside the lock; the final stripe flush, trailer
 * writes, and stream close are done under {@code synchronized (this)} to
 * serialize with concurrent intermediate-footer writes.
 *
 * @throws IOException if any trailer write or the stream close fails
 */
@Override public void close() throws IOException {
    if (callback != null) {
        callback.preFooterWrite(callbackContext);
    }
    // remove us from the memory manager so that we don't get any callbacks
    memoryManager.removeWriter(path);
    // actually close the file
    synchronized (this) {
        flushStripe();
        int metadataLength = writeMetadata(rawWriter.getPos());
        int footerLength = writeFooter(rawWriter.getPos() - metadataLength);
        // postscript length is encoded in the file's final byte
        rawWriter.writeByte(writePostScript(footerLength, metadataLength));
        rawWriter.close();
    }
}
/**
 * Create a file with the name <code>file</code> and a length of
 * <code>fileSize</code>. The file is filled with the character 'a'.
 *
 * @param file     path of the file to create (overwritten if it exists)
 * @param fileSize number of bytes to write
 * @throws IOException if the file cannot be created or written
 */
private void genFile(Path file, long fileSize) throws IOException {
    // try-with-resources guarantees the stream is closed even when a write
    // fails part-way; the original leaked the stream on write errors.
    try (FSDataOutputStream out = fc.create(file,
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.createParent(),
            CreateOpts.bufferSize(4096),
            CreateOpts.repFac((short) 3))) {
        for (long i = 0; i < fileSize; i++) {
            out.writeByte('a');
        }
    }
}
/**
 * Create a file with the name <code>file</code> and a length of
 * <code>fileSize</code>. The file is filled with the character 'a'.
 *
 * @param file     path of the file to create (overwritten if it exists)
 * @param fileSize number of bytes to write
 * @throws IOException if the file cannot be created or written
 */
private void genFile(Path file, long fileSize) throws IOException {
    // try-with-resources guarantees the stream is closed even when a write
    // fails part-way; the original leaked the stream on write errors.
    try (FSDataOutputStream out = fc.create(file,
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.createParent(),
            CreateOpts.bufferSize(4096),
            CreateOpts.repFac((short) 3))) {
        for (long i = 0; i < fileSize; i++) {
            out.writeByte('a');
        }
    }
}
/**
 * Completes and writes the ORC postscript: compression kind, footer and
 * metadata lengths, and (when compression is enabled) the compression
 * block size, followed by the postscript's one-byte length.
 *
 * @param builder postscript builder to complete and serialize
 * @throws IOException if writing to the raw stream fails
 * @throws IllegalArgumentException if the serialized postscript exceeds 255 bytes
 */
@Override public void writePostScript(PostScript.Builder builder) throws IOException {
    builder.setCompression(writeCompressionKind(compress));
    builder.setFooterLength(footerLength);
    builder.setMetadataLength(metadataLength);
    // block size is only meaningful when a codec is in use
    if (compress != CompressionKind.NONE) {
        builder.setCompressionBlockSize(bufferSize);
    }
    PostScript ps = builder.build();
    // need to write this uncompressed
    long startPosn = rawWriter.getPos();
    ps.writeTo(rawWriter);
    long length = rawWriter.getPos() - startPosn;
    // the length must fit into the single trailing byte of the file
    if (length > 255) {
        throw new IllegalArgumentException("PostScript too large at " + length);
    }
    rawWriter.writeByte((int) length);
}
/** Make sure that the quota is decremented correctly when a block is abandoned */
@Test
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
    // Setting diskspace quota to 3MB
    fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
    // Start writing a file with 2 replicas to ensure each datanode has one.
    // Block Size is 1MB.
    String src = FILE_NAME_PREFIX + "test_quota1";
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
    for (int i = 0; i < 1024; i++) {
        fout.writeByte(123);
    }
    // Shutdown one datanode, causing the block abandonment.
    cluster.getDataNodes().get(0).shutdown();
    // Close the file, new block will be allocated with 2MB pending size.
    // A QuotaExceededException here would mean the abandoned block's space
    // was not credited back against the quota.
    try {
        fout.close();
    } catch (QuotaExceededException e) {
        fail("Unexpected quota exception when closing fout");
    }
}
// closing brace of the enclosing test class (carried in this chunk)
}
/**
 * Similar with testRenameUCFileInSnapshot, but do renaming first and then
 * append file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
    final Path test = new Path("/test");
    final Path foo = new Path(test, "foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
    // rename bar --> bar2
    final Path bar2 = new Path(foo, "bar2");
    hdfs.rename(bar, bar2);
    // append file and keep it as underconstruction.
    FSDataOutputStream out = hdfs.append(bar2);
    out.writeByte(0);
    // hsync with UPDATE_LENGTH so the appended byte is reflected in the
    // NameNode's length while the file stays open (under construction).
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // save namespace and restart: verifies the renamed, under-construction
    // file survives an fsimage round-trip.
    restartClusterAndCheckImage(true);
}