/**
 * If the reopened file did not end at a chunk boundary and the preceding
 * write filled up its partial chunk, tell the checksum summer to generate
 * full-size crc chunks from now on.
 */
protected void adjustChunkBoundary() {
  // The inherited partial chunk is complete once we land exactly on a
  // checksum boundary: drop append mode and restore the normal buffer size.
  if (appendChunk && bytesCurBlock % bytesPerChecksum == 0) {
    appendChunk = false;
    resetChecksumBufSize();
  }
  if (appendChunk) {
    return; // still filling the partial chunk carried over from append
  }
  // Back on chunk boundaries: size packets normally, capped by what is
  // left in the current block.
  final long remainingInBlock = blockSize - bytesCurBlock;
  final int packetSize = (int) Math.min(remainingInBlock, writePacketSize);
  computePacketChunkSize(packetSize, bytesPerChecksum);
}
/**
 * If the reopened file did not end at a chunk boundary and the write above
 * just filled its partial chunk, switch the summer back to producing
 * full crc chunks.
 */
protected void adjustChunkBoundary() {
  final boolean partialChunkNowFull =
      appendChunk && bytesCurBlock % bytesPerChecksum == 0;
  if (partialChunkNowFull) {
    // Leave append mode; checksum buffer returns to its default size.
    appendChunk = false;
    resetChecksumBufSize();
  }
  if (!appendChunk) {
    // Recompute packet sizing, bounded by the bytes left in this block.
    computePacketChunkSize(
        (int) Math.min(blockSize - bytesCurBlock, writePacketSize),
        bytesPerChecksum);
  }
}
/** Construct a new output stream for creating a file. */ private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress, DataChecksum checksum, String[] favoredNodes) throws IOException { this(dfsClient, src, progress, stat, checksum); this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK); computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum); streamer = new DataStreamer(stat, null); if (favoredNodes != null && favoredNodes.length != 0) { streamer.setFavoredNodes(favoredNodes); } }
/** Construct a new output stream for creating a file. */ private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress, DataChecksum checksum, String[] favoredNodes) throws IOException { this(dfsClient, src, progress, stat, checksum); this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK); computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum); streamer = new DataStreamer(stat, null); if (favoredNodes != null && favoredNodes.length != 0) { streamer.setFavoredNodes(favoredNodes); } }
/** Construct a new output stream for creating a file. */
protected DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
    EnumSet<CreateFlag> flag, Progressable progress,
    DataChecksum checksum, String[] favoredNodes, boolean createStreamer) {
  this(dfsClient, src, flag, progress, stat, checksum);
  this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
  computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
      bytesPerChecksum);
  // Callers (e.g. erasure-coded subclasses) may wire up their own streamer.
  if (!createStreamer) {
    return;
  }
  streamer = new DataStreamer(stat, null, dfsClient, src, progress, checksum,
      cachingStrategy, byteArrayManager, favoredNodes, addBlockFlags);
}
// NOTE(review): fragment without its enclosing method — these look like the
// two branches of the append-path packet sizing (cf. adjustPacketChunkSize);
// the surrounding control flow is not visible in this chunk, so confirm
// which conditions select each group of statements before relying on this.
//
// Presumably: the reopened file's last block ends mid-crc-chunk, so packets
// are sized to hold exactly the remainder of that partial chunk
// (freeInCksum) and the streamer is flagged to append into it.
computePacketChunkSize(0, freeInCksum);
setChecksumBufSize(freeInCksum);
getStreamer().setAppendChunk(true);
// Presumably the other branch: the last crc chunk is intact, so packets are
// sized normally but capped by the free space left in the last block.
computePacketChunkSize(
    Math.min(dfsClient.getConf().getWritePacketSize(), freeInLastBlock),
    bytesPerChecksum);
/** Construct a new output stream for append. */ private DFSOutputStream(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum) throws IOException { this(dfsClient, src, progress, stat, checksum); initialFileSize = stat.getLen(); // length of file when opened this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK); boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK); // The last partial block of the file has to be filled. if (!toNewBlock && lastBlock != null) { // indicate that we are appending to an existing block bytesCurBlock = lastBlock.getBlockSize(); streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum); } else { computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum); streamer = new DataStreamer(stat, lastBlock != null ? lastBlock.getBlock() : null); } this.fileEncryptionInfo = stat.getFileEncryptionInfo(); }
/** Construct a new output stream for append. */ private DFSOutputStream(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum) throws IOException { this(dfsClient, src, progress, stat, checksum); initialFileSize = stat.getLen(); // length of file when opened this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK); boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK); // The last partial block of the file has to be filled. if (!toNewBlock && lastBlock != null) { // indicate that we are appending to an existing block bytesCurBlock = lastBlock.getBlockSize(); streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum); } else { computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum); streamer = new DataStreamer(stat, lastBlock != null ? lastBlock.getBlock() : null); } this.fileEncryptionInfo = stat.getFileEncryptionInfo(); }
/**
 * If the reopened file did not end at a chunk boundary and the preceding
 * write filled up its partial chunk, tell the summer to generate full
 * crc chunks from now on.
 */
protected void adjustChunkBoundary() {
  // Landing exactly on a checksum boundary means the inherited partial
  // chunk is now complete: leave append mode and reset the buffer.
  if (getStreamer().getAppendChunk()
      && getStreamer().getBytesCurBlock() % bytesPerChecksum == 0) {
    getStreamer().setAppendChunk(false);
    resetChecksumBufSize();
  }
  if (getStreamer().getAppendChunk()) {
    return; // partial chunk not yet full; keep append sizing
  }
  final long bytesLeftInBlock = blockSize - getStreamer().getBytesCurBlock();
  computePacketChunkSize((int) Math.min(bytesLeftInBlock, writePacketSize),
      bytesPerChecksum);
}
/** Construct a new output stream for append. */ private DFSOutputStream(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes) throws IOException { this(dfsClient, src, flags, progress, stat, checksum); initialFileSize = stat.getLen(); // length of file when opened this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK); boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK); this.fileEncryptionInfo = stat.getFileEncryptionInfo(); // The last partial block of the file has to be filled. if (!toNewBlock && lastBlock != null) { // indicate that we are appending to an existing block streamer = new DataStreamer(lastBlock, stat, dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager); getStreamer().setBytesCurBlock(lastBlock.getBlockSize()); adjustPacketChunkSize(stat); getStreamer().setPipelineInConstruction(lastBlock); } else { computePacketChunkSize(dfsClient.getConf().getWritePacketSize(), bytesPerChecksum); streamer = new DataStreamer(stat, lastBlock != null ? lastBlock.getBlock() : null, dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager, favoredNodes, addBlockFlags); } }