String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId()); blockReader = new BlockReaderFactory(dfs.getConf()). setFileName(file). setBlock(block).
/**
 * Ensures the configured writePacketSize never exceeds
 * PacketReceiver.MAX_PACKET_SIZE.
 */
private void initWritePacketSize() {
  // Consistency fix: use the accessor getWritePacketSize() instead of the
  // raw field, matching the other call sites in this file.
  writePacketSize = dfsClient.getConf().getWritePacketSize();
  if (writePacketSize > PacketReceiver.MAX_PACKET_SIZE) {
    LOG.warn(
        "Configured write packet exceeds {} bytes as max,"
            + " using {} bytes.",
        PacketReceiver.MAX_PACKET_SIZE, PacketReceiver.MAX_PACKET_SIZE);
    // Clamp to the transport-level maximum packet size.
    writePacketSize = PacketReceiver.MAX_PACKET_SIZE;
  }
}
/** Returns the default replication factor from the client configuration. */
@Override
public short getDefaultReplication() {
  final short replication = dfs.getConf().getDefaultReplication();
  return replication;
}
private long computeTransferReadTimeout() { // transfer timeout multiplier based on the transfer size // One per 200 packets = 12.8MB. Minimum is 2. int multi = 2 + (int) (bytesSent / dfsClient.getConf().writePacketSize) / 200; return dfsClient.getDatanodeReadTimeout(multi); }
private long computeTransferReadTimeout() { // transfer timeout multiplier based on the transfer size // One per 200 packets = 12.8MB. Minimum is 2. int multi = 2 + (int) (bytesSent / dfsClient.getConf().writePacketSize) / 200; return dfsClient.getDatanodeReadTimeout(multi); }
/**
 * Ensures the configured writePacketSize never exceeds
 * PacketReceiver.MAX_PACKET_SIZE.
 */
private void initWritePacketSize() {
  // Consistency fix: use the accessor getWritePacketSize() instead of the
  // raw field, matching the other call sites in this file.
  writePacketSize = dfsClient.getConf().getWritePacketSize();
  if (writePacketSize > PacketReceiver.MAX_PACKET_SIZE) {
    LOG.warn(
        "Configured write packet exceeds {} bytes as max,"
            + " using {} bytes.",
        PacketReceiver.MAX_PACKET_SIZE, PacketReceiver.MAX_PACKET_SIZE);
    // Clamp to the transport-level maximum packet size.
    writePacketSize = PacketReceiver.MAX_PACKET_SIZE;
  }
}
/**
 * Clamps the client's configured write packet size to
 * PacketReceiver.MAX_PACKET_SIZE, warning when the configured value is
 * too large.
 */
private void initWritePacketSize() {
  final int configured = dfsClient.getConf().getWritePacketSize();
  if (configured <= PacketReceiver.MAX_PACKET_SIZE) {
    writePacketSize = configured;
  } else {
    LOG.warn(
        "Configured write packet exceeds {} bytes as max,"
            + " using {} bytes.",
        PacketReceiver.MAX_PACKET_SIZE, PacketReceiver.MAX_PACKET_SIZE);
    writePacketSize = PacketReceiver.MAX_PACKET_SIZE;
  }
}
/** Returns the default block size from the client configuration. */
@Override
public long getDefaultBlockSize() {
  final long blockSize = dfs.getConf().getDefaultBlockSize();
  return blockSize;
}
/** Add a client. */ private synchronized void addClient(final DFSClient dfsc) { for(DFSClient c : dfsclients) { if (c == dfsc) { //client already exists, nothing to do. return; } } //client not found, add it dfsclients.add(dfsc); //update renewal time final int hdfsTimeout = dfsc.getConf().getHdfsTimeout(); if (hdfsTimeout > 0) { final long half = hdfsTimeout/2; if (half < renewal) { this.renewal = half; } } }
/** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
    EnumSet<CreateFlag> flag, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);

  // Consistency fix: use the getWritePacketSize() accessor rather than the
  // raw writePacketSize field, matching the streamer-creating constructor.
  computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
      bytesPerChecksum);

  streamer = new DataStreamer(stat, null);
  if (favoredNodes != null && favoredNodes.length != 0) {
    streamer.setFavoredNodes(favoredNodes);
  }
}
/** Construct a new output stream for creating a file. */
private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
    EnumSet<CreateFlag> flag, Progressable progress,
    DataChecksum checksum, String[] favoredNodes) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);

  // Consistency fix: use the getWritePacketSize() accessor rather than the
  // raw writePacketSize field, matching the streamer-creating constructor.
  computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
      bytesPerChecksum);

  streamer = new DataStreamer(stat, null);
  if (favoredNodes != null && favoredNodes.length != 0) {
    streamer.setFavoredNodes(favoredNodes);
  }
}
private long computeTransferReadTimeout() { // transfer timeout multiplier based on the transfer size // One per 200 packets = 12.8MB. Minimum is 2. int multi = 2 + (int) (bytesSent / dfsClient.getConf().getWritePacketSize()) / 200; return dfsClient.getDatanodeReadTimeout(multi); }
/**
 * Opens an input/output stream pair to the given datanode, authenticating
 * the connection with the supplied block token.
 */
protected IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    Token<BlockTokenIdentifier> blockToken) throws IOException {
  final boolean viaHostname = getConf().isConnectToDnViaHostname();
  return DFSUtilClient.connectToDN(dn, timeout, conf, saslClient,
      socketFactory, viaHostname, this, blockToken);
}
/** Construct a new output stream for creating a file. */ protected DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress, DataChecksum checksum, String[] favoredNodes, boolean createStreamer) { this(dfsClient, src, flag, progress, stat, checksum); this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK); computePacketChunkSize(dfsClient.getConf().getWritePacketSize(), bytesPerChecksum); if (createStreamer) { streamer = new DataStreamer(stat, null, dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager, favoredNodes, addBlockFlags); } }
/**
 * Gets the checksum of the whole file or of a leading range of the file;
 * the range always begins at offset 0. The file may be replicated or
 * striped, and depending on dfs.checksum.combine.mode the resulting
 * checksums may or may not be comparable across block layout forms.
 *
 * @param src The file path
 * @param length the length of the range, i.e., the range is [0, length]
 * @return The checksum
 * @see DistributedFileSystem#getFileChecksum(Path)
 */
public FileChecksum getFileChecksumWithCombineMode(String src, long length)
    throws IOException {
  final ChecksumCombineMode mode = getConf().getChecksumCombineMode();
  return getFileChecksumInternal(src, length, mode);
}
/** Close the given client. */
public synchronized void closeClient(final DFSClient dfsc) {
  dfsclients.remove(dfsc);
  if (dfsclients.isEmpty()) {
    // No clients remain: drop this renewer from the factory when its
    // thread is no longer running or it has expired.
    // NOTE(review): isRenewerExpired() semantics inferred from the name —
    // confirm against its definition.
    if (!isRunning() || isRenewerExpired()) {
      Factory.INSTANCE.remove(LeaseRenewer.this);
      return;
    }
    if (emptyTime == Long.MAX_VALUE) {
      //discover the first time that the client list is empty.
      emptyTime = Time.monotonicNow();
    }
  }

  //update renewal time
  // If the departing client was the one that determined the current
  // renewal interval (half its hdfs timeout), recompute the interval as
  // half the smallest positive timeout among the remaining clients,
  // defaulting to half the lease soft limit.
  if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
    long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
    for(DFSClient c : dfsclients) {
      final int timeout = c.getConf().getHdfsTimeout();
      if (timeout > 0 && timeout < min) {
        min = timeout;
      }
    }
    renewal = min/2;
  }
}
/**
 * Builds a Mockito-backed DFSClient whose configuration reports
 * FAST_GRACE_PERIOD as the hdfs timeout.
 */
private DFSClient createMockClient() {
  final DfsClientConf conf = Mockito.mock(DfsClientConf.class);
  Mockito.doReturn((int) FAST_GRACE_PERIOD).when(conf).getHdfsTimeout();

  final DFSClient client = Mockito.mock(DFSClient.class);
  Mockito.doReturn(true).when(client).isClientRunning();
  Mockito.doReturn(conf).when(client).getConf();
  Mockito.doReturn("myclient").when(client).getClientName();
  return client;
}
/** Construct a new output stream for append. */ private DFSOutputStream(DFSClient dfsClient, String src, EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum) throws IOException { this(dfsClient, src, progress, stat, checksum); initialFileSize = stat.getLen(); // length of file when opened this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK); boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK); // The last partial block of the file has to be filled. if (!toNewBlock && lastBlock != null) { // indicate that we are appending to an existing block bytesCurBlock = lastBlock.getBlockSize(); streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum); } else { computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum); streamer = new DataStreamer(stat, lastBlock != null ? lastBlock.getBlock() : null); } this.fileEncryptionInfo = stat.getFileEncryptionInfo(); }
@Override
void abort() throws IOException {
  // Accumulates IOExceptions from shutdown so all are reported together.
  final MultipleIOException.Builder b = new MultipleIOException.Builder();
  synchronized (this) {
    if (isClosed()) {
      return;
    }
    // Record the abort reason so later operations on this stream can
    // surface it to the caller.
    exceptionLastSeen.set(new IOException("Lease timeout of "
        + (dfsClient.getConf().getHdfsTimeout() / 1000) + " seconds expired."));
    try {
      closeThreads(true);
    } catch (IOException e) {
      b.add(e);
    }
  }
  // NOTE(review): the lease is released outside the synchronized block —
  // presumably to avoid holding the stream lock across the RPC; confirm.
  dfsClient.endFileLease(fileId);
  final IOException ioe = b.build();
  if (ioe != null) {
    throw ioe;
  }
}
@Override void checksumBlocks() throws IOException { int tmpTimeout = 3000 * 1 + getClient().getConf().getSocketTimeout(); setTimeout(tmpTimeout); for (bgIdx = 0; bgIdx < getLocatedBlocks().size() && getRemaining() >= 0; bgIdx++) { if (isRefetchBlocks()) { // refetch to get fresh tokens refetchBlocks(); } LocatedBlock locatedBlock = getLocatedBlocks().get(bgIdx); LocatedStripedBlock blockGroup = (LocatedStripedBlock) locatedBlock; if (!checksumBlockGroup(blockGroup)) { throw new PathIOException( getSrc(), "Fail to get block checksum for " + locatedBlock); } } }