/** * Update replica with the new generation stamp and length. */ @Override // InterDatanodeProtocol public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important // for HA, since otherwise the standby node may lose track of the // block locations until the next block report. ExtendedBlock newBlock = new ExtendedBlock(oldBlock); newBlock.setGenerationStamp(recoveryId); newBlock.setBlockId(newBlockId); newBlock.setNumBytes(newLength); final String storageID = r.getStorageUuid(); notifyNamenodeReceivedBlock(newBlock, null, storageID, r.isOnTransientStorage()); return storageID; }
/**
 * Convert an {@link ExtendedBlock} to a Json map.
 *
 * @param extendedblock the block to convert; may be null
 * @return a sorted map of the block's identifying fields
 *         (blockPoolId, blockId, numBytes, generationStamp),
 *         or null if the input block is null
 */
private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
  if (extendedblock == null) {
    return null;
  }
  // TreeMap keeps keys sorted, so the emitted JSON is deterministic.
  final Map<String, Object> m = new TreeMap<>();
  m.put("blockPoolId", extendedblock.getBlockPoolId());
  m.put("blockId", extendedblock.getBlockId());
  m.put("numBytes", extendedblock.getNumBytes());
  m.put("generationStamp", extendedblock.getGenerationStamp());
  return m;
}
@Override public String toString() { // Called in AsyncDiskService.execute for displaying error messages. return "deletion of block " + block.getBlockPoolId() + " " + block.getLocalBlock() + " with block file " + replicaToDelete.getBlockURI() + " and meta file " + replicaToDelete.getMetadataURI() + " from volume " + volume; }
conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder() .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy)) OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()) .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS) .setRequestedChecksum(checksumProto)
/**
 * Get File name for a given block.
 *
 * Delegates to the (bpid, blockId) overload after unpacking the block.
 */
private ReplicaInfo getBlockReplica(ExtendedBlock b) throws IOException {
  final String bpid = b.getBlockPoolId();
  final long blockId = b.getBlockId();
  return getBlockReplica(bpid, blockId);
}
@Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { try (AutoCloseableLock lock = datasetLock.acquire()) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { throw new ReplicaNotFoundException(block); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException( "Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica); } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { block.setGenerationStamp(replica.getGenerationStamp()); } } ReplicaInfo r = getBlockReplica(block); File blockFile = new File(r.getBlockURI()); File metaFile = new File(r.getMetadataURI()); BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString()); return info; }
getActiveNamenodeForBP(block.getBlockPoolId()); rBlock.getNewBlock().getBlockId() : block.getBlockId(); block.getNumBytes(), isTruncateRecovery, syncList); final ExtendedBlock newBlock = new ExtendedBlock(bpid, blockId, -1, recoveryId); switch(bestState) { newBlock.setNumBytes(finalizedLength); break; case RBW: newBlock.setNumBytes(minLength); break; case RUR: newBlock.setNumBytes(rBlock.getNewBlock().getNumBytes()); block, block.getNumBytes(), bestState.name(), newBlock, newBlock.getNumBytes(), participatingList); try { r.updateReplicaUnderRecovery(bpid, recoveryId, blockId, newBlock.getNumBytes()); successList.add(r); } catch (IOException e) {
datanode : DataNode.createInterDataNodeProtocolProxy(id, conf, dnConf.socketTimeout, dnConf.connectToDnViaHostname); ExtendedBlock internalBlk = new ExtendedBlock(block); final long blockId = block.getBlockId() + blockIndices[i]; internalBlk.setBlockId(blockId); ReplicaRecoveryInfo info = callInitReplicaRecovery(proxyDN, new RecoveringBlock(internalBlk, null, recoveryId)); info.getGenerationStamp() >= block.getGenerationStamp() && info.getNumBytes() > 0) { final BlockRecord existing = syncBlocks.get(blockId); if (LOG.isDebugEnabled()) { LOG.debug("Recovering block " + block + ", length=" + block.getNumBytes() + ", safeLength=" + safeLength + ", syncList=" + syncBlocks); newStorages[index] = r.storageID; ExtendedBlock newBlock = new ExtendedBlock(bpid, block.getBlockId(), safeLength, recoveryId); DatanodeProtocolClientSideTranslatorPB nn = getActiveNamenodeForBP(bpid); nn.commitBlockSynchronization(block, newBlock.getGenerationStamp(), newBlock.getNumBytes(), true, false, newLocs, newStorages);
/**
 * This method creates an internal {@link ExtendedBlock} at the given index
 * of a block group.
 */
public static ExtendedBlock constructInternalBlock(ExtendedBlock blockGroup,
    int cellSize, int dataBlkNum, int idxInBlockGroup) {
  final ExtendedBlock internal = new ExtendedBlock(blockGroup);
  // Internal block ids are the group id offset by the index in the group.
  internal.setBlockId(blockGroup.getBlockId() + idxInBlockGroup);
  final long length = getInternalBlockLength(
      blockGroup.getNumBytes(), cellSize, dataBlkNum, idxInBlockGroup);
  internal.setNumBytes(length);
  return internal;
}
@Override // FsDatasetSpi public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException { try (AutoCloseableLock lock = datasetLock.acquire()) { final Replica replica = getReplicaInfo(block.getBlockPoolId(), block.getBlockId()); if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException( "replica.getGenerationStamp() < block.getGenerationStamp(), block=" + block + ", replica=" + replica); } return replica.getVisibleLength(); } }
} else { ExtendedBlock block = new ExtendedBlock(bpid, Block.filename2id(state.curEntry)); File expectedBlockDir = DatanodeUtil.idToBlockDir( new File("."), block.getBlockId()); File actualBlockDir = Paths.get(".", state.curFinalizedDir, state.curFinalizedSubDir).toFile(); "directory. Expected directory: {}. " + "Actual directory: {}", storageID, bpid, block.getBlockId(), expectedBlockDir.getPath(), actualBlockDir.getPath()); continue; block.setGenerationStamp( Block.getGenerationStamp(metaFile.getName())); block.setNumBytes(blkFile.length());
final ExtendedBlock originalBlock = new ExtendedBlock(block); if (block.getNumBytes() == 0) { block.setNumBytes(dataXceiverServer.estimateBlockSize); block.setGenerationStamp(latestGenerationStamp); block.setNumBytes(minBytesRcvd); datanode.closeBlock(block, null, storageUuid, isOnTransientStorage); LOG.info("Received {} src: {} dest: {} of size {}", block, remoteAddress, localAddress, block.getNumBytes()); size = block.getNumBytes();
/**
 * Transfer each block to its corresponding targets; a failure on one
 * block is logged and does not stop the remaining transfers.
 */
void transferBlocks(String poolId, Block blocks[],
    DatanodeInfo[][] xferTargets, StorageType[][] xferTargetStorageTypes,
    String[][] xferTargetStorageIDs) {
  int i = 0;
  for (Block blk : blocks) {
    try {
      transferBlock(new ExtendedBlock(poolId, blk), xferTargets[i],
          xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
    } catch (IOException ie) {
      LOG.warn("Failed to transfer block " + blk, ie);
    }
    i++;
  }
}
/** * Remove the temporary block file (if any) */ @Override // FsDatasetSpi public void unfinalizeBlock(ExtendedBlock b) throws IOException { try (AutoCloseableLock lock = datasetLock.acquire()) { ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock()); if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) { // remove from volumeMap volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock()); // delete the on-disk temp file if (delBlockFromDisk(replicaInfo)) { LOG.warn("Block " + b + " unfinalized and removed. "); } if (replicaInfo.getVolume().isTransientStorage()) { ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true); } } } }
if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException("Replica gen stamp < block genstamp, block=" + block + ", replica=" + replica); } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { if (DataNode.LOG.isDebugEnabled()) { DataNode.LOG.debug("Bumping up the client provided" + " for block " + block); block.setGenerationStamp(replica.getGenerationStamp()); datanode.data.invalidate(block.getBlockPoolId(), new Block[] {block.getLocalBlock()}); String msg = " Offset " + startOffset + " and length " + length + " don't match block " + block + " ( blockLen " + end + " )"; LOG.warn(datanode.getDNRegistrationForBP(block.getBlockPoolId()) + ":sendBlock() : " + msg); throw new IOException(msg);
BlockReceiver.this.close(); endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; block.setNumBytes(replicaInfo.getNumBytes()); datanode.data.finalizeBlock(block, dirSyncOnFinalize); long offset = 0; DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block .getBlockPoolId()); ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, myAddr, block.getNumBytes(), "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(), block, endTime - startTime)); } else { LOG.info("Received " + block + " size " + block.getNumBytes() + " from " + inAddr);
try { Block b = volume.getDataset().getStoredBlock( cblock.getBlockPoolId(), cblock.getBlockId()); if (b == null) { LOG.info("Replica {} was not found in the VolumeMap for volume {}", cblock, volume); } else { block = new ExtendedBlock(cblock.getBlockPoolId(), b);
if (newGS < b.getGenerationStamp()) { throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp"); ReplicaInPipeline replica = null; try { replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes()); } catch (IOException e) { IOUtils.cleanup(null, ref);
/** Generate a block token for the located block. */ public void setBlockToken(final LocatedBlock b, final AccessMode mode) throws IOException { if (isBlockTokenEnabled()) { // Use cached UGI if serving RPC calls. if (b.isStriped()) { Preconditions.checkState(b instanceof LocatedStripedBlock); LocatedStripedBlock sb = (LocatedStripedBlock) b; byte[] indices = sb.getBlockIndices(); Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length]; ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock()); for (int i = 0; i < indices.length; i++) { internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]); blockTokens[i] = blockTokenSecretManager.generateToken( NameNode.getRemoteUser().getShortUserName(), internalBlock, EnumSet.of(mode), b.getStorageTypes(), b.getStorageIDs()); } sb.setBlockTokens(blockTokens); } b.setBlockToken(blockTokenSecretManager.generateToken( NameNode.getRemoteUser().getShortUserName(), b.getBlock(), EnumSet.of(mode), b.getStorageTypes(), b.getStorageIDs())); } }
/** * Connect to the first item in the target list. Pass along the * entire target list, the block, and the data. */ DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes, String[] targetStorageIds, ExtendedBlock b, BlockConstructionStage stage, final String clientname) { if (DataTransferProtocol.LOG.isDebugEnabled()) { DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " + "clientname={}, targets={}, target storage types={}, " + "target storage IDs={}", getClass().getSimpleName(), b, b.getNumBytes(), stage, clientname, Arrays.asList(targets), targetStorageTypes == null ? "[]" : Arrays.asList(targetStorageTypes), targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds)); } this.targets = targets; this.targetStorageTypes = targetStorageTypes; this.targetStorageIds = targetStorageIds; this.b = b; this.stage = stage; BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId()); bpReg = bpos.bpRegistration; this.clientname = clientname; this.cachingStrategy = new CachingStrategy(true, getDnConf().readaheadLength); }