// Fragment: refresh the in-memory block descriptor from on-disk state.
// The generation stamp is parsed from the meta file's name and the block
// length is taken from the actual block file on disk.
block.setGenerationStamp( Block.getGenerationStamp(metaFile.getName()));
block.setNumBytes(blkFile.length());
@Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { try (AutoCloseableLock lock = datasetLock.acquire()) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { throw new ReplicaNotFoundException(block); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException( "Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica); } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { block.setGenerationStamp(replica.getGenerationStamp()); } } ReplicaInfo r = getBlockReplica(block); File blockFile = new File(r.getBlockURI()); File metaFile = new File(r.getMetadataURI()); BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString()); return info; }
/** * Update replica with the new generation stamp and length. */ @Override // InterDatanodeProtocol public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important // for HA, since otherwise the standby node may lose track of the // block locations until the next block report. ExtendedBlock newBlock = new ExtendedBlock(oldBlock); newBlock.setGenerationStamp(recoveryId); newBlock.setBlockId(newBlockId); newBlock.setNumBytes(newLength); final String storageID = r.getStorageUuid(); notifyNamenodeReceivedBlock(newBlock, null, storageID, r.isOnTransientStorage()); return storageID; }
/**
 * Records the given generation stamp on the block currently being written.
 * A current block must have been established by the caller; this is
 * enforced with an assertion (active only when -ea is set).
 */
synchronized void setGenerationStamp(long genStamp) {
  assert currentBlock != null;
  currentBlock.setGenerationStamp(genStamp);
}
// Propagates a new generation stamp to the block currently being written.
// The assertion documents the precondition that a current block exists;
// it is only checked when assertions are enabled (-ea).
synchronized void setGenerationStamp(long generationStamp) {
  assert currentBlock != null;
  currentBlock.setGenerationStamp(generationStamp);
}
// Propagates a new generation stamp to the block currently being written.
// Precondition (assert): a current block has been set by the caller.
synchronized void setGenerationStamp(long generationStamp) {
  assert currentBlock != null;
  currentBlock.setGenerationStamp(generationStamp);
}
// Fragment of the write-pipeline stage switch: each case opens/recovers the
// replica for the requested stage and syncs the block's generation stamp.
replicaHandler = datanode.data.recoverRbw(
    block, newGs, minBytesRcvd, maxBytesRcvd);
block.setGenerationStamp(newGs);
break;
case PIPELINE_SETUP_APPEND:
  replicaHandler = datanode.data.append(block, newGs, minBytesRcvd);
  block.setGenerationStamp(newGs);
  datanode.notifyNamenodeReceivingBlock(
      block, replicaHandler.getReplica().getStorageUuid());
  // BUG FIX: without this break, PIPELINE_SETUP_APPEND fell through into
  // PIPELINE_SETUP_APPEND_RECOVERY and additionally ran recoverAppend()
  // on a replica that append() had already opened.
  break;
case PIPELINE_SETUP_APPEND_RECOVERY:
  replicaHandler = datanode.data.recoverAppend(block, newGs, minBytesRcvd);
  block.setGenerationStamp(newGs);
  datanode.notifyNamenodeReceivingBlock(
      block, replicaHandler.getReplica().getStorageUuid());
// Fragment: retry against the locally stored generation stamp. If the
// local replica is still a valid RBW (replica-being-written), switch the
// construction stage to TRANSFER_RBW.
b.setGenerationStamp(storedGS);
if (data.isValidRbw(b)) {
  stage = BlockConstructionStage.TRANSFER_RBW;
// Fragment: tail of a log/exception message, then sync the caller's block
// to the (newer) generation stamp of the on-disk replica.
+ " for block " + block);
block.setGenerationStamp(replica.getGenerationStamp());
// Fragment: reset the block descriptor to the latest known generation
// stamp and the minimum received length (presumably before an append or
// recovery — confirm against the enclosing method).
block.setGenerationStamp(latestGenerationStamp);
block.setNumBytes(minBytesRcvd);
// Fragment: assign a freshly allocated generation stamp; legacy-block
// status selects which generation-stamp sequence is used.
block.setGenerationStamp(nextGenerationStamp(
    blockManager.isLegacyBlock(block.getLocalBlock())));
@Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { synchronized(this) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { throw new ReplicaNotFoundException(block); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException( "Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica); } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { block.setGenerationStamp(replica.getGenerationStamp()); } } File datafile = getBlockFile(block); File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp()); BlockLocalPathInfo info = new BlockLocalPathInfo(block, datafile.getAbsolutePath(), metafile.getAbsolutePath()); return info; }
@Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { synchronized(this) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { throw new ReplicaNotFoundException(block); } if (replica.getGenerationStamp() < block.getGenerationStamp()) { throw new IOException( "Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica); } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { block.setGenerationStamp(replica.getGenerationStamp()); } } File datafile = getBlockFile(block); File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp()); BlockLocalPathInfo info = new BlockLocalPathInfo(block, datafile.getAbsolutePath(), metafile.getAbsolutePath()); return info; }
/** * Update replica with the new generation stamp and length. */ @Override // InterDatanodeProtocol public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important // for HA, since otherwise the standby node may lose track of the // block locations until the next block report. ExtendedBlock newBlock = new ExtendedBlock(oldBlock); newBlock.setGenerationStamp(recoveryId); newBlock.setBlockId(newBlockId); newBlock.setNumBytes(newLength); final String storageID = r.getStorageUuid(); notifyNamenodeReceivedBlock(newBlock, null, storageID, r.isOnTransientStorage()); return storageID; }
/** * Update replica with the new generation stamp and length. */ @Override // InterDatanodeProtocol public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important // for HA, since otherwise the standby node may lose track of the // block locations until the next block report. ExtendedBlock newBlock = new ExtendedBlock(oldBlock); newBlock.setGenerationStamp(recoveryId); newBlock.setBlockId(newBlockId); newBlock.setNumBytes(newLength); final String storageID = r.getStorageUuid(); notifyNamenodeReceivedBlock(newBlock, null, storageID, r.isOnTransientStorage()); return storageID; }
// Fragment: retry with the locally stored generation stamp; if the local
// replica is a valid RBW, switch to the TRANSFER_RBW construction stage.
b.setGenerationStamp(storedGS);
if (data.isValidRbw(b)) {
  stage = BlockConstructionStage.TRANSFER_RBW;
/**
 * Calls {@link ClientProtocol#updateBlockForPipeline} to obtain a new
 * generation stamp for the current block group, then hands an updated
 * located block (with its per-streamer block token) to every healthy
 * streamer.
 *
 * @param healthyStreamers the streamers participating in failure handling
 * @return the block group carrying the new generation stamp
 * @throws IOException if the namenode call fails
 */
private ExtendedBlock updateBlockForPipeline(
    Set<StripedDataStreamer> healthyStreamers) throws IOException {
  final LocatedBlock refreshed = dfsClient.namenode.updateBlockForPipeline(
      currentBlockGroup, dfsClient.clientName);
  // Stamp the current block group with the namenode-issued generation.
  final ExtendedBlock stamped = new ExtendedBlock(currentBlockGroup);
  stamped.setGenerationStamp(refreshed.getBlock().getGenerationStamp());

  // Split the refreshed group into its internal (per-streamer) blocks.
  final LocatedBlock[] internalBlocks = StripedBlockUtil.parseStripedBlockGroup(
      (LocatedStripedBlock) refreshed, cellSize, numDataBlocks,
      numAllBlocks - numDataBlocks);

  for (int idx = 0; idx < numAllBlocks; idx++) {
    final StripedDataStreamer streamer = getStripedDataStreamer(idx);
    if (!healthyStreamers.contains(streamer)) {
      continue; // only healthy streamers join the failure handling
    }
    final LocatedBlock lb = new LocatedBlock(new ExtendedBlock(stamped),
        null, null, null, -1, refreshed.isCorrupt(), null);
    lb.setBlockToken(internalBlocks[idx].getBlockToken());
    coordinator.getNewBlocks().offer(idx, lb);
  }
  return stamped;
}
// Fragment: allocate a fresh generation stamp (legacy blocks use a
// separate sequence), wrap the block in a LocatedBlock with no datanode
// locations yet, and attach a WRITE-mode block access token.
block.setGenerationStamp(nextGenerationStamp(
    blockIdManager.isLegacyBlock(block.getLocalBlock())));
locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
// Fragment: same pattern as elsewhere in this file — new generation stamp
// (legacy-aware), empty-location LocatedBlock, WRITE block token.
block.setGenerationStamp(nextGenerationStamp(
    blockIdManager.isLegacyBlock(block.getLocalBlock())));
locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
// Test-code fragment (starts and ends mid-statement): forces a bogus
// generation stamp (123) on 'previous' before attempting to complete the
// file — presumably to trigger a stale-generation-stamp rejection path in
// the namenode; confirm against the enclosing test method.
newBlockClone);
previous.setGenerationStamp(123);
try{
  dfs.getClient().getNamenode().complete(file.toString(),