/**
 * Transfers each of the given blocks to its corresponding set of target
 * datanodes with the matching storage types and storage IDs. A failure to
 * transfer one block is logged and does not stop the remaining transfers.
 */
void transferBlocks(String poolId, Block blocks[],
    DatanodeInfo[][] xferTargets, StorageType[][] xferTargetStorageTypes,
    String[][] xferTargetStorageIDs) {
  int idx = 0;
  for (Block blk : blocks) {
    try {
      transferBlock(new ExtendedBlock(poolId, blk), xferTargets[idx],
          xferTargetStorageTypes[idx], xferTargetStorageIDs[idx]);
    } catch (IOException ie) {
      // Best effort: record the failure and continue with the next block.
      LOG.warn("Failed to transfer block " + blk, ie);
    }
    idx++;
  }
}
/**
 * Applies the recovery result to the local replica: builds an
 * {@link ExtendedBlock} from {@code bpid} and the enclosing-scope
 * {@code rInfo} field (presumably the replica being recovered — TODO
 * confirm against the enclosing class), asks the datanode to update that
 * replica to the new id/length under {@code recoveryId}, and stores the
 * returned storage id in the enclosing {@code storageID} field.
 *
 * @param bpid block pool id used to build the extended block
 * @param recoveryId recovery generation stamp to apply
 * @param newBlockId block id after recovery
 * @param newLength replica length after recovery
 * @throws IOException if the datanode fails to update the replica
 */
private void updateReplicaUnderRecovery(String bpid, long recoveryId, long newBlockId, long newLength) throws IOException { final ExtendedBlock b = new ExtendedBlock(bpid, rInfo); storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, newBlockId, newLength); }
/** Wraps {@code blk} with this node's block pool id as an {@link ExtendedBlock}. */
ExtendedBlock getExtendedBlock(Block blk) {
  final String bpId = getBlockPoolId();
  return new ExtendedBlock(bpId, blk);
}
conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT); int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT); ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
/**
 * Returns the next block of this pool from the iterator, resuming from the
 * saved position: regions whose block id is below {@code state.lastBlockId}
 * are skipped. Updates the saved position to the returned block's id.
 * Returns {@code null} when the iterator is absent or exhausted.
 */
@Override
public ExtendedBlock nextBlock() throws IOException {
  if (blockIterator == null || !blockIterator.hasNext()) {
    return null;
  }
  while (blockIterator.hasNext()) {
    final FileRegion region = blockIterator.next();
    final Block blk = region.getBlock();
    // Skip everything strictly before the checkpointed id; the first
    // region at or past it is the one to hand out.
    if (blk.getBlockId() >= state.lastBlockId) {
      state.lastBlockId = blk.getBlockId();
      return new ExtendedBlock(bpid, blk);
    }
  }
  return null;
}
FileInputStream blockIn = null, metaIn = null; MappableBlock mappableBlock = null; ExtendedBlock extBlk = new ExtendedBlock(key.getBlockPoolId(), key.getBlockId(), length, genstamp); long newUsedBytes = reserve(length);
} else { ExtendedBlock block = new ExtendedBlock(bpid, Block.filename2id(state.curEntry)); File expectedBlockDir = DatanodeUtil.idToBlockDir( new File("."), block.getBlockId());
ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(), reportedBlock.getBlock()); final KeyManager km = nnc.getKeyManager();
new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing)); } else { asyncDiskService.deleteSync(v.obtainReference(), removing, new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing));
private BlockMovementStatus moveBlock() { ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(), blkMovingInfo.getBlock()); final KeyManager km = nnc.getKeyManager(); Token<BlockTokenIdentifier> accessToken; try { accessToken = km.getAccessToken(eb, new StorageType[]{blkMovingInfo.getTargetStorageType()}, new String[0]); } catch (IOException e) { // TODO: handle failure retries LOG.warn( "Failed to move block:{} from src:{} to destin:{} to satisfy " + "storageType:{}", blkMovingInfo.getBlock(), blkMovingInfo.getSource(), blkMovingInfo.getTarget(), blkMovingInfo.getTargetStorageType(), e); return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE; } return blkDispatcher.moveBlock(blkMovingInfo, saslClient, eb, new Socket(), km, accessToken); } }
/**
 * Generate a block token for the located block.
 *
 * <p>No-op when block tokens are disabled. For a striped block, one token
 * is additionally generated per internal block — the internal block id is
 * the group block's id plus the block index — and the array is installed
 * via {@code sb.setBlockTokens}. In both the striped and the contiguous
 * case, a token for the block itself is set on the located block.
 *
 * @param b the located block to attach token(s) to
 * @param mode the access mode the token grants
 * @throws IOException if token generation fails
 */
public void setBlockToken(final LocatedBlock b, final AccessMode mode) throws IOException { if (isBlockTokenEnabled()) { // Use cached UGI if serving RPC calls. if (b.isStriped()) { Preconditions.checkState(b instanceof LocatedStripedBlock); LocatedStripedBlock sb = (LocatedStripedBlock) b; byte[] indices = sb.getBlockIndices(); Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length]; ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock()); for (int i = 0; i < indices.length; i++) { internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]); blockTokens[i] = blockTokenSecretManager.generateToken( NameNode.getRemoteUser().getShortUserName(), internalBlock, EnumSet.of(mode), b.getStorageTypes(), b.getStorageIDs()); } sb.setBlockTokens(blockTokens); } b.setBlockToken(blockTokenSecretManager.generateToken( NameNode.getRemoteUser().getShortUserName(), b.getBlock(), EnumSet.of(mode), b.getStorageTypes(), b.getStorageIDs())); } }
uc.getTruncateBlock().getBlockId() != b.getBlockId(); ExtendedBlock primaryBlock = (copyOnTruncateRecovery) ? new ExtendedBlock(blockPoolId, uc.getTruncateBlock()) : new ExtendedBlock(blockPoolId, b);
datanode : DataNode.createInterDataNodeProtocolProxy(id, conf, dnConf.socketTimeout, dnConf.connectToDnViaHostname); ExtendedBlock internalBlk = new ExtendedBlock(block); final long blockId = block.getBlockId() + blockIndices[i]; internalBlk.setBlockId(blockId); newStorages[index] = r.storageID; ExtendedBlock newBlock = new ExtendedBlock(bpid, block.getBlockId(), safeLength, recoveryId); DatanodeProtocolClientSideTranslatorPB nn = getActiveNamenodeForBP(bpid);
/** * Update replica with the new generation stamp and length. */ @Override // InterDatanodeProtocol public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important // for HA, since otherwise the standby node may lose track of the // block locations until the next block report. ExtendedBlock newBlock = new ExtendedBlock(oldBlock); newBlock.setGenerationStamp(recoveryId); newBlock.setBlockId(newBlockId); newBlock.setNumBytes(newLength); final String storageID = r.getStorageUuid(); notifyNamenodeReceivedBlock(newBlock, null, storageID, r.isOnTransientStorage()); return storageID; }
/**
 * Hands out the recovery work for this striped block to datanodes, choosing
 * one of three strategies:
 * <ol>
 * <li>all internal blocks present but the rack count is insufficient:
 * replicate a single internal block (picked by
 * {@code chooseSource4SimpleReplication()}) to the first target;</li>
 * <li>all internal blocks present and some sources are decommissioning or
 * entering maintenance: plain replication from each leaving-service source
 * to a target, paired one-to-one up to the smaller of the two counts;</li>
 * <li>otherwise: schedule erasure-coded reconstruction on the first
 * target's datanode.</li>
 * </ol>
 */
@Override void addTaskToDatanode(NumberReplicas numberReplicas) { final DatanodeStorageInfo[] targets = getTargets(); assert targets.length > 0; BlockInfoStriped stripedBlk = (BlockInfoStriped) getBlock(); if (hasNotEnoughRack()) { // if we already have all the internal blocks, but not enough racks, // we only need to replicate one internal block to a new rack int sourceIndex = chooseSource4SimpleReplication(); createReplicationWork(sourceIndex, targets[0]); } else if ((numberReplicas.decommissioning() > 0 || numberReplicas.liveEnteringMaintenanceReplicas() > 0) && hasAllInternalBlocks()) { List<Integer> leavingServiceSources = findLeavingServiceSources(); // decommissioningSources.size() should be >= targets.length final int num = Math.min(leavingServiceSources.size(), targets.length); for (int i = 0; i < num; i++) { createReplicationWork(leavingServiceSources.get(i), targets[i]); } } else { targets[0].getDatanodeDescriptor().addBlockToBeErasureCoded( new ExtendedBlock(blockPoolId, stripedBlk), getSrcNodes(), targets, getLiveBlockIndicies(), stripedBlk.getErasureCodingPolicy()); } }
cblock, volume); } else { block = new ExtendedBlock(cblock.getBlockPoolId(), b);
/** * Invalidate a block but does not delete the actual on-disk block file. * * It should only be used when deactivating disks. * * @param bpid the block pool ID. * @param block The block to be invalidated. */ public void invalidate(String bpid, ReplicaInfo block) { // If a DFSClient has the replica in its cache of short-circuit file // descriptors (and the client is using ShortCircuitShm), invalidate it. datanode.getShortCircuitRegistry().processBlockInvalidation( new ExtendedBlockId(block.getBlockId(), bpid)); // If the block is cached, start uncaching it. cacheManager.uncacheBlock(bpid, block.getBlockId()); datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block), block.getStorageUuid()); }
/** * Cleanup the old replica and notifies the NN about new replica. * * @param replicaInfo - Old replica to be deleted * @param newReplicaInfo - New replica object * @param bpid - block pool id */ private void removeOldReplica(ReplicaInfo replicaInfo, ReplicaInfo newReplicaInfo, final String bpid) { // Before deleting the files from old storage we must notify the // NN that the files are on the new storage. Else a blockReport from // the transient storage might cause the NN to think the blocks are lost. // Replicas must be evicted from client short-circuit caches, because the // storage will no longer be same, and thus will require validating // checksum. This also stops a client from holding file descriptors, // which would prevent the OS from reclaiming the memory. ExtendedBlock extendedBlock = new ExtendedBlock(bpid, newReplicaInfo); datanode.getShortCircuitRegistry().processBlockInvalidation( ExtendedBlockId.fromExtendedBlock(extendedBlock)); datanode.notifyNamenodeReceivedBlock( extendedBlock, null, newReplicaInfo.getStorageUuid(), newReplicaInfo.isOnTransientStorage()); // Remove the old replicas cleanupReplica(bpid, replicaInfo); // If deletion failed then the directory scanner will cleanup the blocks // eventually. }
if (blk.isStriped()) { final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk); return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos, } else { final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk); return null == locatedBlocks " numCorrupt: " + numCorruptNodes + " numCorruptRepls: " + numCorruptReplicas; final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk); return blockIndices == null ? null == locatedBlocks ? newLocatedBlock(eb, machines, pos, isCorrupt)
BlockInfo lastBlock = file.getLastBlock(); if (lastBlock != null) { ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock); ret = new LocatedBlock(blk, new DatanodeInfo[0]);