/**
 * End the current block and complete the file at the namenode. You should call
 * {@link #recoverAndClose(CancelableProgressable)} if this method throws an exception.
 */
@Override
public void close() throws IOException {
  endBlock();
  state = State.CLOSED;
  datanodeList.forEach(ch -> ch.close());
  datanodeList.forEach(ch -> ch.closeFuture().awaitUninterruptibly());
  block.setNumBytes(ackedBlockLength);
  completeFile(client, namenode, src, clientName, block, fileId);
}
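// Illustrative caller-side sketch (not from the source above) of the contract
// stated in the javadoc: fall back to recoverAndClose if close() throws.
// "out" is a hypothetical reference to an output object defining both methods.
try {
  out.close();
} catch (IOException e) {
  // Abandon the pipeline and let lease recovery clean up the partially written file.
  out.recoverAndClose(null);
  throw e;
}
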
int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
blockCopy.setNumBytes(locatedBlock.getBlockSize());
ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
    .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))

private ExtendedBlock getInternalBlock(int numDataUnits, int idx) {
  // Set the requested number of bytes in blockGroup, which is required to
  // construct the internal block for computing the checksum.
  long actualNumBytes = blockGroup.getNumBytes();
  blockGroup.setNumBytes(requestedNumBytes);
  ExtendedBlock block = StripedBlockUtil.constructInternalBlock(blockGroup,
      ecPolicy.getCellSize(), numDataUnits, idx);
  // Restore the actual numBytes value in blockGroup.
  blockGroup.setNumBytes(actualNumBytes);
  return block;
}

BlockReceiver.this.close();
endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
block.setNumBytes(replicaInfo.getNumBytes());
datanode.data.finalizeBlock(block, dirSyncOnFinalize);

block.setGenerationStamp(Block.getGenerationStamp(metaFile.getName()));
block.setNumBytes(blkFile.length());

  newBlock.setNumBytes(finalizedLength);
  break;
case RBW:
  // Fail if no replica contributed a usable length during recovery;
  // otherwise use the minimum length across the recovered replicas.
  if (minLength == Long.MAX_VALUE) {
    throw new IOException("Incorrect block size");
  }
  newBlock.setNumBytes(minLength);
  break;
case RUR:
  newBlock.setNumBytes(rBlock.getNewBlock().getNumBytes());

/**
 * Update replica with the new generation stamp and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
    final long recoveryId, final long newBlockId, final long newLength)
    throws IOException {
  final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId,
      newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
  ExtendedBlock newBlock = new ExtendedBlock(oldBlock);
  newBlock.setGenerationStamp(recoveryId);
  newBlock.setBlockId(newBlockId);
  newBlock.setNumBytes(newLength);
  final String storageID = r.getStorageUuid();
  notifyNamenodeReceivedBlock(newBlock, null, storageID,
      r.isOnTransientStorage());
  return storageID;
}
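// A hedged sketch of the coordinating side of block recovery that invokes the
// method above on each datanode. The "datanodes" list and the surrounding loop
// are assumptions; only updateReplicaUnderRecovery's signature is taken from
// the snippet above.
List<String> storageIDs = new ArrayList<>();
for (InterDatanodeProtocol dn : datanodes) {
  // Ask each replica holder to roll its replica forward to the agreed
  // recovery generation stamp, block id and length.
  storageIDs.add(dn.updateReplicaUnderRecovery(
      oldBlock, recoveryId, newBlockId, newLength));
}
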
synchronized void setNumBytes(long numBytes) {
  assert currentBlock != null;
  currentBlock.setNumBytes(numBytes);
}

block.setNumBytes(dataXceiverServer.estimateBlockSize);
b.setNumBytes(visible);
block.setNumBytes(replicaInfo.getNumBytes());
block.setNumBytes(dataXceiverServer.estimateBlockSize);

    stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
  block.setGenerationStamp(latestGenerationStamp);
  block.setNumBytes(minBytesRcvd);

private void updatePipeline(ExtendedBlock newBG) throws IOException {
  final DatanodeInfo[] newNodes = new DatanodeInfo[numAllBlocks];
  final String[] newStorageIDs = new String[numAllBlocks];
  for (int i = 0; i < numAllBlocks; i++) {
    final StripedDataStreamer streamer = getStripedDataStreamer(i);
    final DatanodeInfo[] nodes = streamer.getNodes();
    final String[] storageIDs = streamer.getStorageIDs();
    if (streamer.isHealthy() && nodes != null && storageIDs != null) {
      newNodes[i] = nodes[0];
      newStorageIDs[i] = storageIDs[0];
    } else {
      newNodes[i] = new DatanodeInfoBuilder()
          .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
      newStorageIDs[i] = "";
    }
  }

  // Update the NameNode with the acked length of the block group;
  // save and restore the unacked length.
  final long sentBytes = currentBlockGroup.getNumBytes();
  final long ackedBytes = getAckedLength();
  Preconditions.checkState(ackedBytes <= sentBytes,
      "Acked:" + ackedBytes + ", Sent:" + sentBytes);
  currentBlockGroup.setNumBytes(ackedBytes);
  newBG.setNumBytes(ackedBytes);
  dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,
      newBG, newNodes, newStorageIDs);
  currentBlockGroup = newBG;
  currentBlockGroup.setNumBytes(sentBytes);
}
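// A minimal sketch of the save-and-restore pattern used above, with placeholder
// names (block, acked, namenode, etc. are assumptions, not the actual
// DFSStripedOutputStream fields): only the acknowledged length is reported to
// the NameNode, and the in-flight length is put back afterwards.
long sent = block.getNumBytes();   // bytes already handed to the pipeline
block.setNumBytes(acked);          // report only acknowledged bytes
try {
  namenode.updatePipeline(clientName, block, newBlock, nodes, storageIDs);
} finally {
  block.setNumBytes(sent);         // restore so writing can continue past acked
}
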
/**
 * This method creates an internal {@link ExtendedBlock} at the given index
 * of a block group.
 */
public static ExtendedBlock constructInternalBlock(ExtendedBlock blockGroup,
    int cellSize, int dataBlkNum, int idxInBlockGroup) {
  ExtendedBlock block = new ExtendedBlock(blockGroup);
  block.setBlockId(blockGroup.getBlockId() + idxInBlockGroup);
  block.setNumBytes(getInternalBlockLength(blockGroup.getNumBytes(),
      cellSize, dataBlkNum, idxInBlockGroup));
  return block;
}
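// Sketch (assumed context, not from the source above): enumerating every
// internal block of a striped block group with the helper defined above.
// ecPolicy and blockGroup are assumed to be in scope, as in the other snippets.
int dataBlkNum = ecPolicy.getNumDataUnits();
int totalBlkNum = dataBlkNum + ecPolicy.getNumParityUnits();
for (int idx = 0; idx < totalBlkNum; idx++) {
  ExtendedBlock internal = StripedBlockUtil.constructInternalBlock(
      blockGroup, ecPolicy.getCellSize(), dataBlkNum, idx);
  // internal.getBlockId() is blockGroup.getBlockId() + idx, and its numBytes
  // is that index's share of blockGroup.getNumBytes().
}
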
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j = 1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b, false);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}

int addSomeBlocks(SimulatedFSDataset fsdataset) throws IOException {