// Register the new block pool with the dataset, enable block scanning for
// it, and start the directory scanner.
// NOTE(review): fragment — assumes nsInfo and bpos refer to the same block
// pool; confirm against the full enclosing method.
data.addBlockPool(nsInfo.getBlockPoolID(), getConf());
blockScanner.enableBlockPoolId(bpos.getBlockPoolId());
initDirectoryScanner(getConf());
// NOTE(review): pipeline-setup fragment (BlockReceiver-style code). The
// extraction appears truncated: three notifyNamenodeReceivingBlock( calls
// are cut off mid-argument-list, so braces/parens do not balance here.
datanode.data.createTemporary(storageType, storageId, block, false);
} else {
  switch (stage) {
  case PIPELINE_SETUP_CREATE:
    // Fresh write pipeline: allocate a replica-being-written (RBW).
    replicaHandler = datanode.data.createRbw(storageType, storageId, block,
        allowLazyPersist);
    // truncated call — arguments missing in this fragment
    datanode.notifyNamenodeReceivingBlock(
    break;
  case PIPELINE_SETUP_STREAMING_RECOVERY:
    // Recover an existing RBW replica under a new generation stamp.
    replicaHandler = datanode.data.recoverRbw(
        block, newGs, minBytesRcvd, maxBytesRcvd);
    block.setGenerationStamp(newGs);
    break;
  case PIPELINE_SETUP_APPEND:
    // Append to a finalized replica; generation stamp is bumped to newGs.
    replicaHandler = datanode.data.append(block, newGs, minBytesRcvd);
    block.setGenerationStamp(newGs);
    // truncated call — arguments missing in this fragment
    datanode.notifyNamenodeReceivingBlock(
    break;
  case PIPELINE_SETUP_APPEND_RECOVERY:
    // Recover a failed append; generation stamp is bumped to newGs.
    replicaHandler = datanode.data.recoverAppend(block, newGs, minBytesRcvd);
    block.setGenerationStamp(newGs);
    // truncated call — arguments missing in this fragment
    datanode.notifyNamenodeReceivingBlock(
    replicaHandler = datanode.data.createTemporary(storageType, storageId,
        block, isTransfer);
    break;
// Finalize the received block: capture an end timestamp (only when client
// trace logging is on), sync the block length from the replica, finalize
// the replica on disk, and apply any pinning.
endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
block.setNumBytes(replicaInfo.getNumBytes());
datanode.data.finalizeBlock(block, dirSyncOnFinalize);
datanode.data.setPinning(block);
/**
 * Sends a lifeline message to the lifeline NameNode RPC proxy, carrying the
 * current storage reports, cache stats, transfer counts, and any volume
 * failure summary for this block pool.
 *
 * @throws IOException if the lifeline RPC fails
 */
private void sendLifeline() throws IOException {
  // Gather per-volume storage reports for this actor's block pool.
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    // Fix: the original concatenated " storage " + " reports", producing a
    // doubled space in the log message.
    LOG.debug("Sending lifeline with " + reports.length
        + " storage reports from service actor: " + BPServiceActor.this);
  }
  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  // A null summary means no failed volumes.
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  lifelineNamenode.sendLifeline(bpRegistration,
                                reports,
                                dn.getFSDataset().getCacheCapacity(),
                                dn.getFSDataset().getCacheUsed(),
                                dn.getXmitsInProgress(),
                                dn.getXceiverCount(),
                                numFailedVolumes,
                                volumeFailureSummary);
}
}
DatanodeCommand cacheReport() throws IOException { // If caching is disabled, do not send a cache report if (dn.getFSDataset().getCacheCapacity() == 0) { return null; } // send cache report if timer has expired. DatanodeCommand cmd = null; final long startTime = monotonicNow(); if (startTime - lastCacheReport > dnConf.cacheReportInterval) { if (LOG.isDebugEnabled()) { LOG.debug("Sending cacheReport from service actor: " + this); } lastCacheReport = startTime; String bpid = bpos.getBlockPoolId(); List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid); long createTime = monotonicNow(); cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds); long sendTime = monotonicNow(); long createCost = createTime - startTime; long sendCost = sendTime - createTime; dn.getMetrics().addCacheReport(sendCost); if (LOG.isDebugEnabled()) { LOG.debug("CacheReport of " + blockIds.size() + " block(s) took " + createCost + " msec to generate and " + sendCost + " msecs for RPC and NN processing"); } } return cmd; }
// NOTE(review): BlockSender-constructor fragment — truncated by extraction;
// braces do not balance and checksumIn is defined outside this view.
try (AutoCloseableLock lock = datanode.data.acquireDatasetLock()) {
  // Resolve the replica and its visible length while holding the dataset lock.
  replica = getReplica(block, datanode);
  replicaVisibleLength = replica.getVisibleLength();
  // Hold a volume reference so the volume cannot be removed mid-stream.
  volumeRef = datanode.data.getVolume(block).obtainReference();
  try {
    // Fault-injection hook used by tests.
    DataNodeFaultInjector.get().throwTooManyOpenFiles();
    metaIn = datanode.data.getMetaDataInputStream(block);
    if (!corruptChecksumOk || metaIn != null) {
      if (metaIn == null) {
        // Missing checksum metadata with corruptChecksumOk=false:
        // mark the replica invalid.
        datanode.data.invalidate(block.getBlockPoolId(),
            new Block[] {block.getLocalBlock()});
  DataNode.LOG.debug("replica=" + replica);
  blockIn = datanode.data.getBlockInputStream(block, offset); // seek to offset
  ris = new ReplicaInputStreams(
      blockIn, checksumIn, volumeRef, fileIoProvider);
// NOTE(review): fragment — truncated; braces do not balance in this view.
// Pin the volume with a reference so it outlives this read.
this.volumeRef = datanode.data.getVolume(block).obtainReference();
boolean keepMetaInOpen = false;
try {
  metaIn = datanode.data.getMetaDataInputStream(block);
  if (!corruptChecksumOk || metaIn != null) {
    if (metaIn == null) {
      DataNode.LOG.debug("replica=" + replica);
  blockIn = datanode.data.getBlockInputStream(block, offset); // seek to offset
  // Keep the raw FD when the stream is file-backed (used for readahead/drop).
  if (blockIn instanceof FileInputStream) {
    blockInFd = ((FileInputStream) blockIn).getFD();
// Transfer case: promote the temporary replica to RBW; otherwise the
// pipeline write completed, so finalize the block on disk.
// NOTE(review): fragment — surrounding condition and closing brace are
// outside this view.
datanode.data.convertTemporaryToRbw(block);
} else {
  datanode.data.finalizeBlock(block, dirSyncOnFinalize);
/**
 * Opens an input stream over the block's data, positioned at
 * {@code seekOffset} bytes from the start of the block.
 *
 * @param block the block to read
 * @param seekOffset byte offset at which the returned stream is positioned
 * @return an input stream over the block data
 * @throws IOException if the dataset cannot open the block
 */
InputStream getBlockInputStream(ExtendedBlock block, long seekOffset)
    throws IOException {
  // Thin delegate: all the work happens in the dataset implementation.
  return datanode.data.getBlockInputStream(block, seekOffset);
}
/**
 * Queues an incremental block report (received/deleted/receiving) for the
 * given block with every NameNode actor of this block pool.
 *
 * @param block the block being reported
 * @param status received / deleted / receiving status
 * @param delHint deletion hint passed through to the NameNode
 * @param storageUuid UUID of the storage holding the block
 * @param isOnTransientStorage whether the replica lives on transient storage
 */
private void notifyNamenodeBlock(ExtendedBlock block, BlockStatus status,
    String delHint, String storageUuid, boolean isOnTransientStorage) {
  checkBlock(block);
  final ReceivedDeletedBlockInfo blockInfo = new ReceivedDeletedBlockInfo(
      block.getLocalBlock(), status, delHint);
  final DatanodeStorage targetStorage =
      dn.getFSDataset().getStorage(storageUuid);

  // Fan the notification out to every actor so each NameNode (active and
  // standby) receives the incremental report.
  for (BPServiceActor actor : bpServices) {
    actor.getIbrManager().notifyNamenodeBlock(blockInfo, targetStorage,
        isOnTransientStorage);
  }
}
// Fetch per-storage block reports for this actor's block pool.
// NOTE(review): fragment — the assignment/use of the result is not visible.
dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
// Baseline checks: nothing cached yet and capacity matches the configured
// test constant.
assertEquals(0, fsd.getNumBlocksCached());
final long cacheCapacity = fsd.getCacheCapacity();
long cacheUsed = fsd.getCacheUsed();
long current = 0;
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
// NOTE(review): test fragment — truncated; the ReplicaOutputStreams creation
// is cut off (a dangling newDataChecksum(...) argument list remains) and
// streams is still null when dereferenced below.
LOG.debug("Running " + GenericTestUtils.getMethodName());
// Create an RBW replica that the recovery will operate on.
ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
    StorageType.DEFAULT, block, false).getReplica();
ReplicaOutputStreams streams = null;
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
// Write one checksum byte, then start replica recovery with a newer id.
streams.getChecksumOut().write('a');
dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
try {
  dn.syncBlock(rBlock, initBlockRecords(dn));
// Snapshot cache stats and verify capacity matches the configured constant.
final long cacheCapacity = fsd.getCacheCapacity();
long cacheUsed = fsd.getCacheUsed();
long current = 0;
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
/**
 * Returns the local file-system paths (block file and meta file) for a
 * block, after verifying local-path access and the block token.
 *
 * @param block the block whose local paths are requested
 * @param token access token checked in READ mode
 * @return the local path info, or null if the dataset has none
 * @throws IOException on access-check failure or dataset error
 */
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");

  BlockLocalPathInfo pathInfo = data.getBlockLocalPathInfo(block);
  if (pathInfo == null) {
    LOG.trace("getBlockLocalPathInfo for block={} returning null", block);
  } else {
    LOG.trace("getBlockLocalPathInfo successful block={} blockfile {} metafile {}",
        block, pathInfo.getBlockPath(), pathInfo.getMetaPath());
  }
  metrics.incrBlocksGetLocalPathInfo();
  return pathInfo;
}
public Object answer(InvocationOnMock invocation)
    throws IOException, InterruptedException {
  // Delay to widen the race window the test is exercising.
  Thread.sleep(1000);
  // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
  // the block is not removed, since the volume reference should not
  // be released at this point.
  data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0],
      (boolean) invocation.getArguments()[1]);
  return null;
}
// NOTE(review): fragment — the Mockito stubbing call below is truncated
// mid-argument-list by extraction.
}).when(dn.data).finalizeBlock(any(ExtendedBlock.class),
/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNoReplicaUnderRecovery() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Create a plain RBW replica — never converted to ReplicaUnderRecovery —
  // so the sync below must fail.
  dn.data.createRbw(StorageType.DEFAULT, block, false);
  try {
    dn.syncBlock(rBlock, initBlockRecords(dn));
    fail("Sync should fail");
  } catch (IOException e) {
    // Fix: the original evaluated e.getMessage().startsWith(...) and
    // discarded the result, so a wrong exception type/message was never
    // actually detected. Assert it explicitly.
    GenericTestUtils.assertExceptionContains("Cannot recover ", e);
  }
  // The NameNode must never see a commitBlockSynchronization for the
  // failed recovery.
  DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
  verify(namenode, never()).commitBlockSynchronization(
      any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
      anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
@Override
public Boolean get() {
  // Poll current cache usage and cached-block count; succeed only when
  // both match the expected values.
  long curCacheUsed = fsd.getCacheUsed();
  long curBlocks = fsd.getNumBlocksCached();
  if ((curCacheUsed != expectedCacheUsed) ||
      (curBlocks != expectedBlocks)) {
    // Only log progress after repeated misses to keep test output quiet.
    if (tries++ > 10) {
      LOG.info("verifyExpectedCacheUsage: have " +
          curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
          curBlocks + "/" + expectedBlocks + " blocks cached. " +
          "memlock limit = " +
          NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
          ". Waiting...");
    }
    return false;
  }
  LOG.info("verifyExpectedCacheUsage: got " +
      curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
      curBlocks + "/" + expectedBlocks + " blocks cached. " +
      "memlock limit = " +
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
  return true;
}
// Poll every 100 ms, up to a 60 s timeout.
}, 100, 60000);
// Re-open the finalized replica for append under a bumped generation stamp.
long newGS = block.getGenerationStamp() + 1;
ReplicaHandler replicaHandler =
    dataSet.append(block, newGS, initialFileLength);
// NOTE(review): heartbeat fragment — truncated by extraction; the
// LOG.debug concatenation and the sendHeartbeat call are both cut off
// mid-expression here.
scheduler.scheduleNextHeartbeat();
// Gather per-volume storage reports for this actor's block pool.
StorageReport[] reports =
    dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
if (LOG.isDebugEnabled()) {
  LOG.debug("Sending heartbeat with " + reports.length +
scheduler.updateLastHeartbeatTime(now);
VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
    .getVolumeFailureSummary();
// A null summary means no failed volumes.
int numFailedVolumes = volumeFailureSummary != null ?
    volumeFailureSummary.getFailedStorageLocations().length : 0;
dn.getFSDataset().getCacheCapacity(),
dn.getFSDataset().getCacheUsed(),
dn.getXmitsInProgress(),
dn.getXceiverCount(),