cmd = impl.blockReport(PBHelper.convert(request.getRegistration()), request.getBlockPoolId(), report, request.hasContext() ?
/**
 * Handles the commitBlockSynchronization RPC: unwraps the proto request
 * and forwards it to the underlying {@code DatanodeProtocol} implementation.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // NOTE: "Taragets" is a typo frozen into the proto field name; the
  // generated accessor must keep that spelling.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  for (int idx = 0; idx < targets.length; idx++) {
    targets[idx] = PBHelperClient.convert(targetProtos.get(idx));
  }
  final List<String> storageProtos = request.getNewTargetStoragesList();
  final String[] targetStorageIds =
      storageProtos.toArray(new String[storageProtos.size()]);
  try {
    impl.commitBlockSynchronization(
        PBHelperClient.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, targetStorageIds);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
} // closes the enclosing translator class (header outside this view)
/**
 * Handles the versionRequest RPC: asks the namenode-side implementation for
 * its namespace info and wraps it in the proto response.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public VersionResponseProto versionRequest(RpcController controller,
    VersionRequestProto request) throws ServiceException {
  final NamespaceInfo namespaceInfo;
  try {
    namespaceInfo = impl.versionRequest();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VersionResponseProto.newBuilder()
      .setInfo(PBHelper.convert(namespaceInfo))
      .build();
}
dnp.blockReceived(reg, blocks, delHints); Assert.fail("Expected IOException is not thrown"); } catch (IOException ex) { dnp.blockReport(reg, blockReport); Assert.fail("Expected IOException is not thrown"); } catch (IOException ex) { DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0); Assert.assertEquals(1, cmd.length); Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
DatanodeCommand[] cmds = nsNamenode.sendHeartbeat(nsRegistration, data.getCapacity(), data.getDfsUsed(), + receivedAndDeletedBlockArray.length + " blocks to " + nnAddr); nsNamenode.blockReceivedAndDeleted(nsRegistration, receivedAndDeletedBlockArray); if (LOG.isDebugEnabled()) { LOG.debug("finshed blockReceivedAndDeleted to " + nnAddr Block[] bReport = data.getBlockReport(namespaceId); DatanodeCommand cmd = nsNamenode.blockReport(nsRegistration, new BlockReport(BlockListAsLongs.convertToArrayLongs(bReport))); firstBlockReportSent = true;
/**
 * Starts a DataNode instance backed by a mocked namenode before each test.
 *
 * @throws IOException if the data directory cannot be prepared or the
 *         DataNode fails to start
 */
@Before
public void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:5020");

  // Recreate a clean data directory for this run.
  ArrayList<File> dataDirs = new ArrayList<File>();
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  dataDirs.add(dataDir);

  // Stub out the namenode so the DataNode can register and heartbeat
  // without a live cluster.
  DatanodeProtocol namenode = mock(DatanodeProtocol.class);
  when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1, 1L, 1));
  when(namenode.sendHeartbeat(any(DatanodeRegistration.class), anyLong(),
      anyLong(), anyLong(), anyInt(), anyInt(), anyInt()))
      .thenReturn(new DatanodeCommand[0]);

  dn = new DataNode(conf, dataDirs, namenode, null);
}
namenode.commitBlockSynchronization(block, 0, 0, closeFile, true, DatanodeID.EMPTY_ARRAY); return null; long generationstamp = namenode.nextGenerationStamp(block); Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp); DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]); namenode.commitBlockSynchronization(block, newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false, nlist);
while (shouldRun && shouldServiceRun) { try { nsInfo = nsNamenode.versionRequest(); break; } catch(SocketTimeoutException e) { // namenode is busy LOG.warn( errorMsg ); try { nsNamenode.errorReport( nsRegistration, DatanodeProtocol.NOTIFY, errorMsg ); } catch( SocketTimeoutException e ) { // namenode is busy
/**
 * Handles the errorReport RPC by unwrapping the proto request and forwarding
 * the registration, error code, and message to the implementation.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public ErrorReportResponseProto errorReport(RpcController controller,
    ErrorReportRequestProto request) throws ServiceException {
  try {
    // NOTE: "Registartion" is a typo frozen into the proto field name;
    // the generated accessor must keep that spelling.
    impl.errorReport(PBHelper.convert(request.getRegistartion()),
        request.getErrorCode(),
        request.getMsg());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_ERROR_REPORT_RESPONSE_PROTO;
}
request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary( request.getVolumeFailureSummary()) : null; response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()), report, request.getCacheCapacity(), request.getCacheUsed(), request.getXmitsInProgress(),
/**
 * Handles the reportBadBlocks RPC: converts each proto located block to its
 * native form and passes the batch to the implementation.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
    ReportBadBlocksRequestProto request) throws ServiceException {
  final List<LocatedBlockProto> protoBlocks = request.getBlocksList();
  final LocatedBlock[] badBlocks = new LocatedBlock[protoBlocks.size()];
  int idx = 0;
  for (LocatedBlockProto proto : protoBlocks) {
    badBlocks[idx++] = PBHelperClient.convertLocatedBlockProto(proto);
  }
  try {
    impl.reportBadBlocks(badBlocks);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
nn.errorReport(nsReg, DatanodeProtocol.INVALID_BLOCK, errStr); return; if (block.getNumBytes() > onDiskLength) { nn.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block, new DatanodeInfo[] { new DatanodeInfo(nsReg) }) }); LOG.info("Can't replicate block " + block + " because on-disk length "
/**
 * Send any pending incremental block reports (IBRs) to the namenode.
 *
 * Pending reports are snapshotted under the lock by {@code generateIBRs()},
 * then the RPC is issued outside the lock. On success the IBR metric and
 * {@code lastIBR} timestamp are updated; on failure the reports are put back
 * on the queue so they are retried rather than lost.
 *
 * @param namenode RPC proxy the report is sent to
 * @param registration this datanode's registration, echoed to the namenode
 * @param bpid block pool the reports belong to
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
void sendIBRs(DatanodeProtocol namenode, DatanodeRegistration registration,
    String bpid) throws IOException {
  // Generate a list of the pending reports for each storage under the lock
  final StorageReceivedDeletedBlocks[] reports = generateIBRs();
  if (reports.length == 0) {
    // Nothing new to report.
    return;
  }

  // Send incremental block reports to the Namenode outside the lock
  if (LOG.isDebugEnabled()) {
    LOG.debug("call blockReceivedAndDeleted: " + Arrays.toString(reports));
  }
  boolean success = false;
  final long startTime = monotonicNow();
  try {
    namenode.blockReceivedAndDeleted(registration, bpid, reports);
    success = true;
  } finally {
    if (success) {
      // Record RPC latency and remember when we last reported successfully.
      dnMetrics.addIncrementalBlockReport(monotonicNow() - startTime);
      lastIBR = startTime;
    } else {
      // If we didn't succeed in sending the report, put all of the
      // blocks back onto our queue, but only in the case where we
      // didn't put something newer in the meantime.
      putMissing(reports);
    }
  }
}
void register() throws IOException { // get versions from the namenode nsInfo = nameNodeProto.versionRequest(); dnRegistration = new DatanodeRegistration( new DatanodeID(DNS.getDefaultIP("default"), DNS.getDefaultHost("default", "default"), DataNode.generateUuid(), getNodePort(dnIdx), DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT), new DataStorage(nsInfo), new ExportedBlockKeys(), VersionInfo.getVersion()); // register datanode dnRegistration = dataNodeProto.registerDatanode(dnRegistration); dnRegistration.setNamespaceInfo(nsInfo); //first block reports storage = new DatanodeStorage(DatanodeStorage.generateUuid()); final StorageBlockReport[] reports = { new StorageBlockReport(storage, BlockListAsLongs.EMPTY) }; dataNodeProto.blockReport(dnRegistration, bpid, reports, new BlockReportContext(1, 0, System.nanoTime())); }
/**
 * Handles the registerDatanode RPC: converts the proto registration to the
 * native form, registers with the implementation, and returns the (possibly
 * updated) registration to the caller.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public RegisterDatanodeResponseProto registerDatanode(
    RpcController controller, RegisterDatanodeRequestProto request)
    throws ServiceException {
  final DatanodeRegistration reg =
      PBHelper.convert(request.getRegistration());
  final DatanodeRegistration updatedReg;
  try {
    updatedReg = impl.registerDatanode(reg);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return RegisterDatanodeResponseProto.newBuilder()
      .setRegistration(PBHelper.convert(updatedReg))
      .build();
}
/**
 * Handles the cacheReport RPC: forwards the cached-block list to the
 * implementation and, if the namenode issued a command in response, attaches
 * it to the reply.
 *
 * @throws ServiceException wrapping any {@link IOException} from the impl
 */
@Override
public CacheReportResponseProto cacheReport(RpcController controller,
    CacheReportRequestProto request) throws ServiceException {
  final DatanodeCommand command;
  try {
    command = impl.cacheReport(
        PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(),
        request.getBlocksList());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  CacheReportResponseProto.Builder builder =
      CacheReportResponseProto.newBuilder();
  if (command != null) {
    // Only set the optional cmd field when the namenode returned one.
    builder.setCmd(PBHelper.convert(command));
  }
  return builder.build();
}
DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration, data.getCapacity(), data.getDfsUsed(), LOG.warn("Panic: block array & delHintArray are not the same" ); namenode.blockReceived(dnRegistration, blockArray, delHintArray); synchronized (receivedBlockList) { synchronized (delHints) { DatanodeCommand cmd = namenode.blockReport(dnRegistration, BlockListAsLongs.convertToArrayLongs(bReport));
dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks); BlockManager bm = cluster.getNamesystem().getBlockManager(); BlockListAsLongs.EMPTY) }; try { dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime())); fail("Expected IOException is not thrown"); new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) }; DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null) .getCommands(); assertEquals(1, cmd.length);
nsNamenode.commitBlockSynchronization(block, 0, 0, closeFile, true, DatanodeID.EMPTY_ARRAY); return null; long generationstamp = nsNamenode.nextGenerationStamp(block, closeFile); Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp); nsNamenode.commitBlockSynchronization(block, newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false, nlist);
while (shouldRun) { try { nsInfo = namenode.versionRequest(); break; } catch(SocketTimeoutException e) { // namenode is busy LOG.fatal( errorMsg ); try { namenode.errorReport( dnRegistration, DatanodeProtocol.NOTIFY, errorMsg ); } catch( SocketTimeoutException e ) { // namenode is busy