// NOTE(review): collapsed excerpt. This is the run() of an anonymous action whose
// opening (presumably new PrivilegedExceptionAction<Object>() { inside a doAs(...) call)
// lies before this view; the trailing "} });" closes that anonymous class and call.
// It resolves the NameNode from the servlet context, counts LIVE datanodes, and runs a
// NamenodeFsck over the request parameter map, writing to 'out'. conf/context/pmap/out/
// remoteAddress are captured from the enclosing scope — not visible here.
@Override public Object run() throws Exception { NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); final FSNamesystem namesystem = nn.getNamesystem(); final BlockManager bm = namesystem.getBlockManager(); final int totalDatanodes = namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); new NamenodeFsck(conf, nn, bm.getDatanodeManager().getNetworkTopology(), pmap, out, totalDatanodes, remoteAddress).fsck(); return null; } });
// NOTE(review): the lines below are collapsed, mutually unrelated fragments (unbalanced
// braces; each appears to come from a different method and/or file revision of the HDFS
// fsck code). Enclosing methods are not visible, so they are annotated, not rewritten.

// Fragment of NamenodeFsck#fsck() dispatch: per-block-id check, corrupt-file listing
// (early return), and the start of the path check with a separate erasure-coding result.
blockIdCK(blk); sb.append(blk + "\n"); listCorruptFileBlocks(); return; Result ecRes = new ErasureCodingResult(conf); check(path, file, replRes, ecRes);
// Fragment of block-reader setup: picks a non-dead datanode for the located block and
// builds its transfer address; the catch opens an IOException handler cut off here.
chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes); targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr()); } catch (IOException ie) {
// Fragment of lost+found copy: lazily initializes lost+found, warns (and presumably
// skips — TODO confirm against full source) when the target already exists, then copies
// the block; the error log statement is truncated mid-expression.
try { if (!lfInited) { lostFoundInit(dfs); if (hdfsPathExists(target)) { LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists."); copyBlock(dfs, lblock, fos); } catch (Exception e) { LOG.error("Fsck: could not copy block " + lblock.getBlock() +
// Fragment of a Mockito-based unit test (TestFsck-style): stubs the DatanodeManager,
// constructs a NamenodeFsck, runs check(), and fails on unexpected exceptions.
when(blockManager.getDatanodeManager()).thenReturn(dnManager); NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, NUM_REPLICAS, remoteAddress); fsck.check(pathString, file, res); } catch (Exception e) { fail("Unexpected exception "+ e.getMessage());
// Fragment of an older checkDir/check: probes the ".snapshot" path, checks listing
// entries, and handles corrupt files (move to lost+found and/or delete) — single-Result
// signature, predating the replRes/ecRes split seen elsewhere in this excerpt.
HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo( snapshotPath); check(snapshotPath, snapshotFileInfo, res); check(path, files[i], res); LOG.info("Fsck: ignoring open file " + path); } else { if (doMove) copyBlocksToLostFound(parent, file, blocks); if (doDelete) deleteCorruptedFile(path);
// Older lost+found copy fragment; swallows the failure with printStackTrace() —
// later revisions (above) log through LOG.error instead.
try { if (!lfInited) { lostFoundInit(dfs); copyBlock(dfs, lblock, fos); } catch (Exception e) { e.printStackTrace();
// Fragment of fsck() option handling: corrupt/open-file listings with early return,
// then per-entry check; braces do not balance within this line.
if (files != null) { if (showCorruptFileBlocks && showOpenFiles) { listCorruptOpenFiles(); listCorruptFileBlocks(); return; check(files[i], res);
// Fragment mixing a listing-loop check() call with switch cases for the fixing mode
// (move open files excluded; delete case truncated).
check(files[i], res); case FIXING_MOVE: if (!isOpen) lostFoundMove(file, blocks); break; case FIXING_DELETE:
// Fragment of per-block report building (striped vs. replicated paths): appends live
// replica count, replica info, length, and expected replication; the repeated
// getReplicaInfo stanzas suggest this line fuses several diff hunks — TODO confirm.
countStorageTypeSummary(file, lBlk); if (storedBlock.isStriped()) { report.append(" Live_repl=" + liveReplicas); String info = getReplicaInfo(storedBlock); if (!info.isEmpty()){ report.append(" ").append(info); String info = getReplicaInfo(storedBlock); if (!info.isEmpty()){ report.append(" ").append(info); report.append(" len=").append(block.getNumBytes()); report.append(" Expected_repl=" + storages.length); String info=getReplicaInfo(storedBlock); if (!info.isEmpty()){ report.append(" ").append(info); LOG.info("Fsck: ignoring open file " + path); } else { if (doMove) copyBlocksToLostFound(parent, file, blocks); if (doDelete) deleteCorruptedFile(path);
/**
 * Fsck-checks the directory at {@code path}. When the path is a known
 * snapshottable directory, its ".snapshot" pseudo-child is checked first
 * (snapshots are not returned by the normal listing). The directory's
 * contents are then paged through via the NameNode RPC server, and every
 * entry is checked against both result accumulators.
 *
 * @param path    directory path being checked
 * @param replRes accumulated result for replicated files
 * @param ecRes   accumulated result for erasure-coded files
 * @throws IOException if a NameNode RPC call fails
 */
private void checkDir(String path, Result replRes, Result ecRes)
    throws IOException {
  if (snapshottableDirs != null && snapshottableDirs.contains(path)) {
    final String snapshotPath = (path.endsWith(Path.SEPARATOR)
        ? path : path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR;
    final HdfsFileStatus snapshotFileInfo =
        namenode.getRpcServer().getFileInfo(snapshotPath);
    check(snapshotPath, snapshotFileInfo, replRes, ecRes);
  }
  if (showFiles) {
    out.println(path + " <dir>");
  }
  totalDirs++;
  // Page through the listing; the last name returned is the cookie for
  // the next page.
  byte[] cookie = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing listing;
  do {
    assert cookie != null;
    listing = namenode.getRpcServer().getListing(path, cookie, false);
    if (listing == null) {
      // Directory disappeared between pages; nothing left to check.
      return;
    }
    for (HdfsFileStatus entry : listing.getPartialListing()) {
      check(path, entry, replRes, ecRes);
    }
    cookie = listing.getLastName();
  } while (listing.hasMore());
}
// NOTE(review): more collapsed fragments with unbalanced braces; several duplicate
// earlier lines of this excerpt (apparently the same hunks from different revisions).
// Enclosing methods are not visible, so they are annotated, not rewritten.

// Duplicate of the lost+found copy fragment: lazy init, warn when the target already
// exists, copy the block; the LOG.error statement is truncated mid-expression.
try { if (!lfInited) { lostFoundInit(dfs); if (hdfsPathExists(target)) { LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists."); copyBlock(dfs, lblock, fos); } catch (Exception e) { LOG.error("Fsck: could not copy block " + lblock.getBlock() +
// Fragment of a unit test: builds an auto-flushing PrintWriter over 'result', uses the
// local host as the remote address, constructs a NamenodeFsck, and checks one file.
PrintWriter out = new PrintWriter(result, true); InetAddress remoteAddress = InetAddress.getLocalHost(); NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, NUM_REPLICAS, remoteAddress); assertNotNull(file); Result res = new Result(conf); fsck.check(pathString, file, res);
// Duplicate of the older single-Result snapshot/corrupt-file handling fragment.
HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo( snapshotPath); check(snapshotPath, snapshotFileInfo, res); check(path, files[i], res); LOG.info("Fsck: ignoring open file " + path); } else { if (doMove) copyBlocksToLostFound(parent, file, blocks); if (doDelete) deleteCorruptedFile(path);
// Duplicate of the oldest lost+found copy fragment (failure swallowed via
// printStackTrace() rather than logged).
try { if (!lfInited) { lostFoundInit(dfs); copyBlock(dfs, lblock, fos); } catch (Exception e) { e.printStackTrace();
// Duplicate of the listing-check / fixing-mode switch fragment.
check(files[i], res); case FIXING_MOVE: if (!isOpen) lostFoundMove(file, blocks); break; case FIXING_DELETE:
// Fragment of an older checkDir: index-based loop over the partial listing calling the
// single-Result check(); truncated before the loop closes.
if (files != null) { for (int i = 0; i < files.length; i++) { check(files[i], res);
/**
 * Handles an HTTP GET fsck request: pulls the NameNode instance and its
 * configuration out of the servlet context (attributes "name.node" and
 * "name.conf"), then runs a NamenodeFsck over the request's parameter map,
 * writing the report to the HTTP response.
 *
 * @param request  incoming fsck request; its parameter map selects options
 * @param response destination for the fsck report
 * @throws ServletException on servlet-level failure
 * @throws IOException      if the fsck run fails
 */
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  final Map<String, String[]> params = request.getParameterMap();
  final ServletContext ctx = getServletContext();
  final NameNode namenode = (NameNode) ctx.getAttribute("name.node");
  final Configuration config = (Configuration) ctx.getAttribute("name.conf");
  new NamenodeFsck(config, namenode, params, response).fsck();
}
}
// NOTE(review): final pair of collapsed fragments; braces do not balance within
// either line, so they are annotated rather than rewritten.

// Duplicate of the lost+found copy fragment (lazy init, warn on existing target,
// copy block, truncated LOG.error).
try { if (!lfInited) { lostFoundInit(dfs); if (hdfsPathExists(target)) { LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists."); copyBlock(dfs, lblock, fos); } catch (Exception e) { LOG.error("Fsck: could not copy block " + lblock.getBlock() +
// Older fsck() dispatch fragment: block-id check, corrupt-file listing with early
// return, then the single-Result path check (predates the replRes/ecRes split).
blockIdCK(blk); sb.append(blk + "\n"); listCorruptFileBlocks(); return; check(path, file, res);