/**
 * Logs a one-line summary of the replication state of {@code block} as seen
 * from {@code srcNode}: expected vs. live/corrupt/decommissioned/excess
 * replica counts, the datanodes currently holding the block, and whether the
 * owning file is still open.
 *
 * @param block   the block whose replication state is logged
 * @param srcNode the datanode from whose perspective this is being logged
 * @param num     pre-computed replica counts for {@code block}
 */
private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
    NumberReplicas num) {
  int curReplicas = num.liveReplicas();
  int curExpectedReplicas = getReplication(block);
  INode fileINode = blocksMap.getINode(block);
  Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
  // StringBuilder instead of StringBuffer: this is a method-local builder,
  // so StringBuffer's per-call synchronization buys nothing.
  StringBuilder nodeList = new StringBuilder();
  while (nodeIter.hasNext()) {
    DatanodeDescriptor node = nodeIter.next();
    nodeList.append(node.name);
    nodeList.append(" ");
  }
  // Guard against a concurrently-removed block: getINode may return null,
  // and a pure logging helper must not throw NPE.
  boolean isOpenFile = fileINode != null && fileINode.isUnderConstruction();
  FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
      + curExpectedReplicas + ", live replicas: " + curReplicas
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissionedReplicas()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + isOpenFile
      + ", Datanodes having this block: " + nodeList
      + ", Current Datanode: " + srcNode.name
      + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
int usableReplicas = numReplicas.liveReplicas() + numReplicas.decommissionedReplicas(); " size: " + block.getNumBytes() + " (replicas:" + " l: " + numReplicas.liveReplicas() + " d: " + numReplicas.decommissionedReplicas() + " c: " + numReplicas.corruptReplicas() +
int count = countNodes(blk).liveReplicas(); if (count > 1) {
.liveReplicas()); assertEquals(1, namesystem.blockManager.countNodes(block) .corruptReplicas()); .liveReplicas()); assertEquals(0, namesystem.blockManager.countNodes(block) .corruptReplicas());
.liveReplicas()); assertEquals(1, namesystem.blockManager.countNodes(block) .corruptReplicas()); .liveReplicas()); assertEquals(0, namesystem.blockManager.countNodes(block) .corruptReplicas());
.liveReplicas()); assertEquals(1, namesystem.blockManager.countNodes(block) .corruptReplicas()); .liveReplicas()); assertEquals(0, namesystem.blockManager.countNodes(block) .corruptReplicas());
int numCurrentReplica = num.liveReplicas();
int numCurrentReplica = num.liveReplicas();
if (countNodes(blk).liveReplicas()>inode.getReplication()) {
int curReplicas = num.liveReplicas(); int curExpectedReplicas = getReplication(block); if (curExpectedReplicas > curReplicas) {
/** * Invalidates the given block on the given datanode. */ public synchronized void invalidateBlock(Block blk, DatanodeInfo dn) throws IOException { NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: " + blk + " on " + dn.getName()); DatanodeDescriptor node = getDatanode(dn); if (node == null) { throw new IOException("Cannot invalidate block " + blk + " because datanode " + dn.getName() + " does not exist."); } // Check how many copies we have of the block. If we have at least one // copy on a live node, then we can delete it. int count = countNodes(blk).liveReplicas(); if (count > 1) { addToInvalidates(blk, dn); removeStoredBlock(blk, node); NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: " + blk + " on " + dn.getName() + " listed for deletion."); } else { NameNode.stateChangeLog.info("BLOCK* NameSystem.invalidateBlocks: " + blk + " on " + dn.getName() + " is the only copy and was not deleted."); } }
/**
 * If there were any replication requests that timed out, reap them
 * and put them back into the neededReplication queue.
 */
void processPendingReplications() {
  Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
  if (timedOutItems == null) {
    return;
  }
  writeLock();
  try {
    for (Block timedOut : timedOutItems) {
      NumberReplicas counts = countNodes(timedOut);
      neededReplications.add(timedOut,
                             counts.liveReplicas(),
                             counts.decommissionedReplicas(),
                             getReplication(timedOut));
    }
  } finally {
    writeUnlock();
  }
  /* If we know the target datanodes where the replication timedout,
   * we could invoke decBlocksScheduled() on it. Its ok for now. */
}
/**
 * This method is invoked just before removing a block from the
 * {@link #blocksMap}. It is used to correctly update the safe block count
 * when a block is removed from the {@link #blocksMap}. If the current
 * replication for the block is greater than {@link #minReplication} this
 * means that the safe block count has not already been decremented for this
 * block and hence we should decrement the safe block count.
 *
 * @param b
 *          the block that has to be removed.
 */
void decrementSafeBlockCountForBlockRemoval(Block b) {
  if (safeMode != null && safeMode.isOn()) {
    // liveReplicas() already yields an int; the previous (short) narrowing
    // cast was redundant and could in principle truncate a large count.
    int replication = countNodes(b).liveReplicas();
    if (replication >= minReplication) {
      this.blocksSafe--;
      safeMode.checkMode();
    }
  }
}
/**
 * If there were any replication requests that timed out, reap them
 * and put them back into the neededReplication queue.
 */
void processPendingReplications() {
  Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
  if (timedOutItems == null) {
    return;
  }
  synchronized (this) {
    for (Block timedOut : timedOutItems) {
      NumberReplicas counts = countNodes(timedOut);
      neededReplications.add(timedOut,
                             counts.liveReplicas(),
                             counts.decommissionedReplicas(),
                             getReplication(timedOut));
    }
  }
  /* If we know the target datanodes where the replication timedout,
   * we could invoke decBlocksScheduled() on it. Its ok for now. */
}
/** * Check all blocks of a file. If any blocks are lower than their intended * replication factor, then insert them into neededReplication */ private void checkReplicationFactor(INodeFile file) { int numExpectedReplicas = file.getReplication(); Block[] pendingBlocks = file.getBlocks(); int nrBlocks = pendingBlocks.length; for (int i = 0; i < nrBlocks; i++) { // filter out containingNodes that are marked for decommission. NumberReplicas number = countNodes(pendingBlocks[i]); if (number.liveReplicas() < numExpectedReplicas) { neededReplications.add(pendingBlocks[i], number.liveReplicas(), number.decommissionedReplicas, numExpectedReplicas); } } }
/**
 * Return a tuple of the replica state (number racks, number live
 * replicas, and number needed replicas) for the given block.
 *
 * @param namenode to proxy the invocation to.
 * @param b        the block to query
 * @return {numRacks, numLiveReplicas, 1 if replication is needed else 0}
 */
public static int[] getReplicaInfo(NameNode namenode, Block b) {
  FSNamesystem ns = namenode.getNamesystem();
  ns.readLock();
  try {
    return new int[] {
        ns.blockManager.getNumberOfRacks(b),
        ns.blockManager.countNodes(b).liveReplicas(),
        ns.blockManager.neededReplications.contains(b) ? 1 : 0};
  } finally {
    // Release in finally: the original leaked the read lock if any of the
    // block-manager calls above threw.
    ns.readUnlock();
  }
}
}
/**
 * Recomputes the replica counts for {@code block} under the write lock and
 * pushes the updated state into the neededReplications queue.
 *
 * @param curReplicasDelta      change to apply to the current replica count
 * @param expectedReplicasDelta change to apply to the expected replica count
 */
void updateNeededReplications(Block block, int curReplicasDelta,
    int expectedReplicasDelta) {
  writeLock();
  try {
    NumberReplicas counts = countNodes(block);
    int expected = getReplication(block);
    neededReplications.update(block,
                              counts.liveReplicas(),
                              counts.decommissionedReplicas(),
                              expected,
                              curReplicasDelta,
                              expectedReplicasDelta);
  } finally {
    writeUnlock();
  }
}
/**
 * Recomputes the replica counts for {@code block} and pushes the updated
 * state into the neededReplications queue. Synchronized on this namesystem.
 *
 * @param curReplicasDelta      change to apply to the current replica count
 * @param expectedReplicasDelta change to apply to the expected replica count
 */
synchronized void updateNeededReplications(Block block, int curReplicasDelta,
    int expectedReplicasDelta) {
  NumberReplicas counts = countNodes(block);
  int expected = getReplication(block);
  neededReplications.update(block,
                            counts.liveReplicas(),
                            counts.decommissionedReplicas(),
                            expected,
                            curReplicasDelta,
                            expectedReplicasDelta);
}
/**
 * Decrement number of blocks that reached minimal replication.
 */
void decrementSafeBlockCount(Block b) {
  if (safeMode == null || !safeMode.isOn()) {
    return;
  }
  int replication = (short) countNodes(b).liveReplicas();
  // Only decrement when this removal drops the block below minReplication.
  if (replication == minReplication - 1) {
    this.blocksSafe--;
    safeMode.checkMode();
  }
}
/** * Decrement number of blocks that reached minimal replication. */ void decrementSafeBlockCount(Block b) { if (safeMode == null) // mostly true return; safeMode.decrementSafeBlockCount((short)countNodes(b).liveReplicas()); }