/** {@inheritDoc} */ public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, Block block, short replicationFactor, Collection<DatanodeDescriptor> first, Collection<DatanodeDescriptor> second) { long minSpace = Long.MAX_VALUE; DatanodeDescriptor cur = null; // pick replica from the first Set. If first is empty, then pick replicas // from second set. Iterator<DatanodeDescriptor> iter = first.isEmpty() ? second.iterator() : first.iterator(); // pick node with least free space while (iter.hasNext() ) { DatanodeDescriptor node = iter.next(); long free = node.getRemaining(); if (minSpace > free) { minSpace = free; cur = node; } } return cur; }
remaining = datanode.getRemaining(); nonDFSUsed = datanode.getNonDfsUsed(); configCapacity = datanode.getCapacity();
/**
 * Serializes this datanode's information for storage in the fsImage.
 *
 * Field order (id, capacity, remaining, lastUpdate, xceiverCount) is the
 * on-disk format and must not change.
 *
 * @param out stream the record is written to
 * @throws IOException if the underlying stream fails
 */
public void write(DataOutput out) throws IOException {
  DatanodeID id = new DatanodeID(node);
  id.write(out);
  out.writeLong(node.getCapacity());
  out.writeLong(node.getRemaining());
  out.writeLong(node.getLastUpdate());
  out.writeInt(node.getXceiverCount());
}
/**
 * Writes the datanode's state into the fsImage stream.
 *
 * The sequence of writes (DatanodeID, capacity, remaining, lastUpdate,
 * xceiverCount) defines the persisted record layout; keep it stable.
 *
 * @param out destination stream
 * @throws IOException on a write failure
 */
public void write(DataOutput out) throws IOException {
  DatanodeID nodeId = new DatanodeID(node);
  nodeId.write(out);
  out.writeLong(node.getCapacity());
  out.writeLong(node.getRemaining());
  out.writeLong(node.getLastUpdate());
  out.writeInt(node.getXceiverCount());
}
private void updateStats(DatanodeDescriptor node, boolean isAdded) { // // The statistics are protected by the heartbeat lock // assert(Thread.holdsLock(heartbeats)); if (isAdded) { capacityTotal += node.getCapacity(); capacityUsed += node.getDfsUsed(); capacityRemaining += node.getRemaining(); totalLoad += node.getXceiverCount(); } else { capacityTotal -= node.getCapacity(); capacityUsed -= node.getDfsUsed(); capacityRemaining -= node.getRemaining(); totalLoad -= node.getXceiverCount(); } } /**
while( iter.hasNext() ) { DatanodeDescriptor node = iter.next(); long free = node.getRemaining();
long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize); " is not chosen because the node does not have enough space" + " for block size " + blockSize + " with Remaining = " + node.getRemaining() + " and Scheduled = " + node.getBlocksScheduled());
long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize);
private void updateStats(DatanodeDescriptor node, boolean isAdded) { // // The statistics are protected by the heartbeat lock // For decommissioning/decommissioned nodes, only used capacity // is counted. // assert (Thread.holdsLock(heartbeats)); if (isAdded) { capacityUsed += node.getDfsUsed(); capacityNamespaceUsed += node.getNamespaceUsed(); totalLoad += node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal += node.getCapacity(); capacityRemaining += node.getRemaining(); } else { capacityTotal += node.getDfsUsed(); } } else { capacityUsed -= node.getDfsUsed(); capacityNamespaceUsed -= node.getNamespaceUsed(); totalLoad -= node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal -= node.getCapacity(); capacityRemaining -= node.getRemaining(); } else { capacityTotal -= node.getDfsUsed(); } } }
nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(1, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(2, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(2, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(1, cmds.length); assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(null, cmds);