/** @return a human-readable identifier carrying the listening address. */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("TcpPeerServer(");
  sb.append(getListeningString());
  sb.append(")");
  return sb.toString();
}
}
/**
 * Factory hook: inner nodes of this topology are DFS-aware
 * ({@link DFSTopologyNodeImpl}) rather than plain inner nodes.
 */
@Override
public InnerNodeImpl newInnerNode(String path) {
  final DFSTopologyNodeImpl node = new DFSTopologyNodeImpl(path);
  return node;
}
}
/** @return a human-readable identifier carrying the listening address. */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("DomainPeerServer(");
  sb.append(getListeningString());
  sb.append(")");
  return sb.toString();
}
}
private DataXceiver(Peer peer, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(datanode.getTracer()); this.peer = peer; this.dnConf = datanode.getDnConf(); this.socketIn = peer.getInputStream(); this.socketOut = peer.getOutputStream(); this.datanode = datanode; this.dataXceiverServer = dataXceiverServer; this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname; this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf()); this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf()); remoteAddress = peer.getRemoteAddressString(); final int colonIdx = remoteAddress.indexOf(':'); remoteAddressWithoutPort = (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx); localAddress = peer.getLocalAddressString(); LOG.debug("Number of active connections is: {}", datanode.getXceiverCount()); }
/**
 * Accepts one connection on the domain socket and wraps it in a
 * {@link DomainPeer}. If wrapping fails, the accepted socket (and any
 * partially constructed peer) is closed before the exception propagates.
 *
 * @return the connected peer.
 * @throws IOException if the accept or peer construction fails.
 * @throws SocketTimeoutException if the accept times out.
 */
@Override
public Peer accept() throws IOException, SocketTimeoutException {
  final DomainSocket connSock = sock.accept();
  Peer wrapped = null;
  boolean ok = false;
  try {
    wrapped = new DomainPeer(connSock);
    ok = true;
    return wrapped;
  } finally {
    if (!ok) {
      // Construction failed: release the wrapper (if any) and the raw socket.
      if (wrapped != null) {
        wrapped.close();
      }
      connSock.close();
    }
  }
}
// NOTE(review): truncated fragment, presumably from DataNode's xceiver-server
// setup. The else-branch builds a TcpPeerServer with the configured IPC listen
// backlog and transfer-socket receive buffer, records/logs the streaming
// address, and creates the xceiver thread group. The trailing
// `new DataXceiverServer(domainPeerServer, ...)` call is cut off mid-statement
// (its assignment is outside this chunk) — confirm against the full file.
tcpPeerServer = new TcpPeerServer(secureResources); } else { int backlogLength = getConf().getInt( CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_KEY, CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT); tcpPeerServer = new TcpPeerServer(dnConf.socketWriteTimeout, DataNode.getStreamingAddr(getConf()), backlogLength); tcpPeerServer.setReceiveBufferSize( dnConf.getTransferSocketRecvBufferSize()); streamingAddr = tcpPeerServer.getStreamingAddr(); LOG.info("Opened streaming server at {}", streamingAddr); this.threadGroup = new ThreadGroup("dataXceiverServer"); new DataXceiverServer(domainPeerServer, getConf(), this)); LOG.info("Listening on UNIX domain socket: {}", domainPeerServer.getBindPath());
// NOTE(review): fragment — constructs a DomainPeerServer on the configured
// UNIX domain socket path, then applies the transfer-socket receive buffer
// size only when the configured value is positive (non-positive means
// "leave the OS default").
new DomainPeerServer(domainSocketPath, port); int recvBufferSize = conf.getInt( DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY, DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT); if (recvBufferSize > 0) { domainPeerServer.setReceiveBufferSize(recvBufferSize);
// NOTE(review): fragment with pieces elided — part of a recursive random
// choice excluding a subtree. Visible logic: when the root is a rack, skip
// the excluded child while iterating; otherwise gather eligible child
// subtrees, weight them by per-subtree storage counts of the requested type
// (totalCounts / countArray), and recurse into the picked child. The code
// between the visible statements is missing from this chunk — do not assume
// the selection math from what is shown here alone.
DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) { Node chosenNode; if (root.isRack()) { for (Node node : root.getChildren()) { if (node.equals(excludeRoot)) { continue; getEligibleChildren(root, excludeRoot, type); if (candidates.size() == 0) { return null; for (int i = 0; i < candidates.size(); i++) { DFSTopologyNodeImpl innerNode = candidates.get(i); int subTreeCount = innerNode.getSubtreeStorageCount(type); totalCounts += subTreeCount; countArray[i] = subTreeCount; chosenNode = chooseRandomWithStorageTypeAndExcludeRoot( nextRoot, excludeRoot, type);
// NOTE(review): fragment — builds the candidate list of child subtrees for a
// storage-type-aware random pick. When the excluded node lies under the root,
// an excludeCount (computed in an elided statement) is subtracted from the
// storage count of any child subtree that contains, or is, the excluded node.
ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>(); int excludeCount = 0; if (excludeRoot != null && root.isAncestor(excludeRoot)) { .getSubtreeStorageCount(type); } else { for (Node node : root.getChildren()) { DFSTopologyNodeImpl dfsNode = (DFSTopologyNodeImpl) node; int storageCount = dfsNode.getSubtreeStorageCount(type); if (excludeRoot != null && excludeCount != 0 && (dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) { storageCount -= excludeCount;
// NOTE(review): fragment — core of the scoped random choice. Resolves the
// scope node (bailing on a non-existent scope), computes the number of
// available storages of the requested type under the root, subtracts the
// contribution of the excluded scope/node (inner node vs. single datanode
// handled separately), then delegates the actual pick to
// chooseRandomWithStorageTypeAndExcludeRoot and rejects results present in
// excludedNodes. Several statements are elided between the visible ones.
Node node = getNode(scope); if (node == null) { LOG.debug("Invalid scope {}, non-existing node", scope); Node excludeRoot = excludedScope == null ? null : getNode(excludedScope); int availableCount = root.getSubtreeStorageCount(type); if (excludeRoot != null && root.isAncestor(excludeRoot)) { if (excludeRoot instanceof DFSTopologyNodeImpl) { availableCount -= ((DFSTopologyNodeImpl)excludeRoot) .getSubtreeStorageCount(type); } else { availableCount -= ((DatanodeDescriptor)excludeRoot) } else if (excludedNode instanceof DFSTopologyNodeImpl) { availableCount -= ((DFSTopologyNodeImpl) excludedNode) .getSubtreeStorageCount(type); } else if (excludedNode instanceof DatanodeInfo) { DatanodeDescriptor dn = (DatanodeDescriptor)getNode(nodeLocation); availableCount -= dn.hasStorageType(type)? 1 : 0; } else { chosen = chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot, type); if (excludedNodes == null || !excludedNodes.contains(chosen)) {
/**
 * Builds the inner node that sits one level above this node.
 *
 * @param parentName name for the new parent node.
 * @return a DFS-aware inner node whose location is this node's path.
 */
private DFSTopologyNodeImpl createParentNode(String parentName) {
  final int parentLevel = this.getLevel() + 1;
  return new DFSTopologyNodeImpl(parentName, getPath(this), this, parentLevel);
}
/**
 * Stops this xceiver server by closing its peer server.
 *
 * <p>Must only be called once the datanode is shutting down
 * ({@code shouldRun == false}) or restarting for an upgrade
 * ({@code shutdownForUpgrade == true}); the assertion enforces that.
 * A failure to close the peer server is logged but not rethrown.
 */
void kill() {
  // Fixed: "shoudRun" typo, and the message now names the actual flag
  // checked (shutdownForUpgrade) instead of a nonexistent "restarting".
  assert (!datanode.shouldRun || datanode.shutdownForUpgrade) :
      "shouldRun should be set to false or shutdownForUpgrade should be true"
      + " before killing";
  try {
    this.peerServer.close();
    this.closed = true;
  } catch (IOException ie) {
    LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
  }
}
// NOTE(review): fragment — tail of a storage-add path: records the first
// occurrence of this storage type on the node and propagates the addition
// to the parent topology node so ancestor counts stay consistent.
storageTypeCounts.put(type, 1); if (getParent() != null) { ((DFSTopologyNodeImpl)getParent()).childAddStorage(getName(), type);
/**
 * Called by a child node of the current node to decrement a storage count.
 *
 * <p>Decrements (or removes, when the count reaches zero) the per-child and
 * aggregate counters for the given type, then propagates the removal to the
 * parent so every ancestor's counts stay consistent.
 *
 * @param childName the name of the child removing a storage type.
 * @param type the type being removed.
 */
public synchronized void childRemoveStorage(
    String childName, StorageType type) {
  LOG.debug("child remove storage: {}:{}", childName, type);
  Preconditions.checkArgument(childrenStorageInfo.containsKey(childName));
  final EnumMap<StorageType, Integer> perChild =
      childrenStorageInfo.get(childName);
  Preconditions.checkArgument(perChild.containsKey(type));
  final int childCount = perChild.get(type);
  if (childCount > 1) {
    perChild.put(type, childCount - 1);
  } else {
    perChild.remove(type);
  }
  Preconditions.checkArgument(storageTypeCounts.containsKey(type));
  final int aggregateCount = storageTypeCounts.get(type);
  if (aggregateCount > 1) {
    storageTypeCounts.put(type, aggregateCount - 1);
  } else {
    storageTypeCounts.remove(type);
  }
  if (getParent() != null) {
    ((DFSTopologyNodeImpl) getParent()).childRemoveStorage(getName(), type);
  }
}
}
// NOTE(review): fragment of three disjoint statements — parent-notification
// calls presumably from DatanodeDescriptor storage bookkeeping: register a
// storage's type, and on a type change register the new type and retire the
// old one. Their enclosing methods are outside this chunk — verify there.
parent.childAddStorage(getName(), s.getStorageType()); parent.childAddStorage(getName(), newType); parent.childRemoveStorage(getName(), oldType);
@Override protected DatanodeDescriptor chooseDataNode(final String scope, final Collection<Node> excludedNode, StorageType type) { // only the code that uses DFSNetworkTopology should trigger this code path. Preconditions.checkArgument(clusterMap instanceof DFSNetworkTopology); DFSNetworkTopology dfsClusterMap = (DFSNetworkTopology)clusterMap; DatanodeDescriptor a = (DatanodeDescriptor) dfsClusterMap .chooseRandomWithStorageType(scope, excludedNode, type); DatanodeDescriptor b = (DatanodeDescriptor) dfsClusterMap .chooseRandomWithStorageType(scope, excludedNode, type); return select(a, b); }
/**
 * Choose a datanode from the given <i>scope</i> with specified
 * storage type, delegating to the topology's two-trial random selection.
 *
 * @param scope range of nodes from which a node will be chosen.
 * @param excludedNodes nodes that must not be chosen.
 * @param type the storage type the chosen node must provide.
 * @return the chosen node, if there is any.
 */
protected DatanodeDescriptor chooseDataNode(final String scope, final Collection<Node> excludedNodes, StorageType type) { return (DatanodeDescriptor) ((DFSNetworkTopology) clusterMap) .chooseRandomWithStorageTypeTwoTrial(scope, excludedNodes, type); }
/**
 * Creates a DFS network topology instance. The concrete implementation
 * class is read from configuration ({@code DFS_NET_TOPOLOGY_IMPL_KEY},
 * defaulting to {@code DFS_NET_TOPOLOGY_IMPL_DEFAULT}) and initialized
 * with the DFS-aware inner-node factory.
 *
 * @param conf configuration supplying the implementation class.
 * @return an initialized topology instance.
 */
public static DFSNetworkTopology getInstance(Configuration conf) {
  final Class<? extends DFSNetworkTopology> implClass = conf.getClass(
      DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
      DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
      DFSNetworkTopology.class);
  DFSNetworkTopology topology = ReflectionUtils.newInstance(implClass, conf);
  return (DFSNetworkTopology) topology.init(DFSTopologyNodeImpl.FACTORY);
}
void injectStorage(DatanodeStorageInfo s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (null == storage) { LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(), getXferAddr()); DFSTopologyNodeImpl parent = null; if (getParent() instanceof DFSTopologyNodeImpl) { parent = (DFSTopologyNodeImpl) getParent(); } StorageType type = s.getStorageType(); if (!hasStorageType(type) && parent != null) { // we are about to add a type this node currently does not have, // inform the parent that a new type is added to this datanode parent.childAddStorage(getName(), type); } storageMap.put(s.getStorageID(), s); } else { assert storage == s : "found " + storage + " expected " + s; } } }
/**
 * Randomly choose one node from <i>scope</i>, with specified storage type.
 *
 * If scope starts with ~, choose one from the all nodes except for the
 * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
 * If excludedNodes is given, choose a node that's not in excludedNodes.
 *
 * @param scope range of nodes from which a node will be chosen
 * @param excludedNodes nodes to be excluded from
 * @param type the storage type we search for
 * @return the chosen node
 */
public Node chooseRandomWithStorageType(final String scope,
    final Collection<Node> excludedNodes, StorageType type) {
  netlock.readLock().lock();
  try {
    // A leading '~' inverts the scope: search the whole tree while
    // excluding the named subtree.
    final boolean inverted = scope.startsWith("~");
    final String searchScope = inverted ? NodeBase.ROOT : scope;
    final String excludedScope = inverted ? scope.substring(1) : null;
    return chooseRandomWithStorageType(
        searchScope, excludedScope, excludedNodes, type);
  } finally {
    netlock.readLock().unlock();
  }
}