/**
 * This is a drop-in replacement for
 * {@link Socket#connect(SocketAddress, int)}.
 * In the case of normal sockets that don't have associated channels, this
 * just invokes <code>socket.connect(endpoint, timeout)</code>. If
 * <code>socket.getChannel()</code> returns a non-null channel,
 * connect is implemented using Hadoop's selectors. This is done mainly
 * to avoid Sun's connect implementation from creating thread-local
 * selectors, since Hadoop does not have control on when these are closed
 * and could end up taking all the available file descriptors.
 *
 * @see java.net.Socket#connect(java.net.SocketAddress, int)
 *
 * @param socket the socket to connect; may or may not be channel-backed
 * @param address the remote address
 * @param timeout timeout in milliseconds
 * @throws IOException if the connection attempt fails or times out
 */
public static void connect(Socket socket, SocketAddress address, int timeout) throws IOException {
  // Delegate to the four-argument overload with no local bind address.
  connect(socket, address, null, timeout);
}
protected void checkHdfsUriForTimeout(Configuration config) throws IOException { URI hdfsUri = FileSystem.getDefaultUri(config); String address = hdfsUri.getAuthority(); int port = hdfsUri.getPort(); if (address == null || address.isEmpty() || port < 0) { return; } InetSocketAddress namenode = NetUtils.createSocketAddr(address, port); SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(config); Socket socket = null; try { socket = socketFactory.createSocket(); NetUtils.connect(socket, namenode, 1000); // 1 second timeout } finally { IOUtils.closeQuietly(socket); } }
// Bind to the client's configured local address before connecting so
// outbound traffic leaves from the requested interface.
this.socket.bind(this.rpcClient.localAddr);
// connectTO is the connect timeout in milliseconds.
NetUtils.connect(this.socket, remoteId.getAddress(), this.rpcClient.connectTO);
// readTO governs how long subsequent reads may block (SO_TIMEOUT).
this.socket.setSoTimeout(this.rpcClient.readTO);
return;
// Connect to the server from an explicit local bind address, bounded by
// the connect timeout; then set the read timeout (SO_TIMEOUT) for the
// established connection.
NetUtils.connect(this.socket, server, bindAddr, connectionTimeout);
this.socket.setSoTimeout(soTimeout);
return;
// Bind to the client's configured local address before connecting so
// outbound traffic leaves from the requested interface.
this.socket.bind(this.rpcClient.localAddr);
// connectTO is the connect timeout in milliseconds.
NetUtils.connect(this.socket, remoteId.getAddress(), this.rpcClient.connectTO);
// readTO governs how long subsequent reads may block (SO_TIMEOUT).
this.socket.setSoTimeout(this.rpcClient.readTO);
return;
/**
 * Opens a TCP connection to {@code addr} and wraps it in a SASL-negotiated
 * {@link Peer} for transferring block {@code b}.
 *
 * On any failure before the peer is fully constructed, both the
 * half-built peer and the raw socket are released in the finally block.
 *
 * @param b block the connection will transfer
 * @param addr remote DataNode address
 * @param blockToken access token for the block
 * @param datanodeId identity of the remote DataNode
 * @return a connected, SASL-wrapped peer
 * @throws IOException if connecting or SASL negotiation fails
 */
private Peer newConnectedPeer(ExtendedBlock b, InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  final int timeout = datanode.getDnConf().getSocketTimeout();
  boolean established = false;
  Socket socket = null;
  Peer wrapped = null;
  try {
    socket = NetUtils.getDefaultSocketFactory(conf).createSocket();
    NetUtils.connect(socket, addr, timeout);
    wrapped = DFSUtilClient.peerFromSocketAndKey(datanode.getSaslClient(),
        socket, datanode.getDataEncryptionKeyFactoryForBlock(b), blockToken,
        datanodeId, timeout);
    established = true;
    return wrapped;
  } finally {
    if (!established) {
      IOUtils.cleanup(null, wrapped);
      IOUtils.closeSocket(socket);
    }
  }
}
DataInputStream in = null;
try {
  // Resolve the target DataNode's data-transfer address (optionally by
  // hostname, per connectToDnViaHostname) and connect to it.
  NetUtils.connect(sock, NetUtils.createSocketAddr(
      blkMovingInfo.getTarget().getXferAddr(connectToDnViaHostname)),
LOG.debug("Connecting to datanode {}", dnAddr);
sock = newSocket();
// Connect with the DataNode's configured socket timeout.
NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
// Optionally disable Nagle's algorithm to reduce write latency.
sock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
// Scale the read timeout by pipeline depth: each downstream target adds
// one full socketTimeout of slack.
sock.setSoTimeout(targets.length * dnConf.socketTimeout);
// Resolve and connect to the proxy DataNode with the configured socket
// timeout; use the same timeout for reads.
InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
proxySock = datanode.newSocket();
NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
// Optionally disable Nagle's algorithm to reduce transfer latency.
proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
proxySock.setSoTimeout(dnConf.socketTimeout);
/**
 * This is a drop-in replacement for
 * {@link Socket#connect(SocketAddress, int)}.
 * In the case of normal sockets that don't have associated channels, this
 * just invokes <code>socket.connect(endpoint, timeout)</code>. If
 * <code>socket.getChannel()</code> returns a non-null channel,
 * connect is implemented using Hadoop's selectors. This is done mainly
 * to avoid Sun's connect implementation from creating thread-local
 * selectors, since Hadoop does not have control on when these are closed
 * and could end up taking all the available file descriptors.
 *
 * @see java.net.Socket#connect(java.net.SocketAddress, int)
 *
 * @param socket the socket to connect; may or may not be channel-backed
 * @param address the remote address
 * @param timeout timeout in milliseconds
 * @throws IOException if the connection attempt fails or times out
 */
public static void connect(Socket socket, SocketAddress address, int timeout) throws IOException {
  // Delegate to the four-argument overload with no local bind address.
  connect(socket, address, null, timeout);
}
/**
 * This is a drop-in replacement for
 * {@link Socket#connect(SocketAddress, int)}.
 * In the case of normal sockets that don't have associated channels, this
 * just invokes <code>socket.connect(endpoint, timeout)</code>. If
 * <code>socket.getChannel()</code> returns a non-null channel,
 * connect is implemented using Hadoop's selectors. This is done mainly
 * to avoid Sun's connect implementation from creating thread-local
 * selectors, since Hadoop does not have control on when these are closed
 * and could end up taking all the available file descriptors.
 *
 * @see java.net.Socket#connect(java.net.SocketAddress, int)
 *
 * @param socket the socket to connect; may or may not be channel-backed
 * @param address the remote address
 * @param timeout timeout in milliseconds
 * @throws IOException if the connection attempt fails or times out
 */
public static void connect(Socket socket, SocketAddress address, int timeout) throws IOException {
  // Delegate to the four-argument overload with no local bind address.
  connect(socket, address, null, timeout);
}
stripedWriter.getSocketAddress4Transfer(target);
socket = datanode.newSocket();
// Connect to the reconstruction target with the DataNode's configured
// socket timeout.
NetUtils.connect(socket, targetAddr, datanode.getDnConf().getSocketTimeout());
socket.setTcpNoDelay(
/**
 * This is a drop-in replacement for
 * {@link Socket#connect(SocketAddress, int)}.
 * In the case of normal sockets that don't have associated channels, this
 * just invokes <code>socket.connect(endpoint, timeout)</code>. If
 * <code>socket.getChannel()</code> returns a non-null channel,
 * connect is implemented using Hadoop's selectors. This is done mainly
 * to avoid Sun's connect implementation from creating thread-local
 * selectors, since Hadoop does not have control on when these are closed
 * and could end up taking all the available file descriptors.
 *
 * @see java.net.Socket#connect(java.net.SocketAddress, int)
 *
 * @param socket the socket to connect; may or may not be channel-backed
 * @param address the remote address
 * @param timeout timeout in milliseconds
 * @throws IOException if the connection attempt fails or times out
 */
public static void connect(Socket socket, SocketAddress address, int timeout) throws IOException {
  // Delegate to the four-argument overload with no local bind address.
  connect(socket, address, null, timeout);
}
// Write timeout grows with pipeline depth: each downstream target adds
// WRITE_TIMEOUT_EXTENSION ms on top of the base socket write timeout.
int writeTimeout = dnConf.socketWriteTimeout +
    (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
// NOTE(review): connect and SO_TIMEOUT use timeoutValue (computed outside
// this snippet), not the writeTimeout above — confirm that is intentional.
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
// Optionally disable Nagle's algorithm for lower pipeline latency.
mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
mirrorSock.setSoTimeout(timeoutValue);
/**
 * Checks whether a TCP connection can be established to {@code address}.
 *
 * The probe socket is always closed; any failure to connect (including a
 * timeout) is reported by returning {@code false}.
 *
 * @param address the remote endpoint to probe
 * @return true if the connection succeeded within the timeout
 */
public static boolean checkConnection(InetSocketAddress address) {
  boolean isAlive = true;
  Socket socket = null;
  try {
    // NOTE(review): 10 ms is an extremely tight connect timeout; confirm
    // this is intentional and not meant to be 10 seconds.
    int connectionTimeout = 10;
    socket = socketFactory.createSocket();
    NetUtils.connect(socket, address, connectionTimeout);
  } catch (Exception e) {
    // Connection failure is the very condition this probe detects; record
    // the cause at debug level instead of silently discarding it.
    LOG.debug("Connection check to " + address + " failed", e);
    isAlive = false;
  } finally {
    if (socket != null) {
      try {
        socket.close();
      } catch (IOException e) {
        LOG.debug(e.getMessage(), e);
      }
    }
  }
  return isAlive;
}
}
/**
 * Checks whether a TCP connection can be established to {@code address}.
 *
 * The probe socket is always closed; any failure to connect (including a
 * timeout) is reported by returning {@code false}.
 *
 * @param address the remote endpoint to probe
 * @return true if the connection succeeded within the timeout
 */
public static boolean checkConnection(InetSocketAddress address) {
  boolean isAlive = true;
  Socket socket = null;
  try {
    // NOTE(review): 10 ms is an extremely tight connect timeout; confirm
    // this is intentional and not meant to be 10 seconds.
    int connectionTimeout = 10;
    socket = socketFactory.createSocket();
    NetUtils.connect(socket, address, connectionTimeout);
  } catch (Exception e) {
    // Connection failure is the very condition this probe detects; record
    // the cause at debug level instead of silently discarding it.
    LOG.debug("Connection check to " + address + " failed", e);
    isAlive = false;
  } finally {
    if (socket != null) {
      try {
        socket.close();
      } catch (IOException e) {
        LOG.debug(e.getMessage(), e);
      }
    }
  }
  return isAlive;
}
}
protected void checkHdfsUriForTimeout(Configuration config) throws IOException { URI hdfsUri = FileSystem.getDefaultUri(config); String address = hdfsUri.getAuthority(); int port = hdfsUri.getPort(); if (address == null || address.isEmpty() || port < 0) { return; } InetSocketAddress namenode = NetUtils.createSocketAddr(address, port); SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(config); Socket socket = null; try { socket = socketFactory.createSocket(); NetUtils.connect(socket, namenode, 1000); // 1 second timeout } finally { IOUtils.closeQuietly(socket); } }
@Override
public void run() {
  // Test helper: opens a TCP connection to addr and holds it open for
  // roughly 4 seconds so the server side observes an idle connected client.
  Socket sock = null;
  try {
    sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
    NetUtils.connect(sock, addr, 3000); // 3 s connect timeout
    try {
      Thread.sleep(4000); // keep the connection open
    } catch (InterruptedException ie) {
      // Restore the interrupt status rather than swallowing it, so the
      // owning thread/executor can observe the interruption.
      Thread.currentThread().interrupt();
    }
  } catch (IOException ioe) {
    // Best-effort: connection failures are deliberately ignored here.
  } finally {
    if (sock != null) {
      try {
        sock.close();
      } catch (IOException ignored) {
        // Nothing useful to do if close fails.
      }
    }
  }
}
};
@Override
public void run() {
  // Test helper: opens a TCP connection to addr and holds it open for
  // roughly 4 seconds so the server side observes an idle connected client.
  Socket sock = null;
  try {
    sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
    NetUtils.connect(sock, addr, 3000); // 3 s connect timeout
    try {
      Thread.sleep(4000); // keep the connection open
    } catch (InterruptedException ie) {
      // Restore the interrupt status rather than swallowing it, so the
      // owning thread/executor can observe the interruption.
      Thread.currentThread().interrupt();
    }
  } catch (IOException ioe) {
    // Best-effort: connection failures are deliberately ignored here.
  } finally {
    if (sock != null) {
      try {
        sock.close();
      } catch (IOException ignored) {
        // Nothing useful to do if close fails.
      }
    }
  }
}
};
@Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException { Peer peer = null; boolean success = false; Socket sock = null; final int socketTimeout = dfsClientConf.getSocketTimeout(); try { sock = socketFactory.createSocket(); NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(), socketTimeout); peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, this, blockToken, datanodeId, socketTimeout); success = true; return peer; } finally { if (!success) { IOUtilsClient.cleanupWithLogger(LOG, peer); IOUtils.closeSocket(sock); } } }