/** * Note that this is not a public API; * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead. * * @return the number of valid replicas of the current block */ public synchronized int getCurrentBlockReplication() throws IOException { dfsClient.checkOpen(); checkClosed(); if (streamer == null) { return blockReplication; // no pipeline, return repl factor of file } DatanodeInfo[] currentNodes = streamer.getNodes(); if (currentNodes == null) { return blockReplication; // no pipeline, return repl factor of file } return currentNodes.length; }
/** * Note that this is not a public API; * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead. * * @return the number of valid replicas of the current block */ public synchronized int getCurrentBlockReplication() throws IOException { dfsClient.checkOpen(); checkClosed(); if (streamer == null) { return blockReplication; // no pipeline, return repl factor of file } DatanodeInfo[] currentNodes = streamer.getNodes(); if (currentNodes == null) { return blockReplication; // no pipeline, return repl factor of file } return currentNodes.length; }
synchronized (dataQueue) { while (!isClosed()) { checkClosed(); if (lastAckedSeqno >= seqno) { break; checkClosed(); } catch (ClosedChannelException e) {
/** * Waits till all existing data is flushed and confirmations * received from datanodes. */ private void flushInternal() throws IOException { long toWaitFor; synchronized (this) { dfsClient.checkOpen(); checkClosed(); // // If there is data in the current buffer, send it across // queueCurrentPacket(); toWaitFor = lastQueuedSeqno; } waitForAckedSeqno(toWaitFor); }
/** * Waits till all existing data is flushed and confirmations * received from datanodes. */ private void flushInternal() throws IOException { long toWaitFor; synchronized (this) { dfsClient.checkOpen(); checkClosed(); // // If there is data in the current buffer, send it across // queueCurrentPacket(); toWaitFor = lastQueuedSeqno; } waitForAckedSeqno(toWaitFor); }
checkClosed(); queueCurrentPacket(); } catch (ClosedChannelException e) {
checkClosed(); queueCurrentPacket(); } catch (ClosedChannelException e) {
synchronized (dataQueue) { while (!isClosed()) { checkClosed(); if (lastAckedSeqno >= seqno) { break; checkClosed(); } catch (ClosedChannelException e) {
protected long flushInternalWithoutWaitingAck() throws IOException { long toWaitFor; synchronized (this) { dfsClient.checkOpen(); checkClosed(); // // If there is data in the current buffer, send it across // getStreamer().queuePacket(currentPacket); currentPacket = null; toWaitFor = getStreamer().getLastQueuedSeqno(); } return toWaitFor; }
/** * Note that this is not a public API; * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead. * * @return the number of valid replicas of the current block */ public synchronized int getCurrentBlockReplication() throws IOException { dfsClient.checkOpen(); checkClosed(); if (getStreamer().streamerClosed()) { return blockReplication; // no pipeline, return repl factor of file } DatanodeInfo[] currentNodes = getStreamer().getNodes(); if (currentNodes == null) { return blockReplication; // no pipeline, return repl factor of file } return currentNodes.length; }
/**
 * Validates a chunk about to be written and lazily allocates the packet
 * that will carry it.
 *
 * @param buflen length of the chunk data; must not exceed bytesPerChecksum
 * @param ckoff  offset of the checksum within its buffer (unused here,
 *               kept for signature parity with writeChunk)
 * @param cklen  checksum length; must be 0 or exactly getChecksumSize()
 * @throws IOException if the stream is closed or the sizes are invalid
 */
private synchronized void writeChunkPrepare(int buflen, int ckoff, int cklen)
    throws IOException {
  dfsClient.checkOpen();
  checkClosed();
  if (buflen > bytesPerChecksum) {
    throw new IOException("writeChunk() buffer size is " + buflen +
        " is larger than supported  bytesPerChecksum " +
        bytesPerChecksum);
  }
  if (cklen != 0 && cklen != getChecksumSize()) {
    throw new IOException("writeChunk() checksum size is supposed to be " +
        getChecksumSize() + " but found to be " + cklen);
  }
  if (currentPacket == null) {
    currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer()
        .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false);
    // Fix: the original concatenated bytesCurBlock + ", " + this into one
    // argument, which built the string eagerly even when debug logging is
    // disabled and crammed two values into a single {} placeholder. Use a
    // dedicated placeholder for each so SLF4J formats lazily and cleanly.
    DFSClient.LOG.debug("WriteChunk allocating new packet seqno={},"
            + " src={}, packetSize={}, chunksPerPacket={}, bytesCurBlock={}, {}",
        currentPacket.getSeqno(), src, packetSize, chunksPerPacket,
        getStreamer().getBytesCurBlock(), this);
  }
}
throws IOException { dfsClient.checkOpen(); checkClosed(); try { long toWaitFor; checkClosed();
private synchronized void writeChunkImpl(byte[] b, int offset, int len, byte[] checksum, int ckoff, int cklen) throws IOException { dfsClient.checkOpen(); checkClosed();
throws IOException { dfsClient.checkOpen(); checkClosed(); try { long toWaitFor; checkClosed();
private synchronized void writeChunkImpl(byte[] b, int offset, int len, byte[] checksum, int ckoff, int cklen) throws IOException { dfsClient.checkOpen(); checkClosed();
throws IOException { dfsClient.checkOpen(); checkClosed(); try { long toWaitFor; checkClosed();