/**
 * This method gets the pipeline for the current WAL.
 */
@Override
DatanodeInfo[] getPipeline() {
  if (this.hdfs_out != null) {
    if (this.hdfs_out.getWrappedStream() instanceof DFSOutputStream) {
      return ((DFSOutputStream) this.hdfs_out.getWrappedStream()).getPipeline();
    }
  }
  return new DatanodeInfo[0];
}
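// Usage sketch (hypothetical caller; LOG is assumed to be in scope): the
// returned pipeline can be inspected to log where the current block's
// replicas live.
DatanodeInfo[] pipeline = getPipeline();
for (DatanodeInfo dn : pipeline) {
  // getXferAddr() is the host:port the datanode uses for data transfer.
  LOG.info("WAL block replica on " + dn.getXferAddr());
}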
int current = expectedReplication;
try {
  current = ((DFSOutputStream) logFile.getWrappedStream())
      .getCurrentBlockReplication();
} catch (IOException e) {
  fail(work, e, "getting replication level");
}
private synchronized void writeChunkImpl(byte[] b, int offset, int len,
    byte[] checksum, int ckoff, int cklen) throws IOException {
  dfsClient.checkOpen();
  checkClosed();

  if (len > bytesPerChecksum) {
    throw new IOException("writeChunk() buffer size is " + len
        + " which is larger than supported bytesPerChecksum "
        + bytesPerChecksum);
  }
  if (cklen != 0 && cklen != getChecksumSize()) {
    throw new IOException("writeChunk() checksum size is supposed to be "
        + getChecksumSize() + " but found to be " + cklen);
  }

  if (currentPacket == null) {
    currentPacket = createPacket(packetSize, chunksPerPacket, bytesCurBlock,
        currentSeqno++, false);
  }

  currentPacket.writeChecksum(checksum, ckoff, cklen);
  currentPacket.writeData(b, offset, len);
  currentPacket.incNumChunks();
  bytesCurBlock += len;

  // If the packet is full, or the block boundary is reached, enqueue it.
  if (currentPacket.getNumChunks() == currentPacket.getMaxChunks()
      || bytesCurBlock == blockSize) {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("DFSClient writeChunk packet full seqno="
          + currentPacket.getSeqno() + ", src=" + src
          + ", bytesCurBlock=" + bytesCurBlock
          + ", blockSize=" + blockSize
          + ", appendChunk=" + appendChunk);
    }
    waitAndQueueCurrentPacket();
    adjustChunkBoundary();

    // At a block boundary, send an empty packet to mark the end of the
    // block, then reset the per-block counters.
    if (bytesCurBlock == blockSize) {
      currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
      currentPacket.setSyncBlock(shouldSyncBlock);
      waitAndQueueCurrentPacket();
      bytesCurBlock = 0;
      lastFlushOffset = 0;
    }
  }
}
/**
 * Sync buffered data to DataNodes (flush to disk devices).
 *
 * @param syncFlags
 *          Indicate the detailed semantic and actions of the hsync.
 * @throws IOException
 * @see FSDataOutputStream#hsync()
 */
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    wrappedStream.flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
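// Usage sketch (class name and path are hypothetical): callers reach this
// method through HdfsDataOutputStream; SyncFlag.UPDATE_LENGTH additionally
// persists the new file length on the NameNode.
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-example"))) {
      out.write("payload".getBytes());
      if (out instanceof HdfsDataOutputStream) {
        // Flush to disk on each datanode and update the NameNode's length.
        ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
      }
    }
  }
}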
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. has already been masked with umask).
 */
public DFSOutputStream primitiveCreate(String src, FsPermission absPermission,
    EnumSet<CreateFlag> flag, boolean createParent, short replication,
    long blockSize, Progressable progress, int buffersize,
    ChecksumOpt checksumOpt) throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
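// Sketch of what "absolute" means here (conf is assumed to be a Hadoop
// Configuration in scope): the umask has already been applied, which is
// exactly the form primitiveCreate expects in absPermission.
FsPermission requested = new FsPermission((short) 0666);
FsPermission umask = FsPermission.getUMask(conf);     // e.g. 022
FsPermission absolute = requested.applyUMask(umask);  // 0666 & ~022 = 0644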
DatanodeInfo[] pipeline = out.getPipeline();
for (DatanodeInfo node : pipeline) {
  assertFalse("Write should be going on", failed.get());
  // Record the generation stamp, then take one pipeline node down for upgrade.
  final long oldGs = out.getBlock().getGenerationStamp();
  MiniDFSCluster.DataNodeProperties dnProps = cluster
      .stopDataNodeForUpgrade(indexToShutdown);
  assertEquals("The pipeline recovery count shouldn't increase", 0,
      out.getPipelineRecoveryCount());
  out.write("testagain".getBytes());
  assertTrue("There should be at least 2 nodes in pipeline still",
      out.getPipeline().length >= 2);
}
out.close();
} finally {
  DFSClientFaultInjector.set(old);
}
streams[i] = (DFSOutputStream) fs.create(new Path("/f" + i), fileRepl)
    .getWrappedStream();
streams[i].write("1".getBytes());
streams[i].hsync();
// Closing the stream will drop each pipeline node's expected load by two.
for (DatanodeInfo dni : streams[i].getPipeline()) {
  DatanodeDescriptor dnd = dnm.getDatanode(dni);
  expectedTotalLoad -= 2;
}
try {
  streams[i].close();
} catch (IOException ioe) {
  // Close failures are tolerated; the test only tracks load bookkeeping.
}
final DFSOutputStream out = (DFSOutputStream) (fileSys.append(file)
    .getWrappedStream());
out.write(1);
out.hflush();

// Take the first datanode down for a rolling upgrade, remembering the
// block's generation stamp from before the restart.
final long oldGs = out.getBlock().getGenerationStamp();
MiniDFSCluster.DataNodeProperties dnProps =
    cluster.stopDataNodeForUpgrade(0);

assertEquals("The pipeline recovery count shouldn't increase",
    0, out.getPipelineRecoveryCount());
out.write(1);
out.close();
} finally {
  if (cluster != null) {
    cluster.shutdown();
  }
}
dfstream.setChunksPerPacket(5);
dfstream.setArtificialSlowdown(3000);
stm.write(buffer, 0, mid);

// Poll (up to five seconds) until the write pipeline has been constructed.
DatanodeInfo[] targets = dfstream.getPipeline();
int count = 5;
while (count-- > 0 && targets == null) {
  try {
    Thread.sleep(1000);
  } catch (InterruptedException e) {
  }
  targets = dfstream.getPipeline();
}
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
    throws IOException {
  if (stat.getErasureCodingPolicy() != null) {
    throw new IOException(
        "Not support appending to a striping layout file yet.");
  }
  try (TraceScope ignored =
      dfsClient.newPathTraceScope("newStreamForAppend", src)) {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum, favoredNodes);
    out.start();
    return out;
  }
}
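// For context, a minimal append sketch (path is hypothetical): user code
// reaches newStreamForAppend through FileSystem#append, which returns an
// FSDataOutputStream wrapping the DFSOutputStream created above.
FSDataOutputStream appendOut = fs.append(new Path("/tmp/append-example"));
appendOut.write("more".getBytes());
appendOut.close();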
/** Close/abort all files being written. */
public void closeAllFilesBeingWritten(final boolean abort) {
  for (;;) {
    final long inodeId;
    final DFSOutputStream out;
    synchronized (filesBeingWritten) {
      if (filesBeingWritten.isEmpty()) {
        return;
      }
      inodeId = filesBeingWritten.keySet().iterator().next();
      out = filesBeingWritten.remove(inodeId);
    }
    if (out != null) {
      try {
        if (abort) {
          out.abort();
        } else {
          out.close();
        }
      } catch (IOException ie) {
        LOG.error("Failed to " + (abort ? "abort" : "close") + " file: "
            + out.getSrc() + " with inode: " + inodeId, ie);
      }
    }
  }
}
@Deprecated
public void sync() throws IOException {
  hflush();
}
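// Sketch of the distinction behind the deprecation (fs and the path are
// hypothetical): hflush() only guarantees the data reached the datanodes'
// memory, while hsync() additionally flushes replicas to disk.
FSDataOutputStream out = fs.create(new Path("/tmp/flush-example"));
out.write("payload".getBytes());
out.hflush(); // visible to new readers; replicas may still be in memory
out.hsync();  // additionally synced to disk on each datanode
out.close();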
synchronized void enqueueCurrentPacketFull() throws IOException {
  LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
      + " appendChunk={}, {}", currentPacket, src,
      getStreamer().getBytesCurBlock(), blockSize,
      getStreamer().getAppendChunk(), getStreamer());
  enqueueCurrentPacket();
  adjustChunkBoundary();
  endBlock();
}
public static long getFileId(DFSOutputStream out) {
  return out.getFileId();
}
@VisibleForTesting
ExtendedBlock getPreviousBlock(long fileId) {
  return filesBeingWritten.get(fileId).getBlock();
}
Random rb = new Random(1111);
rb.nextBytes(toWrite);
s1.write(toWrite, 0, 1024 * 1024 * 8);
s1.flush();
s2.write(toWrite, 0, 1024 * 1024 * 8);
s2.flush();
s3.write(toWrite, 0, 1024 * 1024 * 8);
s3.flush();
s1.close();
s2.close();
s3.close();
// Every remaining peer connection should have a matching xceiver thread;
// comparing the xceiver count with itself would be vacuously true.
assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
    .getNumPeersXceiver());