/**
 * Close/abort all files being written.
 *
 * @param abort if true, abort each open stream; otherwise close it cleanly
 */
public void closeAllFilesBeingWritten(final boolean abort) {
  // Drain the map one entry at a time so the lock on filesBeingWritten is
  // never held across the (potentially slow) close/abort call.
  for (;;) {
    final long inodeId;
    final DFSOutputStream out;
    synchronized (filesBeingWritten) {
      if (filesBeingWritten.isEmpty()) {
        return;
      }
      inodeId = filesBeingWritten.keySet().iterator().next();
      out = filesBeingWritten.remove(inodeId);
    }
    if (out != null) {
      try {
        if (abort) {
          out.abort();
        } else {
          out.close();
        }
      } catch (IOException ie) {
        // Fix: include the file path (not just the inode) in the error,
        // matching the richer diagnostics used elsewhere for this method.
        LOG.error("Failed to " + (abort ? "abort" : "close") + " file: "
            + out.getSrc() + " with inode: " + inodeId, ie);
      }
    }
  }
}
/**
 * Close/abort all files being written.
 *
 * <p>Streams are removed from {@code filesBeingWritten} one at a time under
 * its monitor, then closed (or aborted) outside the lock.</p>
 */
public void closeAllFilesBeingWritten(final boolean abort) {
  while (true) {
    final long id;
    final DFSOutputStream stream;
    synchronized (filesBeingWritten) {
      if (filesBeingWritten.isEmpty()) {
        return;
      }
      id = filesBeingWritten.keySet().iterator().next();
      stream = filesBeingWritten.remove(id);
    }
    if (stream == null) {
      continue;
    }
    try {
      if (abort) {
        stream.abort();
      } else {
        stream.close();
      }
    } catch (IOException ie) {
      final String action = abort ? "abort" : "close";
      LOG.error("Failed to " + action + " inode " + id, ie);
    }
  }
}
/**
 * Close/abort all files being written.
 *
 * <p>Repeatedly pops one entry out of {@code filesBeingWritten} while holding
 * its monitor, then performs the close/abort outside the lock so other
 * threads are not blocked by slow stream shutdowns.</p>
 */
public void closeAllFilesBeingWritten(final boolean abort) {
  boolean more = true;
  while (more) {
    long inode = 0;
    DFSOutputStream pending = null;
    synchronized (filesBeingWritten) {
      if (filesBeingWritten.isEmpty()) {
        more = false;
      } else {
        inode = filesBeingWritten.keySet().iterator().next();
        pending = filesBeingWritten.remove(inode);
      }
    }
    if (!more || pending == null) {
      // Either the map was empty (loop will exit) or the removed value was
      // null; nothing to close this round.
      continue;
    }
    try {
      if (abort) {
        pending.abort();
      } else {
        pending.close();
      }
    } catch (IOException ioe) {
      final String verb = abort ? "abort" : "close";
      LOG.error("Failed to " + verb + " file: " + pending.getSrc()
          + " with inode: " + inode, ioe);
    }
  }
}
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test
public void testCloseTwice() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  FSDataOutputStream os = fs.create(new Path("/test"));
  DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
      "wrappedStream");
  @SuppressWarnings("unchecked")
  AtomicReference<IOException> ex = (AtomicReference<IOException>) Whitebox
      .getInternalState(dos, "lastException");
  // No exception recorded before the first close.
  Assert.assertEquals(null, ex.get());

  dos.close();

  // Inject a fake failure and verify the second close() rethrows it.
  IOException dummy = new IOException("dummy");
  ex.set(dummy);
  try {
    dos.close();
    // Fix: the original test passed silently if close() threw nothing.
    Assert.fail("Expected close() to throw the injected exception");
  } catch (IOException e) {
    // Fix: assertEquals takes (expected, actual) — original had them swapped.
    Assert.assertEquals(dummy, e);
  }
  // The recorded exception must be cleared, so a third close() is a no-op
  // and does not rethrow (the point of HDFS-5335).
  Assert.assertEquals(null, ex.get());
  dos.close();
}
assertTrue("There should be atleast 2 nodes in pipeline still", out .getPipeline().length >= 2); out.close(); } finally { DFSClientFaultInjector.set(old);
out.close(); } finally { if (cluster != null) {
out.write(1); out.hflush(); out.close(); FSDataInputStream in = null; ExtendedBlock oldBlock = null;
0, out.getPipelineRecoveryCount()); out.write(1); out.close(); } finally { if (cluster != null) {
streams[i].close(); } catch (IOException ioe) {