/** Closes this reader by closing the wrapped delegate. */
@Override
public void close() throws IOException {
    delegate.close();
}
@Override public void close() throws IOException { if (this.recordReader != null) { // enforce sequential close() calls synchronized (CLOSE_MUTEX) { this.recordReader.close(); } } }
/** Closes the current reader (if any) and clears the reference. */
@Override
public void close() throws IOException {
    if (curReader == null) {
        return;
    }
    curReader.close();
    curReader = null;
}
/**
 * Releases the underlying record reader (if open) and resets idx to 0.
 *
 * <p>NOTE(review): the role of {@code idx} is not visible in this chunk;
 * resetting it here mirrors the original behavior exactly.
 */
@Override
public void doClose() throws IOException {
    if (recordReader != null) {
        recordReader.close();
        recordReader = null;
    }
    idx = 0;
}
/**
 * Closes the underlying batch reader and drops the batch's column array so
 * the column buffers become collectible.
 */
@Override
public void close() throws IOException {
    vrbReader.close();
    batch.cols = null;
}
/** Idempotently closes the wrapped reader and clears the field. */
private void closeReader() throws IOException {
    if (this.rr == null) {
        return;
    }
    this.rr.close();
    this.rr = null;
}
/**
 * Closes the underlying record reader.
 *
 * <p>NOTE(review): unlike sibling implementations, no null check is done
 * here — presumably the reader is guaranteed non-null at this point; confirm
 * against the caller.
 */
@Override
public void doClose() throws IOException {
    recordReader.close();
}
/** Delegates close to the wrapped reader. */
@Override
public void close() throws IOException {
    rr.close();
}
/**
 * Closes the underlying record reader.
 *
 * <p>NOTE(review): no null guard, mirroring the original — verify the reader
 * cannot be null when doClose() is invoked.
 */
@Override
public void doClose() throws IOException {
    recordReader.close();
}
/** Closes the wrapped record reader. */
@Override
public void close() throws IOException {
    this.recordReader.close();
}
// closes the enclosing scope (class header not visible in this chunk)
}
/** Delegates close to the base record reader. */
@Override
public void close() throws IOException {
    baseRecordReader.close();
}
/** Delegates close to the wrapped reader. */
@Override
public void close() throws IOException {
    reader.close();
}
/** Delegates close to the source reader. */
@Override
public void close() throws IOException {
    sourceReader.close();
}
/** Closes the wrapped reader if open, then nulls the field; safe to call repeatedly. */
private void closeReader() throws IOException {
    if (this.rr == null) {
        return;
    }
    this.rr.close();
    this.rr = null;
}
/**
 * Closes and clears the underlying record reader, then resets idx to 0.
 *
 * <p>NOTE(review): semantics of {@code idx} are not visible in this chunk;
 * the reset mirrors the original behavior exactly.
 */
@Override
public void doClose() throws IOException {
    if (recordReader != null) {
        recordReader.close();
        recordReader = null;
    }
    idx = 0;
}
/**
 * Closes the underlying batch reader and releases the batch's column array
 * so its buffers can be garbage collected.
 */
@Override
public void close() throws IOException {
    vrbReader.close();
    batch.cols = null;
}
@Override public void close() { // some hive input formats are broken and bad things can happen if you close them multiple times if (closed) { return; } closed = true; updateCompletedBytes(); try { recordReader.close(); } catch (IOException e) { throw new UncheckedIOException(e); } } }
/**
 * Closes the base reader and the delete-event registry. The finally block
 * guarantees the registry is closed even when the base reader's close throws.
 */
@Override
public void close() throws IOException {
    try {
        this.baseReader.close();
    } finally {
        this.deleteEventRegistry.close();
    }
}
protected org.apache.hadoop.mapred.RecordReader setReaderAtSplit(int splitNum) throws IOException { JobConf localJc = getLocalFSJobConfClone(jc); currentSplitPointer = splitNum; if ( rr != null ) { rr.close(); } // open record reader to read next split rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer], jobCloneUsingLocalFs, reporter); currentSplitPointer++; return rr; }
protected org.apache.hadoop.mapred.RecordReader setReaderAtSplit(int splitNum) throws IOException { JobConf localJc = getLocalFSJobConfClone(jc); currentSplitPointer = splitNum; if ( rr != null ) { rr.close(); } // open record reader to read next split rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer], jobCloneUsingLocalFs, reporter); currentSplitPointer++; return rr; }