/**
 * Closes the writer, if one exists.
 *
 * @throws IOException In case of IO exception.
 */
public void closeWriter() throws IOException {
    if (writer == null) {
        return;
    }
    // Use the no-op reporter since there is no task context to report to.
    writer.close(Reporter.NULL);
}
@Override public void close(boolean abort) throws IOException { //close with null reporter mWriter.close(null); } }
/**
 * Delegates the close call to the wrapped record writer.
 *
 * @param reporter progress reporter forwarded to the delegate.
 * @throws IOException if the delegate fails to close.
 */
@Override
public void close(Reporter reporter) throws IOException {
    mWriter.close(reporter);
}
}
/**
 * Forwards the close request (including the reporter) to the wrapped writer.
 *
 * @param reporter progress reporter passed through to the delegate.
 * @throws IOException if closing the delegate fails.
 */
public void close(Reporter reporter) throws IOException {
    this.mWriter.close(reporter);
}
}
/**
 * Closes every underlying record writer.
 *
 * <p>Note: if one close throws, the remaining writers are not closed.
 *
 * @param reporter progress reporter forwarded to each writer.
 * @throws IOException if any writer fails to close.
 */
@Override
public void close(Reporter reporter) throws IOException {
    for (org.apache.hadoop.mapred.RecordWriter delegate : writers) {
        delegate.close(reporter);
    }
}
/**
 * Closes the wrapped writer, passing along the caller's reporter.
 *
 * @param reporter progress reporter handed to the delegate.
 * @throws IOException if the delegate fails to close.
 */
@Override
public void close(Reporter reporter) throws IOException {
    mWriter.close(reporter);
}
}
@Override public void close(boolean abort) throws IOException { //close with null reporter mWriter.close(null); } }
/**
 * Closes the backing writer, forwarding the reporter.
 *
 * @param reporter progress reporter forwarded to the backing writer.
 * @throws IOException if the backing writer fails to close.
 */
public void close(Reporter reporter) throws IOException {
    this.writer.close(reporter);
}
}
/**
 * Hands the close call off to the wrapped writer.
 *
 * @param reporter progress reporter delivered to the wrapped writer.
 * @throws IOException if the wrapped writer fails to close.
 */
public void close(Reporter reporter) throws IOException {
    mWriter.close(reporter);
}
}
/**
 * Closes all the opened named outputs.
 * <p/>
 * If overridden, subclasses must invoke <code>super.close()</code> at the
 * end of their <code>close()</code>.
 *
 * @throws java.io.IOException thrown if any of the MultipleOutput files
 *                             could not be closed properly.
 */
public void close() throws IOException {
    for (RecordWriter namedOutput : recordWriters.values()) {
        // No reporter is available here, so close with null.
        namedOutput.close(null);
    }
}
/**
 * Closes the base record writer, adapting the new-API task context into
 * the Reporter the old-API writer expects.
 *
 * @param context task attempt context supplied by the framework.
 * @throws IOException if the base writer fails to close.
 * @throws InterruptedException if the close is interrupted.
 */
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    getBaseRecordWriter().close(InternalUtil.createReporter(context));
}
/**
 * Closes the base record writer.
 *
 * @param context task attempt context supplied by the framework.
 * @throws IOException if the base writer fails to close.
 * @throws InterruptedException if the close is interrupted.
 */
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    // Adapt the new-API context to the old-API Reporter the delegate needs.
    Reporter reporter = InternalUtil.createReporter(context);
    getBaseRecordWriter().close(reporter);
}
/** * commit the task by moving the output file out from the temporary directory. * @throws java.io.IOException */ @Override public void close() throws IOException { // enforce sequential close() calls synchronized (CLOSE_MUTEX) { this.recordWriter.close(new HadoopDummyReporter()); if (this.outputCommitter.needsTaskCommit(this.context)) { this.outputCommitter.commitTask(this.context); } } }
@Override protected void closeOp(boolean abort) throws HiveException { try { if(!wroteData) { //Send a schema only batch to signal EOS with no data written ArrowWrapperWritable writable = converter.emptyBatch(); if(recordWriter == null) { recordWriter = LlapOutputFormatService.get().getWriter(this.attemptId); } recordWriter.write(null, writable); } } catch(Exception e) { LOG.error("Failed to write Arrow stream schema"); throw new RuntimeException(e); } finally { try { //Close the recordWriter with null Reporter recordWriter.close(null); } catch(Exception e) { LOG.error("Failed to close Arrow stream"); throw new RuntimeException(e); } } }
// Close the block writer, forwarding the caller's progress reporter.
bwriter.close(reporter);
/**
 * Open and close a TableOutputFormat. Closing the RecordWriter should release
 * HBase Connection (ZK) resources, and will throw an exception if they are
 * exhausted.
 */
static void openCloseTableOutputFormat(int iter) throws IOException {
    LOG.info("Instantiating TableOutputFormat connection " + iter);
    JobConf conf = new JobConf();
    conf.addResource(UTIL.getConfiguration());
    conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE);
    TableMapReduceUtil.initTableMapJob(
        TABLE, FAMILY, TableMap.class,
        ImmutableBytesWritable.class, ImmutableBytesWritable.class, conf);
    TableOutputFormat outputFormat = new TableOutputFormat();
    RecordWriter recordWriter = outputFormat.getRecordWriter(null, conf, TABLE, null);
    // Closing releases the connection resources acquired by getRecordWriter.
    recordWriter.close(null);
}
// Close the writer without a progress reporter.
writer.close(null);
// Write one serialized row whose field is null, then finish the file.
writer.write(NullWritable.get(), serde.serialize(new SimpleRow(null), inspector));
writer.close(Reporter.NULL);
// NOTE(review): presumably a fresh serde for the read-back phase — confirm with surrounding test.
serde = new OrcSerde();
SearchArgument sarg =
// Write one serialized nested row (7, 8, 9), then finish the file.
writer.write(NullWritable.get(), serde.serialize(new NestedRow(7,8,9), inspector));
writer.close(Reporter.NULL);
// NOTE(review): presumably a fresh serde for the read-back phase — confirm with surrounding test.
serde = new OrcSerde();
SearchArgument sarg =
// Write a final key/value pair, then close the writer without a reporter.
rw.write(new Text("k4"), new Text("v4"));
rw.close(null);