/**
 * Open a writer appending to an existing file.
 *
 * @param file an existing Avro data file (its header and sync marker are read
 *             so newly appended blocks stay compatible with the file)
 * @return this writer, positioned to append
 * @throws IOException if the file cannot be read or opened for appending
 */
public DataFileWriter<D> appendTo(File file) throws IOException {
  // appendTo(SeekableInput, OutputStream) only reads the existing header from
  // the input, so the input can (and must) be closed here — the original code
  // leaked this file handle. The output stream is intentionally left open: it
  // is owned by this writer and is closed by the writer's own close().
  try (SeekableFileInput input = new SeekableFileInput(file)) {
    return appendTo(input, new SyncableFileOutputStream(file, true));
  }
}
/** Open a writer appending to an existing file. */ public DataFileWriter<D> appendTo(File file) throws IOException { SeekableInput input = null; try { input = new SeekableFileInput(file); OutputStream output = new SyncableFileOutputStream(file, true); return appendTo(input, output); } finally { if (input != null) input.close(); // output does not need to be closed here. It will be closed by invoking close() of this writer. } }
/**
 * Appends the given Hive streaming records to the flow file as Avro records.
 * The flow file reference is replaced with the appended result.
 *
 * <p>Write failures are logged and swallowed on purpose: by the time this
 * method runs the records have already been delivered to Hive Streaming, so a
 * failure to mirror them into the flow file must not fail the session.
 *
 * @param session     NiFi session used to append to the flow file
 * @param avroHeader  serialized Avro header bytes; re-read via appendTo so the
 *                    header itself is written to the flow file only once
 * @param writer      shared writer, re-opened in append mode for each call
 * @param flowFileRef holds the current flow file; updated in place
 * @param hRecords    records to append; may be null (nothing is appended, but
 *                    the writer is still closed)
 */
private void appendAvroRecords(ProcessSession session, byte[] avroHeader, DataFileWriter<GenericRecord> writer,
                               AtomicReference<FlowFile> flowFileRef, List<HiveStreamingRecord> hRecords) {
    flowFileRef.set(session.append(flowFileRef.get(), (out) -> {
        if (hRecords != null) {
            // Initialize the writer again as append mode, so that Avro header is written only once.
            writer.appendTo(new SeekableByteArrayInput(avroHeader), out);
            try {
                for (HiveStreamingRecord hRecord : hRecords) {
                    writer.append(hRecord.getRecord());
                }
            } catch (IOException ioe) {
                // The records were put to Hive Streaming successfully, but there was an error while writing the
                // Avro records to the flow file. Log as an error and move on.
                logger.error("Error writing Avro records (which were sent successfully to Hive Streaming) to the flow file, " + ioe, ioe);
            }
        }
        // Flush/close the writer so all buffered Avro data reaches the flow file stream.
        writer.close();
    }));
}
// Re-open the writer in append mode against the existing tracker file so new
// entries follow the file's existing Avro header/blocks.
// NOTE(review): assumes trackerFile already exists with a valid Avro header —
// appendTo fails otherwise; confirm against the surrounding code.
writer.appendTo(trackerFile);
// Open a writer appending to the already-populated Avro file, then add COUNT
// more random records. SEED+1 (rather than SEED) presumably makes the appended
// batch distinct from the initially written one — TODO confirm against the
// rest of this test.
// NOTE(review): fragment — the try block continues beyond this excerpt.
DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>())
    .appendTo(file);
try {
    for (Object datum : new RandomData(SCHEMA, COUNT, SEED+1)) {
// Open the existing file so appendTo can read its header and sync marker;
// the input is only needed for that read, so it is closed immediately after.
// The output stream ('out', declared outside this excerpt) is left open —
// presumably owned and closed by the writer. NOTE(review): fragment — the
// closing brace of the finally block lies beyond this excerpt.
SeekableFileInput in = new SeekableFileInput(f);
try {
    writer.appendTo(in, out);
} finally {
    in.close();
// NOTE(review): fragment — the assignment target of the first expression is
// not visible in this excerpt; it is presumably 'concatinto'. The writer is
// opened in append mode on file1 and a reader on file2, apparently to
// concatenate file2's contents into file1 — TODO confirm.
new DataFileWriter<>(new GenericDatumWriter<>())
    .setSyncInterval(syncInterval);
concatinto.appendTo(file1);
DataFileReader<Object> concatfrom =
    new DataFileReader<>(file2, new GenericDatumReader<>());
/**
 * Open a writer appending to an existing file.
 *
 * @param file an existing Avro data file (its header and sync marker are read
 *             so newly appended blocks stay compatible with the file)
 * @return this writer, positioned to append
 * @throws IOException if the file cannot be read or opened for appending
 */
public DataFileWriter<D> appendTo(File file) throws IOException {
  // appendTo(SeekableInput, OutputStream) only reads the existing header from
  // the input, so the input can (and must) be closed here — the original code
  // leaked this file handle. The FileOutputStream is intentionally left open:
  // it is owned by this writer and is closed by the writer's own close().
  try (SeekableFileInput input = new SeekableFileInput(file)) {
    return appendTo(input, new FileOutputStream(file, true));
  }
}
/**
 * Open a writer appending to an existing file.
 *
 * @param file an existing Avro data file (its header and sync marker are read
 *             so newly appended blocks stay compatible with the file)
 * @return this writer, positioned to append
 * @throws IOException if the file cannot be read or opened for appending
 */
public DataFileWriter<D> appendTo(File file) throws IOException {
  // appendTo(SeekableInput, OutputStream) only reads the existing header from
  // the input, so the input can (and must) be closed here — the original code
  // leaked this file handle. The output stream is intentionally left open: it
  // is owned by this writer and is closed by the writer's own close().
  try (SeekableFileInput input = new SeekableFileInput(file)) {
    return appendTo(input, new SyncableFileOutputStream(file, true));
  }
}
/**
 * Appends the given Hive streaming records to the flow file as Avro records.
 * The flow file reference is replaced with the appended result.
 *
 * <p>Write failures are logged and swallowed on purpose: by the time this
 * method runs the records have already been delivered to Hive Streaming, so a
 * failure to mirror them into the flow file must not fail the session.
 *
 * @param session     NiFi session used to append to the flow file
 * @param avroHeader  serialized Avro header bytes; re-read via appendTo so the
 *                    header itself is written to the flow file only once
 * @param writer      shared writer, re-opened in append mode for each call
 * @param flowFileRef holds the current flow file; updated in place
 * @param hRecords    records to append; may be null (nothing is appended, but
 *                    the writer is still closed)
 */
private void appendAvroRecords(ProcessSession session, byte[] avroHeader, DataFileWriter<GenericRecord> writer,
                               AtomicReference<FlowFile> flowFileRef, List<HiveStreamingRecord> hRecords) {
    flowFileRef.set(session.append(flowFileRef.get(), (out) -> {
        if (hRecords != null) {
            // Initialize the writer again as append mode, so that Avro header is written only once.
            writer.appendTo(new SeekableByteArrayInput(avroHeader), out);
            try {
                for (HiveStreamingRecord hRecord : hRecords) {
                    writer.append(hRecord.getRecord());
                }
            } catch (IOException ioe) {
                // The records were put to Hive Streaming successfully, but there was an error while writing the
                // Avro records to the flow file. Log as an error and move on.
                logger.error("Error writing Avro records (which were sent successfully to Hive Streaming) to the flow file, " + ioe, ioe);
            }
        }
        // Flush/close the writer so all buffered Avro data reaches the flow file stream.
        writer.close();
    }));
}
/**
 * Builds a deflate-compressed Avro file writer whose schema is derived from
 * the given class via reflection.
 *
 * @param file   target Avro file
 * @param _class class whose reflected schema governs the records
 * @param append when true and the file already exists, records are appended
 *               to it; otherwise a new file is created with the schema
 * @throws IOException if the file cannot be created or opened for appending
 */
public ReflectAvroFileWriter(File file, Class<T> _class, boolean append) throws IOException {
    schema = ReflectData.get().getSchema(_class);
    DatumWriter<T> reflectWriter = new ReflectDatumWriter<T>(_class);
    DataFileWriter<T> fileWriter = new DataFileWriter<T>(reflectWriter)
        .setCodec(CodecFactory.deflateCodec(9));
    // Appending only works against an existing file; fall back to create()
    // (which also writes the schema/header) when the file is absent.
    writer = (append && file.exists())
        ? fileWriter.appendTo(file)
        : fileWriter.create(schema, file);
}
// Re-open the writer in append mode against the existing tracker file so new
// entries follow the file's existing Avro header/blocks.
// NOTE(review): assumes trackerFile already exists with a valid Avro header —
// appendTo fails otherwise; confirm against the surrounding code.
writer.appendTo(trackerFile);
// Re-open the writer in append mode against the existing tracker file so new
// entries follow the file's existing Avro header/blocks.
// NOTE(review): assumes trackerFile already exists with a valid Avro header —
// appendTo fails otherwise; confirm against the surrounding code.
writer.appendTo(trackerFile);