@Override
public Record next() throws IOException {
    // Stop once the configured maximum number of records has been returned
    if (count >= maxRecords) {
        return null;
    }

    final Record record = original.next();
    if (record != null) {
        count++;
    }

    return record;
}
};
@Override
public RecordSchema getSchema() throws IOException {
    return original.getSchema();
}
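The two overrides above typically belong to a single anonymous RecordSet returned from a small factory. A minimal sketch of that enclosing shape, assuming NiFi's RecordSet/Record/RecordSchema interfaces; the limit() name is hypothetical:

import java.io.IOException;

import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.serialization.record.RecordSet;

// Hypothetical helper: returns a view of 'original' that yields at most maxRecords records.
public static RecordSet limit(final RecordSet original, final int maxRecords) {
    return new RecordSet() {
        private int count = 0;

        @Override
        public RecordSchema getSchema() throws IOException {
            return original.getSchema();
        }

        @Override
        public Record next() throws IOException {
            if (count >= maxRecords) {
                return null;  // cap reached: report end of set
            }

            final Record record = original.next();
            if (record != null) {
                count++;
            }
            return record;
        }
    };
}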
@Override
public WriteResult write(final RecordSet rs) throws IOException {
    final int colCount = rs.getSchema().getFieldCount();
    Assert.assertEquals(columnNames.size(), colCount);

    final List<String> colNames = new ArrayList<>(colCount);
    for (int i = 0; i < colCount; i++) {
        colNames.add(rs.getSchema().getField(i).getFieldName());
    }
    Assert.assertEquals(columnNames, colNames);

    // Iterate over the remaining records to ensure that we read the entire stream.
    // If we don't, we won't consume all of the data and as a result will not close the stream properly.
    Record record;
    while ((record = rs.next()) != null) {
        System.out.println(record);
    }

    return WriteResult.of(0, Collections.emptyMap());
}
@Override
public Record next() throws IOException {
    // Return the pushed-back record first, if one is pending
    if (pushback != null) {
        final Record record = pushback;
        pushback = null;
        return record;
    }

    return original.next();
}
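The pushback field gives the wrapper one record of lookahead: a record that has already been read can be stashed and returned again by the following next() call. A minimal sketch of a lookahead accessor built on that field; the peek() name is hypothetical:

// Hypothetical lookahead: read one record ahead and stash it in 'pushback'
// so that the next call to next() returns the same record.
public Record peek() throws IOException {
    if (pushback == null) {
        pushback = original.next();
    }
    return pushback;
}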
/**
 * @param recordSet the RecordSet to write
 * @return the result of writing the record set
 * @throws IOException if an I/O error happens reading from the RecordSet, or writing a Record
 */
default WriteResult write(final RecordSet recordSet) throws IOException {
    int recordCount = 0;

    Record record;
    while ((record = recordSet.next()) != null) {
        write(record);
        recordCount++;
    }

    return WriteResult.of(recordCount, Collections.emptyMap());
}
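A minimal sketch of driving this default method with an in-memory RecordSet, assuming the NiFi record API; 'records', 'schema', and 'writer' are placeholders for values created elsewhere:

// Expose a fixed list of records as a RecordSet; the default write(RecordSet)
// above then consumes it one record at a time.
final Iterator<Record> iterator = records.iterator();
final RecordSet recordSet = new RecordSet() {
    @Override
    public RecordSchema getSchema() {
        return schema;
    }

    @Override
    public Record next() {
        return iterator.hasNext() ? iterator.next() : null;
    }
};

final WriteResult result = writer.write(recordSet);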
while ((record = rs.next()) != null) {
    if (++recordCount > failAfterN && failAfterN > -1) {
        throw new IOException("Unit Test intentionally throwing IOException after " + failAfterN + " records were written");
    }
@Override
public WriteResult write(final RecordSet recordSet) throws IOException {
    beginRecordSet();

    Record record;
    while ((record = recordSet.next()) != null) {
        write(record);
    }

    return finishRecordSet();
}
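From the caller's perspective, the begin/finish pairing is an implementation detail; a single write(RecordSet) call brackets the whole set. A minimal sketch of typical use, where createWriter() and logger are placeholders:

// One write(...) call per record set; the writer itself calls
// beginRecordSet() before the first record and finishRecordSet() after the last.
try (final RecordSetWriter writer = createWriter(out)) {
    final WriteResult result = writer.write(recordSet);
    logger.info("Wrote {} records", result.getRecordCount());
}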
/**
 * @param recordSet the RecordSet to write
 * @return the result of writing the record set
 * @throws IOException if an I/O error happens reading from the RecordSet, or writing a Record
 */
public WriteResult write(final RecordSet recordSet) throws IOException {
    int recordCount = 0;

    Record record;
    while ((record = recordSet.next()) != null) {
        write(record);
        recordCount++;
    }

    // Add the Hive DDL as an attribute so that downstream processors can create the table
    final String hiveDDL = NiFiOrcUtils.generateHiveDDL(recordSchema, hiveTableName, hiveFieldNames);
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(HIVE_DDL_ATTRIBUTE, hiveDDL);

    return WriteResult.of(recordCount, attributes);
}
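A short sketch of how the returned result is typically consumed in a processor, assuming a ProcessSession and FlowFile are in scope; the 'record.count' attribute name is a common convention rather than something required here:

// Apply the writer's attributes (including the Hive DDL) to the FlowFile so
// downstream processors can pick them up, e.g. to create the target table.
final WriteResult writeResult = writer.write(recordSet);
flowFile = session.putAllAttributes(flowFile, writeResult.getAttributes());
flowFile = session.putAttribute(flowFile, "record.count", String.valueOf(writeResult.getRecordCount()));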
while ((record = recordSet.next()) != null) {
    recordCount++;
    baos.reset();
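For context, the fragment above reuses one ByteArrayOutputStream across records. A minimal sketch of the complete pattern; serialize() and publish() are hypothetical stand-ins for the per-record work:

final ByteArrayOutputStream baos = new ByteArrayOutputStream();
int recordCount = 0;

Record record;
while ((record = recordSet.next()) != null) {
    recordCount++;
    baos.reset();                    // clear the buffer but keep its capacity
    serialize(record, baos);         // hypothetical: write one record's bytes
    publish(baos.toByteArray());     // hypothetical: hand off the per-record payload
}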