/**
 * Returns the next record in the stream or <code>null</code> if no more records are available.
 * Types will be coerced and any unknown fields will be dropped.
 *
 * @return the next record in the stream or <code>null</code> if no more records are available.
 *
 * @throws IOException if unable to read from the underlying data
 * @throws MalformedRecordException if an unrecoverable failure occurs when trying to parse a record
 * @throws SchemaValidationException if a Record contains a field that violates the schema and cannot be coerced into the appropriate field type.
 */
default Record nextRecord() throws IOException, MalformedRecordException {
    return nextRecord(true, true);
}
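Taken together, the contract above implies a canonical consumption pattern: loop until nextRecord() returns null. A minimal sketch, assuming the three-argument createRecordReader(...) variant used in the examples below; readerFactory, getLogger(), and the per-record handle(...) callback are illustrative stand-ins, not part of the documented API:

private void drainRecords(final Map<String, String> attributes, final InputStream in) throws IOException {
    try (final RecordReader reader = readerFactory.createRecordReader(attributes, in, getLogger())) {
        Record record;
        // nextRecord() returns null once the stream is exhausted, so this loop drains it.
        while ((record = reader.nextRecord()) != null) {
            handle(record); // hypothetical per-record callback
        }
    } catch (final MalformedRecordException | SchemaNotFoundException e) {
        // Unrecoverable parse or schema failure, per the contract documented above.
        throw new IOException("Could not read records", e);
    }
}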
@Override
public Record next() throws IOException {
    try {
        return RecordReader.this.nextRecord();
    } catch (final MalformedRecordException mre) {
        throw new IOException(mre);
    }
}
};
@Override
public void write(long writeId, InputStream inputStream) throws StreamingException {
    // The inputStream is already available to the recordReader, so just iterate through the records
    try {
        Record record;
        while ((record = recordReader.nextRecord()) != null) {
            write(writeId, record);
        }
    } catch (MalformedRecordException | IOException e) {
        throw new StreamingException(e.getLocalizedMessage(), e);
    }
}
while ((record = reader.nextRecord()) != null) {
    for (Map.Entry<String, RecordPath> entry : paths.entrySet()) {
        RecordPathResult result = entry.getValue().evaluate(record);
    return Optional.ofNullable(errOrReader.get().getValue().nextRecord());
} catch (Exception e) {
    throw new LookupFailureException(String.format("Failed to read Record when looking up with %s", coordinates), e);
while ((currentRecord = recordParser.nextRecord()) != null) {
    Object sql = currentRecord.getValue(sqlField);
    if (sql == null || StringUtils.isEmpty((String) sql)) {
private Record handleResponse(InputStream is, Map<String, String> context) throws SchemaNotFoundException, MalformedRecordException, IOException {
    try (RecordReader reader = readerFactory.createRecordReader(context, is, getLogger())) {
        Record record = reader.nextRecord();
        if (recordPath != null) {
            Optional<FieldValue> fv = recordPath.evaluate(record).getSelectedFields().findFirst();
            if (fv.isPresent()) {
                FieldValue fieldValue = fv.get();
                RecordSchema schema = new SimpleRecordSchema(Collections.singletonList(fieldValue.getField()));

                Record temp;
                Object value = fieldValue.getValue();
                if (value instanceof Record) {
                    temp = (Record) value;
                } else if (value instanceof Map) {
                    temp = new MapRecord(schema, (Map<String, Object>) value);
                } else {
                    Map<String, Object> val = new HashMap<>();
                    val.put(fieldValue.getField().getFieldName(), value);
                    temp = new MapRecord(schema, val);
                }

                record = temp;
            } else {
                record = null;
            }
        }
        return record;
    } catch (Exception ex) {
        is.close();
        throw ex;
    }
}
@Override
public boolean moveNext() {
    currentRow = null;

    try {
        final Record record = recordParser.nextRecord();
        if (record == null) {
            // If we are out of data, close the InputStream. We do this because
            // Calcite does not necessarily call our close() method.
            close();
            try {
                onFinish();
            } catch (final Exception e) {
                logger.error("Failed to perform tasks when enumerator was finished", e);
            }
            return false;
        }

        currentRow = filterColumns(record);
    } catch (final Exception e) {
        throw new ProcessException("Failed to read next record in stream for " + flowFile + " due to " + e.getMessage(), e);
    }

    recordsRead++;
    return true;
}
@Override
public void process(final InputStream in, final OutputStream out) throws IOException {
    try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in, getLogger())) {
        final RecordSchema writeSchema = writerFactory.getSchema(originalAttributes, reader.getSchema());

        try (final RecordSetWriter writer = writerFactory.createWriter(getLogger(), writeSchema, out)) {
            writer.beginRecordSet();

            Record record;
            while ((record = reader.nextRecord()) != null) {
                final Record processed = AbstractRecordProcessor.this.process(record, writeSchema, original, context);
                writer.write(processed);
            }

            final WriteResult writeResult = writer.finishRecordSet();
            attributes.put("record.count", String.valueOf(writeResult.getRecordCount()));
            attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());
            attributes.putAll(writeResult.getAttributes());
            recordCount.set(writeResult.getRecordCount());
        }
    } catch (final SchemaNotFoundException e) {
        throw new ProcessException(e.getLocalizedMessage(), e);
    } catch (final MalformedRecordException e) {
        throw new ProcessException("Could not parse incoming data", e);
    }
}
});
@Override
public void process(final InputStream in) throws IOException {
    try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in, getLogger())) {
        final RecordSchema writeSchema = writerFactory.getSchema(originalAttributes, reader.getSchema());

        Record record;
        while ((record = reader.nextRecord()) != null) {
            final Set<Relationship> relationships = route(record, writeSchema, original, context, flowFileContext);
            numRecords.incrementAndGet();

            for (final Relationship relationship : relationships) {
                final RecordSetWriter recordSetWriter;
                Tuple<FlowFile, RecordSetWriter> tuple = writers.get(relationship);
                if (tuple == null) {
                    FlowFile outFlowFile = session.create(original);
                    final OutputStream out = session.write(outFlowFile);

                    recordSetWriter = writerFactory.createWriter(getLogger(), writeSchema, out);
                    recordSetWriter.beginRecordSet();
                    tuple = new Tuple<>(outFlowFile, recordSetWriter);
                    writers.put(relationship, tuple);
                } else {
                    recordSetWriter = tuple.getValue();
                }

                recordSetWriter.write(record);
            }
        }
    } catch (final SchemaNotFoundException | MalformedRecordException e) {
        throw new ProcessException("Could not parse incoming data", e);
    }
}
});
List<SolrInputDocument> inputDocumentList = new LinkedList<>();
try {
    while ((record = reader.nextRecord()) != null) {
        SolrInputDocument inputDoc = new SolrInputDocument();
        writeRecord(record, inputDoc, fieldList, EMPTY_STRING);
Record record;
if (startIndex >= 0) {
    // Skip ahead to the restart index before processing
    while (index++ < startIndex && (reader.nextRecord()) != null) {}
}

while ((record = reader.nextRecord()) != null) {
    PutFlowFile putFlowFile = createPut(context, record, reader.getSchema(), recordPath, flowFile, rowFieldName,
            columnFamily, timestampFieldName, fieldEncodingStrategy, rowEncodingStrategy, complexFieldStrategy);
RecordSchema schema = reader.getSchema();
Record record;
while ((record = reader.nextRecord()) != null) {
while ((record = reader.nextRecord()) != null) {
while ((record = recordReader.nextRecord()) != null) {
    if (recordWriter == null) {
        final OutputStream rawOut = session.write(merged);
batchStatement.setSerialConsistencyLevel(ConsistencyLevel.valueOf(serialConsistencyLevel));

while ((record = reader.nextRecord()) != null) {
    Map<String, Object> recordContentMap = (Map<String, Object>) DataTypeUtils
            .convertRecordFieldtoObject(record, RecordFieldType.RECORD.getRecordDataType(record.getSchema()));
while ((record = reader.nextRecord()) != null) {
    Map<String, Object> recordMap = (Map<String, Object>) DataTypeUtils.convertRecordFieldtoObject(record,
            RecordFieldType.RECORD.getRecordDataType(record.getSchema()));
Record record = recordReader.nextRecord();
assertEquals(1, record.getValue("id"));
assertEquals("John", record.getValue("firstName"));
assertEquals("Doe", record.getValue("lastName"));

record = recordReader.nextRecord();
assertEquals(2, record.getValue("id"));
assertEquals("Jane", record.getValue("firstName"));