/**
 * Returns the repository identifier of the record's current FlowFile.
 */
@Override
public Long getRecordIdentifier(final RepositoryRecord record) {
    final FlowFileRecord current = record.getCurrent();
    return current.getId();
}
/**
 * Identifies a record by the id of the FlowFile it currently wraps.
 */
@Override
public Long getRecordIdentifier(final RepositoryRecord record) {
    return record.getCurrent()
                 .getId();
}
/**
 * Tracks a FlowFile that this session has dequeued from the given connection:
 * creates a repository record bound to the connection's queue, registers it in
 * this session's record map, updates the session's inbound counters, remembers
 * the FlowFile as unacknowledged for that queue, and updates the connection's
 * output counts.
 *
 * @param flowFile the FlowFile that was pulled from the connection's queue
 * @param connection the connection the FlowFile was dequeued from
 */
private void registerDequeuedRecord(final FlowFileRecord flowFile, final Connection connection) {
    final StandardRepositoryRecord record = new StandardRepositoryRecord(connection.getFlowFileQueue(), flowFile);
    records.put(flowFile.getId(), record);

    // Session-level accounting of inbound FlowFile count and content size.
    flowFilesIn++;
    contentSizeIn += flowFile.getSize();

    // computeIfAbsent replaces the original get/null-check/put pattern: one map
    // lookup instead of two, and no separate mutable 'set' local.
    unacknowledgedFlowFiles.computeIfAbsent(connection.getFlowFileQueue(), queue -> new HashSet<>())
                           .add(flowFile);

    incrementConnectionOutputCounts(connection, flowFile);
}
/**
 * Creates a brand-new, empty FlowFile owned by this session. The FlowFile is
 * given a fresh UUID (also used as its initial filename), the default path
 * attribute, and the next sequence id from the repository context. The new
 * record is registered as "working" in this session and its UUID is recorded
 * in the set of FlowFiles created by this session.
 *
 * @return the newly created FlowFile
 */
@Override
public FlowFile create() {
    verifyTaskActive();

    final String uuid = UUID.randomUUID().toString();
    final Map<String, String> attrs = new HashMap<>();
    attrs.put(CoreAttributes.FILENAME.key(), uuid);
    attrs.put(CoreAttributes.PATH.key(), DEFAULT_FLOWFILE_PATH);
    attrs.put(CoreAttributes.UUID.key(), uuid);

    final FlowFileRecord fFile = new StandardFlowFileRecord.Builder()
            .id(context.getNextFlowFileSequence())
            .addAttributes(attrs)
            .build();

    // No originating queue: this FlowFile was created, not dequeued.
    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(fFile, attrs);
    records.put(fFile.getId(), record);
    createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key()));

    return fFile;
}
// Re-home the repository record in the new owning session, keyed by the FlowFile's
// repository id. NOTE(review): looks like part of a session-migration path — confirm
// with the enclosing method, which is not visible in this excerpt.
newOwner.records.put(flowFileRecord.getId(), repoRecord);
// Build a repository record with no originating queue (null) for a newly created
// FlowFile and mark it as the working copy in this session. NOTE(review): appears
// to be part of a create/clone path — confirm against the enclosing method.
final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
record.setWorking(fFile, newAttributes);
// Track the record by the FlowFile's repository id and remember the UUID of the
// FlowFile created within this session (presumably consumed later by provenance
// or commit logic — TODO confirm).
records.put(fFile.getId(), record);
createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key()));
// Register the repository record under the FlowFile's id and record the new
// FlowFile's UUID as created in this session (presumably used by provenance or
// commit bookkeeping — TODO confirm with the enclosing method).
records.put(fFile.getId(), record);
createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key()));
// Propagate the source record's transfer relationship to the clone's record and
// stage the clone for later registration. NOTE(review): 'toAdd' is accumulated
// here rather than applied to 'records' directly — presumably merged in after
// iteration completes; confirm with the enclosing method.
newRecord.setTransferRelationship(record.getTransferRelationship());
toAdd.put(clone.getId(), newRecord);
/**
 * Resolves the value of a single FlowFile schema field by name.
 *
 * @param fieldName one of the {@code FlowFileSchema} field-name constants
 * @return the corresponding value from the wrapped FlowFile (or the content
 *         claim), or {@code null} if the name matches no known field
 */
@Override
public Object getFieldValue(final String fieldName) {
    switch (fieldName) {
        case FlowFileSchema.ATTRIBUTES:
            return flowFile.getAttributes();
        case FlowFileSchema.CONTENT_CLAIM:
            return contentClaim;
        case FlowFileSchema.ENTRY_DATE:
            return flowFile.getEntryDate();
        case FlowFileSchema.FLOWFILE_SIZE:
            return flowFile.getSize();
        case FlowFileSchema.LINEAGE_START_DATE:
            return flowFile.getLineageStartDate();
        case FlowFileSchema.LINEAGE_START_INDEX:
            return flowFile.getLineageStartIndex();
        case FlowFileSchema.QUEUE_DATE:
            return flowFile.getLastQueueDate();
        case FlowFileSchema.QUEUE_DATE_INDEX:
            return flowFile.getQueueDateIndex();
        case FlowFileSchema.RECORD_ID:
            return flowFile.getId();
        default:
            return null;
    }
}
public Builder fromFlowFile(final FlowFileRecord specFlowFile) { if (specFlowFile == null) { return this; } bId = specFlowFile.getId(); bEntryDate = specFlowFile.getEntryDate(); bLineageStartDate = specFlowFile.getLineageStartDate(); bLineageStartIndex = specFlowFile.getLineageStartIndex(); bLineageIdentifiers.clear(); bPenaltyExpirationMs = specFlowFile.getPenaltyExpirationMillis(); bSize = specFlowFile.getSize(); // If this is a StandardFlowFileRecord, access the attributes map directly. Do not use the // getAttributes() method, because that will wrap the original in an UnmodifiableMap. As a result, // a Processor that continually calls session.append() for instance will have a FlowFile whose attributes // Map is wrapped thousands of times until it hits a StackOverflowError. We want the getter to return // UnmodifiableMap, though, so that Processors cannot directly modify that Map. bAttributes = specFlowFile instanceof StandardFlowFileRecord ? ((StandardFlowFileRecord) specFlowFile).attributes : specFlowFile.getAttributes(); bAttributesCopied = false; bClaim = specFlowFile.getContentClaim(); bClaimOffset = specFlowFile.getContentClaimOffset(); bLastQueueDate = specFlowFile.getLastQueueDate(); bQueueDateIndex = specFlowFile.getQueueDateIndex(); return this; }
/**
 * Clones a sub-range of the given FlowFile's content into a new FlowFile. The
 * clone shares the source's content claim (whose claimant count is incremented)
 * with an adjusted offset and the requested size, and receives a fresh UUID.
 * A full-range clone is reported as a CLONE provenance event; a partial range
 * is reported as a FORK.
 *
 * @param example the FlowFile to clone
 * @param offset byte offset into the source content at which the clone begins
 * @param size number of bytes of content the clone covers
 * @return the new FlowFile
 * @throws FlowFileHandlingException if offset + size exceeds the source's size
 */
@Override
public FlowFile clone(FlowFile example, final long offset, final long size) {
    verifyTaskActive();
    example = validateRecordState(example);

    final StandardRepositoryRecord sourceRecord = getRecord(example);
    final FlowFileRecord source = sourceRecord.getCurrent();
    final ContentClaim claim = sourceRecord.getCurrentClaim();

    if (offset + size > example.getSize()) {
        throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " exceeds size of " + example.toString());
    }

    final String newUuid = UUID.randomUUID().toString();
    final FlowFileRecord clone = new StandardFlowFileRecord.Builder()
            .fromFlowFile(source)
            .id(context.getNextFlowFileSequence())
            .contentClaimOffset(source.getContentClaimOffset() + offset)
            .size(size)
            .addAttribute(CoreAttributes.UUID.key(), newUuid)
            .build();

    // The clone references the same content claim as the source, so bump the
    // claimant count to keep the content alive.
    if (claim != null) {
        context.getContentRepository().incrementClaimaintCount(claim);
    }

    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(clone, clone.getAttributes());
    records.put(clone.getId(), record);

    final boolean fullContentClone = offset == 0L && size == example.getSize();
    if (fullContentClone) {
        provenanceReporter.clone(example, clone);
    } else {
        registerForkEvent(example, clone);
    }
    return clone;
}
// Resolve the FlowFile referenced by the provenance event's UUID; if it is known
// to this session, look up its repository record by id. NOTE(review): the opening
// 'if' block continues beyond this excerpt — the rest of the branch and any
// handling of an unknown UUID are not visible here.
final FlowFileRecord eventFlowFile = flowFileRecordMap.get(rawEvent.getFlowFileUuid());
if (eventFlowFile != null) {
    final StandardRepositoryRecord repoRecord = records.get(eventFlowFile.getId());
// Excerpt from a field-lookup switch (the enclosing switch statement and earlier
// cases are outside this view). Returns, per requested schema field: the record
// type's enum name, the current FlowFile's repository id, or the swap location.
return record.getType().name();
case RepositoryRecordSchema.RECORD_ID:
    return record.getCurrent().getId();
case RepositoryRecordSchema.SWAP_LOCATION:
    return record.getSwapLocation();
/**
 * Returns the id of the record's current FlowFile, used as the record key.
 */
@Override
public Long getRecordIdentifier(final RepositoryRecord record) {
    final FlowFileRecord flowFile = record.getCurrent();
    return flowFile.getId();
}
/**
 * Derives a record's identifier from the FlowFile it currently holds.
 */
@Override
public Long getRecordIdentifier(final RepositoryRecord record) {
    return record.getCurrent()
                 .getId();
}
/**
 * Looks up a FlowFile schema field by name and returns its value.
 *
 * @param fieldName a {@code FlowFileSchema} field-name constant
 * @return the field's value from the wrapped FlowFile or the content claim;
 *         {@code null} when the name is not a recognized field
 */
@Override
public Object getFieldValue(final String fieldName) {
    switch (fieldName) {
        case FlowFileSchema.ATTRIBUTES:
            return flowFile.getAttributes();
        case FlowFileSchema.CONTENT_CLAIM:
            return contentClaim;
        case FlowFileSchema.ENTRY_DATE:
            return flowFile.getEntryDate();
        case FlowFileSchema.FLOWFILE_SIZE:
            return flowFile.getSize();
        case FlowFileSchema.LINEAGE_START_DATE:
            return flowFile.getLineageStartDate();
        case FlowFileSchema.LINEAGE_START_INDEX:
            return flowFile.getLineageStartIndex();
        case FlowFileSchema.QUEUE_DATE:
            return flowFile.getLastQueueDate();
        case FlowFileSchema.QUEUE_DATE_INDEX:
            return flowFile.getQueueDateIndex();
        case FlowFileSchema.RECORD_ID:
            return flowFile.getId();
        default:
            return null;
    }
}
public Builder fromFlowFile(final FlowFileRecord specFlowFile) { if (specFlowFile == null) { return this; } bId = specFlowFile.getId(); bEntryDate = specFlowFile.getEntryDate(); bLineageStartDate = specFlowFile.getLineageStartDate(); bLineageStartIndex = specFlowFile.getLineageStartIndex(); bLineageIdentifiers.clear(); bPenaltyExpirationMs = specFlowFile.getPenaltyExpirationMillis(); bSize = specFlowFile.getSize(); // If this is a StandardFlowFileRecord, access the attributes map directly. Do not use the // getAttributes() method, because that will wrap the original in an UnmodifiableMap. As a result, // a Processor that continually calls session.append() for instance will have a FlowFile whose attributes // Map is wrapped thousands of times until it hits a StackOverflowError. We want the getter to return // UnmodifiableMap, though, so that Processors cannot directly modify that Map. bAttributes = specFlowFile instanceof StandardFlowFileRecord ? ((StandardFlowFileRecord) specFlowFile).attributes : specFlowFile.getAttributes(); bAttributesCopied = false; bClaim = specFlowFile.getContentClaim(); bClaimOffset = specFlowFile.getContentClaimOffset(); bLastQueueDate = specFlowFile.getLastQueueDate(); bQueueDateIndex = specFlowFile.getQueueDateIndex(); return this; }