/**
 * Returns the Content Claim of the original FlowFile record, or {@code null} if there is no original record.
 */
@Override
public ContentClaim getOriginalClaim() {
    if (originalFlowFileRecord == null) {
        return null;
    }
    return originalFlowFileRecord.getContentClaim();
}
/**
 * Creates a field map exposing the given FlowFile record as a Record with the supplied schema.
 *
 * @param flowFile the FlowFile record to wrap
 * @param schema the record schema; must contain a {@code FlowFileSchema.CONTENT_CLAIM} field
 */
public FlowFileRecordFieldMap(final FlowFileRecord flowFile, final RecordSchema schema) {
    this.flowFile = flowFile;
    this.schema = schema;

    // Derive the sub-schema that describes the content-claim portion of the record
    final RecordField claimField = schema.getField(FlowFileSchema.CONTENT_CLAIM);
    contentClaimSchema = new RecordSchema(claimField.getSubFields());

    final ContentClaim claim = flowFile.getContentClaim();
    if (claim == null) {
        contentClaim = null;
    } else {
        contentClaim = new ContentClaimFieldMap(claim, flowFile.getContentClaimOffset(), contentClaimSchema);
    }
}
// Open the raw content stream and position it at this FlowFile's slice of the (possibly shared) claim.
// NOTE: in the original, StreamUtils.skip() was placed after the throw inside the first catch block,
// making it an unreachable statement (a compile error) and leaving the stream never positioned.
// The skip belongs in the try: the EOFException handler below exists precisely to translate a
// too-short claim (skip hitting end-of-stream before the offset) into a ContentNotFoundException.
final InputStream rawIn;
try {
    rawIn = contentRepository.read(flowFile.getContentClaim());
    // Advance past any preceding FlowFiles' content stored in the same claim
    StreamUtils.skip(rawIn, flowFile.getContentClaimOffset());
} catch (final ContentNotFoundException cnfe) {
    // Re-throw with the FlowFile attached so callers know whose content is missing
    throw new ContentNotFoundException(flowFile, flowFile.getContentClaim(), cnfe.getMessage());
} catch (final EOFException eof) {
    throw new ContentNotFoundException(flowFile, flowFile.getContentClaim(), "FlowFile has a Content Claim Offset of " + flowFile.getContentClaimOffset() + " bytes but the Content Claim does not have that many bytes");
/**
 * Returns the Content Claim currently associated with the wrapped FlowFile.
 */
@Override
public ContentClaim getCurrentClaim() {
    final ContentClaim claim = flowFile.getContentClaim();
    return claim;
}
final ContentClaim claim1 = f1.getContentClaim(); final ContentClaim claim2 = f2.getContentClaim();
/**
 * Returns the original Content Claim, which for this record is the FlowFile's current claim.
 */
@Override
public ContentClaim getOriginalClaim() {
    final ContentClaim claim = flowFile.getContentClaim();
    return claim;
}
out.writeLong(flowFile.getSize()); final ContentClaim claim = flowFile.getContentClaim(); if (claim == null) { out.writeBoolean(false);
/**
 * Returns the Content Claim of the working FlowFile record, or {@code null} if there is no working record.
 */
ContentClaim getWorkingClaim() {
    if (workingFlowFileRecord == null) {
        return null;
    }
    return workingFlowFileRecord.getContentClaim();
}
public Builder fromFlowFile(final FlowFileRecord specFlowFile) { if (specFlowFile == null) { return this; } bId = specFlowFile.getId(); bEntryDate = specFlowFile.getEntryDate(); bLineageStartDate = specFlowFile.getLineageStartDate(); bLineageStartIndex = specFlowFile.getLineageStartIndex(); bLineageIdentifiers.clear(); bPenaltyExpirationMs = specFlowFile.getPenaltyExpirationMillis(); bSize = specFlowFile.getSize(); // If this is a StandardFlowFileRecord, access the attributes map directly. Do not use the // getAttributes() method, because that will wrap the original in an UnmodifiableMap. As a result, // a Processor that continually calls session.append() for instance will have a FlowFile whose attributes // Map is wrapped thousands of times until it hits a StackOverflowError. We want the getter to return // UnmodifiableMap, though, so that Processors cannot directly modify that Map. bAttributes = specFlowFile instanceof StandardFlowFileRecord ? ((StandardFlowFileRecord) specFlowFile).attributes : specFlowFile.getAttributes(); bAttributesCopied = false; bClaim = specFlowFile.getContentClaim(); bClaimOffset = specFlowFile.getContentClaimOffset(); bLastQueueDate = specFlowFile.getLastQueueDate(); bQueueDateIndex = specFlowFile.getQueueDateIndex(); return this; }
/**
 * Returns the Content Claim of the current FlowFile record, or {@code null} if there is no current record.
 */
@Override
public ContentClaim getCurrentClaim() {
    if (getCurrent() == null) {
        return null;
    }
    return getCurrent().getContentClaim();
}
.setEventTime(System.currentTimeMillis()); final ContentClaim contentClaim = flowFile.getContentClaim(); if (contentClaim != null) { final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
protected QueueSize drop(final List<FlowFileRecord> flowFiles, final String requestor) throws IOException { // Create a Provenance Event and a FlowFile Repository record for each FlowFile final List<ProvenanceEventRecord> provenanceEvents = new ArrayList<>(flowFiles.size()); final List<RepositoryRecord> flowFileRepoRecords = new ArrayList<>(flowFiles.size()); for (final FlowFileRecord flowFile : flowFiles) { provenanceEvents.add(createDropProvenanceEvent(flowFile, requestor)); flowFileRepoRecords.add(createDeleteRepositoryRecord(flowFile)); } long dropContentSize = 0L; for (final FlowFileRecord flowFile : flowFiles) { dropContentSize += flowFile.getSize(); final ContentClaim contentClaim = flowFile.getContentClaim(); if (contentClaim == null) { continue; } final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); if (resourceClaim == null) { continue; } resourceClaimManager.decrementClaimantCount(resourceClaim); } provRepository.registerEvents(provenanceEvents); flowFileRepository.updateRepository(flowFileRepoRecords); return new QueueSize(flowFiles.size(), dropContentSize); }
/**
 * Registers a RECEIVE provenance event for every FlowFile received over a load-balanced connection.
 *
 * @param flowFiles the FlowFiles received from the remote node
 * @param nodeName the name of the node the FlowFiles came from (used in the transit URI)
 * @param connectionId identifier of the load-balanced connection
 * @param startTimestamp when the receive began, in epoch millis; used to compute event duration
 */
private void registerReceiveProvenanceEvents(final List<RemoteFlowFileRecord> flowFiles, final String nodeName, final String connectionId, final long startTimestamp) {
    final long duration = System.currentTimeMillis() - startTimestamp;

    final List<ProvenanceEventRecord> events = new ArrayList<>(flowFiles.size());
    for (final RemoteFlowFileRecord remoteFlowFile : flowFiles) {
        final FlowFileRecord flowFileRecord = remoteFlowFile.getFlowFile();

        final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
            .fromFlowFile(flowFileRecord)
            .setEventType(ProvenanceEventType.RECEIVE)
            .setTransitUri("nifi://" + nodeName + "/loadbalance/" + connectionId)
            .setSourceSystemFlowFileIdentifier(remoteFlowFile.getRemoteUuid())
            .setEventDuration(duration)
            .setComponentId(connectionId)
            .setComponentType("Load Balanced Connection");

        final ContentClaim contentClaim = flowFileRecord.getContentClaim();
        if (contentClaim != null) {
            final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
            // The FlowFile's content begins at its own offset within the (possibly shared) content claim
            builder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                contentClaim.getOffset() + flowFileRecord.getContentClaimOffset(), flowFileRecord.getSize());
        }

        events.add(builder.build());
    }

    provenanceRepository.registerEvents(events);
}
final ContentClaim contentClaim = flowFile.getContentClaim(); if (contentClaim != null) { resourceClaims.add(contentClaim.getResourceClaim());
final ContentClaim contentClaim = flowFile.getContentClaim(); if (contentClaim == null) { resourceClaim = null; stream = new ByteArrayInputStream(new byte[0]); } else { resourceClaim = flowFile.getContentClaim().getResourceClaim(); stream = contentRepository.read(flowFile.getContentClaim()); final long contentClaimOffset = flowFile.getContentClaimOffset(); if (contentClaimOffset > 0L) {
contentRepository.incrementClaimaintCount(flowFile.getFlowFile().getContentClaim()); contentRepository.decrementClaimantCount(remoteFlowFile.getFlowFile().getContentClaim());
/**
 * Builds a DROP provenance event for the given FlowFile, attributed to this connection's queue.
 *
 * @param flowFile the FlowFile being dropped
 * @param details free-text detail to record on the event
 * @return the built DROP event
 */
private ProvenanceEventRecord createDropEvent(final FlowFileRecord flowFile, final String details) {
    final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
        .fromFlowFile(flowFile)
        .setEventType(ProvenanceEventType.DROP)
        .setDetails(details)
        .setComponentId(flowFileQueue.getIdentifier())
        .setComponentType("Connection")
        .setSourceQueueIdentifier(flowFileQueue.getIdentifier());

    final ContentClaim claim = flowFile.getContentClaim();
    if (claim != null) {
        final ResourceClaim resourceClaim = claim.getResourceClaim();
        // The FlowFile's content starts at its own offset within the (possibly shared) claim
        final long claimOffset = claim.getOffset() + flowFile.getContentClaimOffset();
        // Dropping does not alter content, so current and previous claims are recorded identically
        builder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), claimOffset, flowFile.getSize());
        builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), claimOffset, flowFile.getSize());
    }

    return builder.build();
}
/**
 * Builds a DROP provenance event recording that the given FlowFile was removed when this queue was emptied.
 *
 * @param flowFile the FlowFile being dropped
 * @param requestor identity of whoever requested the queue to be emptied (recorded in the event details)
 * @return the built DROP event
 */
private ProvenanceEventRecord createDropProvenanceEvent(final FlowFileRecord flowFile, final String requestor) {
    final ProvenanceEventBuilder builder = provRepository.eventBuilder()
        .fromFlowFile(flowFile)
        .setEventType(ProvenanceEventType.DROP)
        .setLineageStartDate(flowFile.getLineageStartDate())
        .setComponentId(getIdentifier())
        .setComponentType("Connection")
        .setAttributes(flowFile.getAttributes(), Collections.emptyMap())
        .setDetails("FlowFile Queue emptied by " + requestor)
        .setSourceQueueIdentifier(getIdentifier());

    final ContentClaim contentClaim = flowFile.getContentClaim();
    if (contentClaim != null) {
        final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
        builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), contentClaim.getOffset(), flowFile.getSize());
    }
    return builder.build();
}
/**
 * Builds a SEND provenance event for a FlowFile being redistributed to another node
 * over a load-balanced connection.
 *
 * @param flowFile the FlowFile being sent
 * @param nodeIdentifier the destination node (used in the transit URI)
 * @return the built SEND event
 */
private ProvenanceEventRecord createSendEvent(final FlowFileRecord flowFile, final NodeIdentifier nodeIdentifier) {
    final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
        .fromFlowFile(flowFile)
        .setEventType(ProvenanceEventType.SEND)
        .setDetails("Re-distributed for Load-balanced connection")
        .setComponentId(flowFileQueue.getIdentifier())
        .setComponentType("Connection")
        .setSourceQueueIdentifier(flowFileQueue.getIdentifier())
        .setSourceSystemFlowFileIdentifier(flowFile.getAttribute(CoreAttributes.UUID.key()))
        .setTransitUri("nifi://" + nodeIdentifier.getApiAddress() + "/loadbalance/" + flowFileQueue.getIdentifier());

    final ContentClaim claim = flowFile.getContentClaim();
    if (claim != null) {
        final ResourceClaim resourceClaim = claim.getResourceClaim();
        // The FlowFile's content starts at its own offset within the (possibly shared) claim
        final long claimOffset = claim.getOffset() + flowFile.getContentClaimOffset();
        // Sending does not alter content, so current and previous claims are recorded identically
        builder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), claimOffset, flowFile.getSize());
        builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), claimOffset, flowFile.getSize());
    }

    return builder.build();
}
/**
 * Creates a FlowFileDTO describing the given FlowFile record, including its
 * attributes, queued/lineage durations, and (when present) content-claim location.
 *
 * @param record the FlowFile record to convert
 * @return the populated DTO
 */
public FlowFileDTO createFlowFileDTO(final FlowFileRecord record) {
    final Date now = new Date();

    final FlowFileDTO dto = new FlowFileDTO();
    dto.setUuid(record.getAttribute(CoreAttributes.UUID.key()));
    dto.setFilename(record.getAttribute(CoreAttributes.FILENAME.key()));
    dto.setPenalized(record.isPenalized());
    dto.setSize(record.getSize());
    dto.setAttributes(record.getAttributes());

    // Durations are measured relative to the moment the DTO is built
    dto.setQueuedDuration(now.getTime() - record.getLastQueueDate());
    dto.setLineageDuration(now.getTime() - record.getLineageStartDate());

    final ContentClaim contentClaim = record.getContentClaim();
    if (contentClaim != null) {
        final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
        dto.setContentClaimSection(resourceClaim.getSection());
        dto.setContentClaimContainer(resourceClaim.getContainer());
        dto.setContentClaimIdentifier(resourceClaim.getId());
        // Offset of this FlowFile's content within the (possibly shared) resource claim
        dto.setContentClaimOffset(contentClaim.getOffset() + record.getContentClaimOffset());
        dto.setContentClaimFileSizeBytes(record.getSize());
        dto.setContentClaimFileSize(FormatUtils.formatDataSize(record.getSize()));
    }

    return dto;
}