// Verifies that a read from the content repository was not truncated.
// A non-negative length means forward progress; a negative length signals
// end-of-stream, at which point the number of bytes actually read must match
// the size the FlowFile claims to have, or data is missing from the repository.
// NOTE(review): the trailing "};" closes an enclosing anonymous class whose
// declaration is outside this chunk.
private int ensureNotTruncated(final int length) throws EOFException {
    if (length > -1) {
        // Still reading: accumulate the running byte total.
        bytesRead += length;
        return length;
    }
    // End of stream: fewer bytes than advertised means truncated content.
    if (bytesRead < flowFile.getSize()) {
        throw new EOFException("Expected " + flowFile + " to contain " + flowFile.getSize() + " bytes but the content repository only had " + bytesRead + " bytes for it");
    }
    return length;
} };
/**
 * Acknowledges the given FlowFile, removing it from the unacknowledged
 * accounting: one fewer record and its size fewer bytes.
 *
 * @param flowFile the FlowFile being acknowledged
 */
public void acknowledge(final FlowFileRecord flowFile) {
    logger.debug("{} Acknowledging {}", this, flowFile);
    final long byteCount = flowFile.getSize();
    incrementUnacknowledgedQueueSize(-1, -byteCount);
}
/**
 * Drains FlowFiles from {@code sourceQueue} into {@code destination} until
 * {@code maxResults} records have been gathered, the queue is empty, a
 * penalized record is encountered (it is returned to the queue), or the
 * per-iteration expired-record cap is reached. Expired records are diverted
 * into {@code expiredRecords} instead of {@code destination}.
 *
 * <p>Bug fix: previously, when adding an expired record hit
 * {@code MAX_EXPIRED_RECORDS_PER_ITERATION}, the loop broke before adding that
 * record's size to the drained byte total, even though the record had already
 * been removed from the queue. The caller subtracts the returned value from the
 * active queue's byte count, so the accounting drifted by that record's size.
 *
 * @param sourceQueue      queue to poll records from
 * @param destination      list that receives non-expired, non-penalized records
 * @param maxResults       maximum number of records to place in destination
 * @param expiredRecords   receives records whose lifetime has elapsed
 * @param expirationMillis maximum record age before expiration
 * @return total size, in bytes, of all records removed from the source queue
 */
private long drainQueue(final Queue<FlowFileRecord> sourceQueue, final List<FlowFileRecord> destination, int maxResults, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
    long drainedSize = 0L;
    FlowFileRecord pulled;

    while (destination.size() < maxResults && (pulled = sourceQueue.poll()) != null) {
        if (isExpired(pulled, expirationMillis)) {
            expiredRecords.add(pulled);
            // Count the expired record's bytes BEFORE the cap check so the
            // record that trips the cap is still reflected in the drained size.
            drainedSize += pulled.getSize();
            if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) {
                break;
            }
        } else {
            if (pulled.isPenalized()) {
                // Penalized records go back on the queue; they were not
                // drained, so their size must not be counted.
                sourceQueue.add(pulled);
                break;
            }
            destination.add(pulled);
            drainedSize += pulled.getSize();
        }
    }

    return drainedSize;
}
/**
 * Polls up to {@code maxResults} FlowFiles from the active queue into
 * {@code records}, diverting expired ones into {@code expiredRecords}, and
 * updates the queue-size accounting accordingly.
 */
private void doPoll(final List<FlowFileRecord> records, int maxResults, final Set<FlowFileRecord> expiredRecords, final long expirationMillis) {
    // Bring any swapped-out FlowFiles back into the active queue first.
    migrateSwapToActive();

    final long bytesDrained = drainQueue(activeQueue, records, maxResults, expiredRecords, expirationMillis);

    final long expiredBytes = expiredRecords.stream()
            .mapToLong(FlowFileRecord::getSize)
            .sum();

    // Everything drained (polled plus expired) has left the active queue;
    // only the polled records become unacknowledged.
    incrementActiveQueueSize(-(expiredRecords.size() + records.size()), -bytesDrained);
    incrementUnacknowledgedQueueSize(records.size(), bytesDrained - expiredBytes);
}
/**
 * Enqueues the given FlowFile. If the queue is already in swap mode, or the
 * active queue has grown past the swap threshold, the record goes to the swap
 * queue (entering swap mode and writing swap files as needed); otherwise it is
 * added to the active queue. Size accounting is updated either way.
 *
 * @param flowFile the FlowFile to enqueue
 */
public void put(final FlowFileRecord flowFile) {
    writeLock.lock();
    try {
        final boolean mustSwap = swapMode || activeQueue.size() >= swapThreshold;
        if (mustSwap) {
            swapQueue.add(flowFile);
            incrementSwapQueueSize(1, flowFile.getSize(), 0);
            swapMode = true;
            writeSwapFilesIfNecessary();
        } else {
            incrementActiveQueueSize(1, flowFile.getSize());
            activeQueue.add(flowFile);
        }

        logger.debug("{} put to {}", flowFile, this);
    } finally {
        writeLock.unlock("put(FlowFileRecord)");
    }
}
/**
 * Records one FlowFile of the given size leaving the specified Connection,
 * delegating to the identifier-based overload.
 */
private void incrementConnectionOutputCounts(final Connection connection, final FlowFileRecord record) {
    final String connectionId = connection.getIdentifier();
    final long byteCount = record.getSize();
    incrementConnectionOutputCounts(connectionId, 1, byteCount);
}
/**
 * Registers a FlowFile that has just been dequeued from the given Connection:
 * creates and stores a repository record for it, updates the session's
 * in-counts, tracks it as unacknowledged against its source queue, and bumps
 * the connection's output counters.
 *
 * @param flowFile   the dequeued FlowFile
 * @param connection the Connection it was pulled from
 */
private void registerDequeuedRecord(final FlowFileRecord flowFile, final Connection connection) {
    final StandardRepositoryRecord record = new StandardRepositoryRecord(connection.getFlowFileQueue(), flowFile);
    records.put(flowFile.getId(), record);

    flowFilesIn++;
    contentSizeIn += flowFile.getSize();

    // computeIfAbsent replaces the manual get / null-check / put idiom.
    unacknowledgedFlowFiles.computeIfAbsent(connection.getFlowFileQueue(), queue -> new HashSet<>()).add(flowFile);

    incrementConnectionOutputCounts(connection, flowFile);
}
/**
 * Adds the given FlowFile to its appropriate partition, updating the overall
 * size accounting first, and notifies the destination-event listener once the
 * partition lock is released.
 *
 * @param flowFile the FlowFile to place
 * @return the partition the FlowFile was placed into
 */
protected QueuePartition putAndGetPartition(final FlowFileRecord flowFile) {
    final QueuePartition destinationPartition;

    partitionReadLock.lock();
    try {
        adjustSize(1, flowFile.getSize());
        destinationPartition = getPartition(flowFile);
        destinationPartition.put(flowFile);
    } finally {
        partitionReadLock.unlock();
    }

    // Notify outside the lock, as the original did.
    eventListener.triggerDestinationEvent();
    return destinationPartition;
}
/**
 * Acknowledges the FlowFile against the local partition, shrinks the overall
 * queue size by one record and its byte count, then signals the source-event
 * listener.
 */
@Override
public void acknowledge(final FlowFileRecord flowFile) {
    localPartition.acknowledge(flowFile);

    final long byteCount = flowFile.getSize();
    adjustSize(-1, -byteCount);

    eventListener.triggerSourceEvent();
}
public FlowFileRecord poll(final Set<FlowFileRecord> expiredRecords, final long expirationMillis) { FlowFileRecord flowFile; // First check if we have any records Pre-Fetched. writeLock.lock(); try { flowFile = doPoll(expiredRecords, expirationMillis); if (flowFile != null) { logger.debug("{} poll() returning {}", this, flowFile); incrementUnacknowledgedQueueSize(1, flowFile.getSize()); } return flowFile; } finally { writeLock.unlock("poll(Set)"); } }
/**
 * Records one FlowFile entering the given Connection, sized by the record's
 * current FlowFile, delegating to the identifier-based overload.
 */
private void incrementConnectionInputCounts(final Connection connection, final RepositoryRecord record) {
    final FlowFileRecord current = record.getCurrent();
    incrementConnectionInputCounts(connection.getIdentifier(), 1, current.getSize());
}
protected QueueSize drop(final List<FlowFileRecord> flowFiles, final String requestor) throws IOException { // Create a Provenance Event and a FlowFile Repository record for each FlowFile final List<ProvenanceEventRecord> provenanceEvents = new ArrayList<>(flowFiles.size()); final List<RepositoryRecord> flowFileRepoRecords = new ArrayList<>(flowFiles.size()); for (final FlowFileRecord flowFile : flowFiles) { provenanceEvents.add(createDropProvenanceEvent(flowFile, requestor)); flowFileRepoRecords.add(createDeleteRepositoryRecord(flowFile)); } long dropContentSize = 0L; for (final FlowFileRecord flowFile : flowFiles) { dropContentSize += flowFile.getSize(); final ContentClaim contentClaim = flowFile.getContentClaim(); if (contentClaim == null) { continue; } final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); if (resourceClaim == null) { continue; } resourceClaimManager.decrementClaimantCount(resourceClaim); } provRepository.registerEvents(provenanceEvents); flowFileRepository.updateRepository(flowFileRepoRecords); return new QueueSize(flowFiles.size(), dropContentSize); }
/**
 * Initializes this Builder from an existing FlowFile, copying its id, dates,
 * lineage indices, penalty expiration, size, attributes, content claim and
 * claim offset, and queue-date bookkeeping. A null argument leaves the
 * Builder unchanged.
 *
 * @param specFlowFile the FlowFile to copy from; may be null
 * @return this Builder, for chaining
 */
public Builder fromFlowFile(final FlowFileRecord specFlowFile) {
    if (specFlowFile == null) {
        return this;
    }
    bId = specFlowFile.getId();
    bEntryDate = specFlowFile.getEntryDate();
    bLineageStartDate = specFlowFile.getLineageStartDate();
    bLineageStartIndex = specFlowFile.getLineageStartIndex();
    bLineageIdentifiers.clear();
    bPenaltyExpirationMs = specFlowFile.getPenaltyExpirationMillis();
    bSize = specFlowFile.getSize();
    // If this is a StandardFlowFileRecord, access the attributes map directly. Do not use the
    // getAttributes() method, because that will wrap the original in an UnmodifiableMap. As a result,
    // a Processor that continually calls session.append() for instance will have a FlowFile whose attributes
    // Map is wrapped thousands of times until it hits a StackOverflowError. We want the getter to return
    // UnmodifiableMap, though, so that Processors cannot directly modify that Map.
    bAttributes = specFlowFile instanceof StandardFlowFileRecord ? ((StandardFlowFileRecord) specFlowFile).attributes : specFlowFile.getAttributes();
    // Mark the attributes as shared (not yet copied) so a copy-on-write can
    // happen later if the Builder mutates them.
    bAttributesCopied = false;
    bClaim = specFlowFile.getContentClaim();
    bClaimOffset = specFlowFile.getContentClaimOffset();
    bLastQueueDate = specFlowFile.getLastQueueDate();
    bQueueDateIndex = specFlowFile.getQueueDateIndex();
    return this;
}
/**
 * Returns the value of the named schema field for this record, or {@code null}
 * if the field name is not one of the known FlowFileSchema fields.
 *
 * @param fieldName one of the FlowFileSchema field-name constants
 * @return the corresponding value, or null for an unknown field
 */
@Override
public Object getFieldValue(final String fieldName) {
    if (fieldName.equals(FlowFileSchema.ATTRIBUTES)) {
        return flowFile.getAttributes();
    }
    if (fieldName.equals(FlowFileSchema.CONTENT_CLAIM)) {
        return contentClaim;
    }
    if (fieldName.equals(FlowFileSchema.ENTRY_DATE)) {
        return flowFile.getEntryDate();
    }
    if (fieldName.equals(FlowFileSchema.FLOWFILE_SIZE)) {
        return flowFile.getSize();
    }
    if (fieldName.equals(FlowFileSchema.LINEAGE_START_DATE)) {
        return flowFile.getLineageStartDate();
    }
    if (fieldName.equals(FlowFileSchema.LINEAGE_START_INDEX)) {
        return flowFile.getLineageStartIndex();
    }
    if (fieldName.equals(FlowFileSchema.QUEUE_DATE)) {
        return flowFile.getLastQueueDate();
    }
    if (fieldName.equals(FlowFileSchema.QUEUE_DATE_INDEX)) {
        return flowFile.getQueueDateIndex();
    }
    if (fieldName.equals(FlowFileSchema.RECORD_ID)) {
        return flowFile.getId();
    }
    return null;
}
// Registers a RECEIVE provenance event for each FlowFile received over a
// load-balanced connection from the named node. All events in this batch share
// the same duration, measured from startTimestamp to now.
private void registerReceiveProvenanceEvents(final List<RemoteFlowFileRecord> flowFiles, final String nodeName, final String connectionId, final long startTimestamp) {
    final long duration = System.currentTimeMillis() - startTimestamp;

    final List<ProvenanceEventRecord> events = new ArrayList<>(flowFiles.size());
    for (final RemoteFlowFileRecord remoteFlowFile : flowFiles) {
        final FlowFileRecord flowFileRecord = remoteFlowFile.getFlowFile();

        final ProvenanceEventBuilder provenanceEventBuilder = new StandardProvenanceEventRecord.Builder()
            .fromFlowFile(flowFileRecord)
            .setEventType(ProvenanceEventType.RECEIVE)
            .setTransitUri("nifi://" + nodeName + "/loadbalance/" + connectionId)
            .setSourceSystemFlowFileIdentifier(remoteFlowFile.getRemoteUuid())
            .setEventDuration(duration)
            .setComponentId(connectionId)
            .setComponentType("Load Balanced Connection");

        // Only attach content-claim details when the FlowFile actually has content.
        final ContentClaim contentClaim = flowFileRecord.getContentClaim();
        if (contentClaim != null) {
            final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
            // Offset within the resource claim = claim offset + FlowFile's own
            // offset into that claim.
            provenanceEventBuilder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                contentClaim.getOffset() + flowFileRecord.getContentClaimOffset(), flowFileRecord.getSize());
        }

        final ProvenanceEventRecord provenanceEvent = provenanceEventBuilder.build();
        events.add(provenanceEvent);
    }

    provenanceRepository.registerEvents(events);
}
// Populates the provenance event's content-claim fields from the repository
// record's ORIGINAL claim. With no original claim, the current claim is
// explicitly cleared (size 0); otherwise both the current and previous claim
// fields are set to the same original claim/offset/size.
// NOTE(review): the flowFile parameter is unused here — presumably kept for
// signature symmetry with related helpers; confirm against the full file.
private void updateEventContentClaims(final ProvenanceEventBuilder builder, final FlowFile flowFile, final StandardRepositoryRecord repoRecord) {
    final ContentClaim originalClaim = repoRecord.getOriginalClaim();
    if (originalClaim == null) {
        builder.setCurrentContentClaim(null, null, null, null, 0L);
    } else {
        final ResourceClaim resourceClaim = originalClaim.getResourceClaim();
        // Offset within the resource claim = original FlowFile's offset into
        // the content claim + the claim's own offset.
        builder.setCurrentContentClaim(
            resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
            repoRecord.getOriginal().getContentClaimOffset() + originalClaim.getOffset(), repoRecord.getOriginal().getSize());
        builder.setPreviousContentClaim(
            resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
            repoRecord.getOriginal().getContentClaimOffset() + originalClaim.getOffset(), repoRecord.getOriginal().getSize());
    }
}
/**
 * Builds a DROP provenance event for the given FlowFile, attributed to this
 * queue's Connection. When the FlowFile has content, both the current and
 * previous content-claim fields are populated with the same claim details.
 *
 * @param flowFile the FlowFile being dropped
 * @param details  human-readable reason recorded on the event
 * @return the built DROP event
 */
private ProvenanceEventRecord createDropEvent(final FlowFileRecord flowFile, final String details) {
    final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
        .fromFlowFile(flowFile)
        .setEventType(ProvenanceEventType.DROP)
        .setDetails(details)
        .setComponentId(flowFileQueue.getIdentifier())
        .setComponentType("Connection")
        .setSourceQueueIdentifier(flowFileQueue.getIdentifier());

    final ContentClaim claim = flowFile.getContentClaim();
    if (claim != null) {
        final ResourceClaim resource = claim.getResourceClaim();
        // Absolute offset = claim offset + the FlowFile's offset within the claim.
        final long offset = claim.getOffset() + flowFile.getContentClaimOffset();
        final long size = flowFile.getSize();

        builder.setCurrentContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), offset, size);
        builder.setPreviousContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), offset, size);
    }

    return builder.build();
}
/**
 * Builds a DROP provenance event for a FlowFile removed when this queue is
 * emptied, recording who requested the empty operation.
 *
 * <p>Consistency fix: the previous-content-claim offset now includes the
 * FlowFile's own offset within the content claim
 * ({@code contentClaim.getOffset() + flowFile.getContentClaimOffset()}),
 * matching every other provenance-event builder in this file. Using only the
 * claim's offset mis-identifies the byte range for FlowFiles that begin
 * partway into a shared content claim.
 *
 * @param flowFile  the FlowFile being dropped
 * @param requestor identity of the party that emptied the queue
 * @return the built DROP event
 */
private ProvenanceEventRecord createDropProvenanceEvent(final FlowFileRecord flowFile, final String requestor) {
    final ProvenanceEventBuilder builder = provRepository.eventBuilder();
    builder.fromFlowFile(flowFile);
    builder.setEventType(ProvenanceEventType.DROP);
    builder.setLineageStartDate(flowFile.getLineageStartDate());
    builder.setComponentId(getIdentifier());
    builder.setComponentType("Connection");
    builder.setAttributes(flowFile.getAttributes(), Collections.emptyMap());
    builder.setDetails("FlowFile Queue emptied by " + requestor);
    builder.setSourceQueueIdentifier(getIdentifier());

    final ContentClaim contentClaim = flowFile.getContentClaim();
    if (contentClaim != null) {
        final ResourceClaim resourceClaim = contentClaim.getResourceClaim();
        builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
            contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize());
    }

    return builder.build();
}
/**
 * Builds a SEND provenance event for a FlowFile re-distributed to another node
 * over this load-balanced connection. When the FlowFile has content, both the
 * current and previous content-claim fields carry the same claim details.
 *
 * @param flowFile       the FlowFile being sent
 * @param nodeIdentifier the destination node
 * @return the built SEND event
 */
private ProvenanceEventRecord createSendEvent(final FlowFileRecord flowFile, final NodeIdentifier nodeIdentifier) {
    final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder()
        .fromFlowFile(flowFile)
        .setEventType(ProvenanceEventType.SEND)
        .setDetails("Re-distributed for Load-balanced connection")
        .setComponentId(flowFileQueue.getIdentifier())
        .setComponentType("Connection")
        .setSourceQueueIdentifier(flowFileQueue.getIdentifier())
        .setSourceSystemFlowFileIdentifier(flowFile.getAttribute(CoreAttributes.UUID.key()))
        .setTransitUri("nifi://" + nodeIdentifier.getApiAddress() + "/loadbalance/" + flowFileQueue.getIdentifier());

    final ContentClaim claim = flowFile.getContentClaim();
    if (claim != null) {
        final ResourceClaim resource = claim.getResourceClaim();
        // Absolute offset = claim offset + the FlowFile's offset within the claim.
        final long offset = claim.getOffset() + flowFile.getContentClaimOffset();
        final long size = flowFile.getSize();

        builder.setCurrentContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), offset, size);
        builder.setPreviousContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), offset, size);
    }

    return builder.build();
}
// Fragment of a larger method whose start/end are outside this chunk:
// captures the repository record's original content claim along with the
// original FlowFile's offset into that claim and its size — presumably for
// provenance/claim bookkeeping later in the enclosing method. TODO(review):
// confirm against the full source.
final ContentClaim originalClaim = repoRecord.getOriginalClaim(); final long originalOffset = repoRecord.getOriginal().getContentClaimOffset(); final long originalSize = repoRecord.getOriginal().getSize();