private CompletableFuture<ReadResultEntry> fetchNextEntry() { // Get the next item. We don't really rely on hasNext; we just use the fact that next() returns null // if there is nothing else to read. ReadResultEntry currentEntry = this.readResult.next(); if (currentEntry != null && currentEntry.getType() != ReadResultEntryType.EndOfStreamSegment) { // We have something to retrieve. CompletableFuture<ReadResultEntryContents> entryContentsFuture = currentEntry.getContent(); if (entryContentsFuture.isDone()) { // Result is readily available. return CompletableFuture.completedFuture(currentEntry); } else if (this.entryHandler.shouldRequestContents(currentEntry.getType(), currentEntry.getStreamSegmentOffset())) { // ReadResultEntry that does not have data readily available and we were instructed to request the content. currentEntry.requestContent(this.entryHandler.getRequestContentTimeout()); return entryContentsFuture.thenApply(v -> currentEntry); } } return null; }
/** * Reads the remaining contents of the ReadResult and returns an ordered List of InputStreams that contain its contents. * This will stop when either the given maximum length or the end of the ReadResult has been reached. * * @param maxLength The maximum number of bytes to read. * @param fetchTimeout A timeout to use when needing to fetch the contents of an entry that is not in the Cache. * @return A List containing InputStreams with the data read. */ default List<InputStream> readRemaining(int maxLength, Duration fetchTimeout) { int bytesRead = 0; ArrayList<InputStream> result = new ArrayList<>(); while (hasNext() && bytesRead < maxLength) { ReadResultEntry entry = next(); if (entry.getType() == ReadResultEntryType.EndOfStreamSegment || entry.getType() == ReadResultEntryType.Future) { // Reached the end. break; } else if (!entry.getContent().isDone()) { entry.requestContent(fetchTimeout); } result.add(entry.getContent().join().getData()); } return result; } }
@Override public boolean processEntry(ReadResultEntry entry) { if (!entry.getContent().isDone()) { // Make sure we only request content if it's not already available. entry.requestContent(getRequestContentTimeout()); } val contents = entry.getContent().join(); this.readLength.addAndGet(contents.getLength()); this.callback.accept(contents.getData(), entry.getStreamSegmentOffset(), contents.getLength()); return !this.cancellationToken.isCancellationRequested(); }
/**
 * Drains all consecutive cached entries from the ReadResult, starting at the given offset, and adds their
 * contents to the given list. Stops and returns the first non-cached entry encountered, or null if the
 * ReadResult was fully consumed.
 *
 * @param initialOffset The offset at which the first entry is expected to begin.
 * @param readResult    The ReadResult to drain.
 * @param cachedEntries Receives the contents of every cached entry, in order.
 * @return The first non-cached entry, or null if none was found.
 */
private ReadResultEntry collectCachedEntries(long initialOffset, ReadResult readResult, ArrayList<ReadResultEntryContents> cachedEntries) {
    long expectedOffset = initialOffset;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getType() != Cache) {
            // First non-cached entry: hand it back to the caller to deal with.
            return entry;
        }

        // Cached entries must form a contiguous range.
        Preconditions.checkState(entry.getStreamSegmentOffset() == expectedOffset, "Data returned from read was not contiguous.");
        ReadResultEntryContents content = entry.getContent().getNow(null);
        expectedOffset += content.getLength();
        cachedEntries.add(content);
    }

    // Everything in the ReadResult was cached.
    return null;
}
@Test public void testReadDirectlyFromStore() throws Exception { String segmentName = "testReadFromStore"; final int entries = 10; final byte[] data = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; UUID clientId = UUID.randomUUID(); StreamSegmentStore segmentStore = serviceBuilder.createStreamSegmentService(); fillStoreForSegment(segmentName, clientId, data, entries, segmentStore); ReadResult result = segmentStore.read(segmentName, 0, entries * data.length, Duration.ZERO).get(); int index = 0; while (result.hasNext()) { ReadResultEntry entry = result.next(); ReadResultEntryType type = entry.getType(); assertTrue(type == ReadResultEntryType.Cache || type == ReadResultEntryType.Future); // Each ReadResultEntryContents may be of an arbitrary length - we should make no assumptions. // Also put a timeout when fetching the response in case we get back a Future read and it never completes. ReadResultEntryContents contents = entry.getContent().get(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); byte next; while ((next = (byte) contents.getData().read()) != -1) { byte expected = data[index % data.length]; assertEquals(expected, next); index++; } } assertEquals(entries * data.length, index); }
/**
 * Processes a single ReadResultEntry by accumulating its (already-fetched) contents into an internal buffer,
 * then attempting to parse an entry header and, once a header is available, the entry body.
 *
 * @param entry The entry to process; its contents must already be fetched (precondition enforced below).
 * @return True if more entries are needed, false if processing is complete (or has failed).
 */
@Override
public boolean processEntry(ReadResultEntry entry) {
    if (this.result.isDone()) {
        // We are done. Nothing else to do.
        return false;
    }

    try {
        // Contents must have been fetched by the caller before handing the entry to us.
        Preconditions.checkArgument(entry.getContent().isDone(), "Entry Contents is not yet fetched.");
        ReadResultEntryContents contents = entry.getContent().join();
        // TODO: most of these transfers are from memory to memory. It's a pity that we need an extra buffer to do the copy.
        // TODO: https://github.com/pravega/pravega/issues/2924
        // Append this entry's bytes to the accumulated read buffer.
        this.readData.write(StreamHelpers.readAll(contents.getData(), contents.getLength()));
        if (this.header == null && this.readData.size() >= EntrySerializer.HEADER_LENGTH) {
            // We now have enough to read the header.
            this.header = this.serializer.readHeader(this.readData.getData());
        }

        if (this.header != null) {
            // Header known: try to process the body; processReadData returns true when done, so invert it
            // to answer "do we need more entries?".
            return !processReadData(this.readData.getData());
        }

        return true; // Not done yet.
    } catch (Throwable ex) {
        // Record the failure and stop processing.
        processError(ex);
        return false;
    }
}
/**
 * Blocks until all operations processed so far have been added to the ReadIndex and InMemoryOperationLog.
 * This simplifies test verification: the OperationProcessor commits operations to the ReadIndex and
 * InMemoryOperationLog asynchronously, after those operations were ack-ed, but the
 * OperationProcessor/MemoryStateUpdater still commits them in sequence. So we create a new segment, write
 * 1 byte to it, issue a read (actual/future) and wait for it to complete - once it does, everything prior
 * to it is guaranteed to have been committed as well.
 *
 * @param container The SegmentContainer to wait on.
 * @throws Exception If the operation chain fails or times out.
 */
private static void waitForOperationsInReadIndex(SegmentContainer container) throws Exception {
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    String segmentName = "test" + System.nanoTime();

    // Create a throwaway segment, append one byte, read it back, then clean the segment up.
    container.createStreamSegment(segmentName, null, timer.getRemaining())
             .thenCompose(v -> container.append(segmentName, new byte[1], null, timer.getRemaining()))
             .thenCompose(v -> container.read(segmentName, 0, 1, timer.getRemaining()))
             .thenCompose(readResult -> {
                 // Force the single entry's contents to be fetched; close the ReadResult once they arrive.
                 ReadResultEntry entry = readResult.next();
                 entry.requestContent(TIMEOUT);
                 return entry.getContent().thenRun(readResult::close);
             })
             .thenCompose(v -> container.deleteStreamSegment(segmentName, timer.getRemaining()))
             .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
/**
 * Returns (and removes) the next queued ReadResultEntry, advancing the current offset to match it.
 */
@Override
public ReadResultEntry next() {
    // Pop the head of the queue and remember where it starts.
    ReadResultEntry entry = results.remove(0);
    currentOffset = entry.getStreamSegmentOffset();
    return entry;
}
/** * Reads the remaining contents of the ReadResult into the given array. This will stop when the given target has been * filled or when the current end of the Segment has been reached. * * @param target A byte array where the ReadResult will be read into. * @param fetchTimeout A timeout to use when needing to fetch the contents of an entry that is not in the Cache. * @return The number of bytes read. */ @VisibleForTesting @SneakyThrows(IOException.class) default int readRemaining(byte[] target, Duration fetchTimeout) { int bytesRead = 0; while (hasNext() && bytesRead < target.length) { ReadResultEntry entry = next(); if (entry.getType() == ReadResultEntryType.EndOfStreamSegment || entry.getType() == ReadResultEntryType.Future) { // Reached the end. break; } else if (!entry.getContent().isDone()) { entry.requestContent(fetchTimeout); } ReadResultEntryContents contents = entry.getContent().join(); StreamHelpers.readAll(contents.getData(), target, bytesRead, Math.min(contents.getLength(), target.length - bytesRead)); bytesRead += contents.getLength(); } return bytesRead; }
// NOTE(review): this is an incomplete excerpt of a larger test method (the loop body is truncated below);
// keeping code as-is and only annotating.
// Walk the ReadResult of a sealed segment: entries at/after segmentLength must report EndOfStreamSegment,
// earlier entries must have readily-available contents.
while (readResult.hasNext()) {
    ReadResultEntry readEntry = readResult.next();
    if (readEntry.getStreamSegmentOffset() >= segmentLength) {
        // Past the sealed length: only EndOfStreamSegment entries are acceptable.
        Assert.assertEquals("Unexpected value for isEndOfStreamSegment when reaching the end of sealed segment " + segmentName,
                ReadResultEntryType.EndOfStreamSegment, readEntry.getType());
        // NOTE(review): assertSuppliedFutureThrows appears to be missing its future-supplier argument
        // (likely readEntry::getContent) - confirm against the original file.
        AssertExtensions.assertSuppliedFutureThrows(
                "ReadResultEntry.getContent() returned a result when reached the end of sealed segment " + segmentName,
                ex -> ex instanceof IllegalStateException);
    } else {
        // Before the sealed length: entry must not be EndOfStreamSegment and its contents must be
        // complete (and successful) without an explicit fetch.
        Assert.assertNotEquals("Unexpected value for isEndOfStreamSegment before reaching end of sealed segment " + segmentName,
                ReadResultEntryType.EndOfStreamSegment, readEntry.getType());
        Assert.assertTrue("getContent() did not return a completed future for segment" + segmentName,
                readEntry.getContent().isDone() && !readEntry.getContent().isCompletedExceptionally());
        ReadResultEntryContents readEntryContents = readEntry.getContent().join();
        // Track how far we have read so far.
        expectedCurrentOffset += readEntryContents.getLength();
        readLength += readEntryContents.getLength();
/** * Tests the behavior of Future Reads on an empty index that is sealed. */ @Test public void testFutureReadsEmptyIndex() throws Exception { @Cleanup TestContext context = new TestContext(); // Create an empty segment. This is the easiest way to ensure the Read Index is empty. long segmentId = createSegment(0, context); @Cleanup val rr = context.readIndex.read(segmentId, 0, 1, TIMEOUT); val futureReadEntry = rr.next(); Assert.assertEquals("Unexpected entry type.", ReadResultEntryType.Future, futureReadEntry.getType()); Assert.assertFalse("ReadResultEntry is completed.", futureReadEntry.getContent().isDone()); // Seal the segment. This should complete all future reads. context.metadata.getStreamSegmentMetadata(segmentId).markSealed(); context.readIndex.triggerFutureReads(Collections.singleton(segmentId)); Assert.assertTrue("Expected future read to be failed after sealing.", futureReadEntry.getContent().isCompletedExceptionally()); AssertExtensions.assertSuppliedFutureThrows( "Expected future read to be failed with appropriate exception.", futureReadEntry::getContent, ex -> ex instanceof StreamSegmentSealedException); }
/**
 * Copies the contents of the given (already-fetched) entry into the shared readContents stream.
 *
 * @param e The entry to process.
 * @return True to keep processing, false if copying failed.
 */
@Override
public boolean processEntry(ReadResultEntry e) {
    // Contents are expected to be readily available here; join() outside the try so a fetch failure propagates.
    ReadResultEntryContents contents = e.getContent().join();
    byte[] buffer = new byte[contents.getLength()];
    try {
        StreamHelpers.readAll(contents.getData(), buffer, 0, buffer.length);
        readContents.write(buffer);
        return true;
    } catch (Exception ex) {
        processError(ex);
        return false;
    }
}
// NOTE(review): statement excerpt from a larger test; annotating only.
// Read readBuffer.length bytes starting at offset 1 and force the first entry's contents to be fetched.
val readResult = dsa.read(1, readBuffer.length, TIMEOUT);
val firstEntry = readResult.next();
firstEntry.requestContent(TIMEOUT);
val entryContents = firstEntry.getContent().join();
// The single entry is expected to cover the whole requested range.
Assert.assertEquals("Unexpected number of bytes read.", readBuffer.length, entryContents.getLength());
// Copy the entry's data into readBuffer for comparison by subsequent assertions.
StreamHelpers.readAll(entryContents.getData(), readBuffer, 0, readBuffer.length);
// NOTE(review): this excerpt is garbled/incomplete (unbalanced braces, a dangling ')' after the first
// connection.send, and detached .exceptionally/else fragments). Code is preserved byte-for-byte; only
// annotations were added. Reconcile against the original request-processor source before editing.
final String operation = "readSegment";
// Classify the non-cached entry (if any) by type; each flag drives a different reply to the client.
boolean truncated = nonCachedEntry != null && nonCachedEntry.getType() == Truncated;
boolean endOfSegment = nonCachedEntry != null && nonCachedEntry.getType() == EndOfStreamSegment;
boolean atTail = nonCachedEntry != null && nonCachedEntry.getType() == Future;
// Truncated case: tell the client where the segment now starts.
connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, info.getStartOffset(), EMPTY_STACK_TRACE)))
        .exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, operation, wrapCancellationException(e)));
} else {
    // A read must always produce at least one entry.
    Preconditions.checkState(nonCachedEntry != null, "No ReadResultEntries returned from read!?");
    nonCachedEntry.requestContent(TIMEOUT);
    nonCachedEntry.getContent()
            .thenAccept(contents -> {
                // Package the fetched contents into a SegmentRead reply and record the metric.
                ByteBuffer data = copyData(Collections.singletonList(contents));
                SegmentRead reply = new SegmentRead(segment, nonCachedEntry.getStreamSegmentOffset(), false, endOfSegment, data);
                connection.send(reply);
                dynamicLogger.incCounterValue(globalMetricName(SEGMENT_READ_BYTES), reply.getData().array().length);
                connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, nonCachedEntry.getStreamSegmentOffset(), clientReplyStackTrace));
            } else {
                handleException(nonCachedEntry.getStreamSegmentOffset(), segment, operation, wrapCancellationException(e));
                .exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, operation, wrapCancellationException(e)));
/**
 * Triggers all the Future Reads in the given collection by re-resolving each one against the current
 * state of the index: either completing it with now-available content or failing it (sealed segment).
 *
 * @param futureReads The Future Reads to trigger.
 */
private void triggerFutureReads(Collection<FutureReadResultEntry> futureReads) {
    for (FutureReadResultEntry r : futureReads) {
        // Re-resolve the read at the same offset/length; the index should now have an answer for it.
        ReadResultEntry entry = getSingleReadResultEntry(r.getStreamSegmentOffset(), r.getRequestedReadLength());
        // NOTE(review): the message mentions StorageReadResultEntry but the check is a general null check;
        // the message may be stale - confirm intent.
        assert entry != null : "Serving a StorageReadResultEntry with a null result";
        // Resolving a Future read with another Future read would make no progress.
        assert !(entry instanceof FutureReadResultEntry) : "Serving a FutureReadResultEntry with another FutureReadResultEntry.";

        log.trace("{}: triggerFutureReads (Offset = {}, Type = {}).", this.traceObjectId, r.getStreamSegmentOffset(), entry.getType());
        if (entry.getType() == ReadResultEntryType.EndOfStreamSegment) {
            // We have attempted to read beyond the end of the stream. Fail the read request with the appropriate message.
            r.fail(new StreamSegmentSealedException(String.format("StreamSegment has been sealed at offset %d. There can be no more reads beyond this offset.", this.metadata.getLength())));
        } else {
            if (!entry.getContent().isDone()) {
                // Normally, all Future Reads are served from Cache, since they reflect data that has just been appended.
                // However, it's possible that after recovery, we get a read for some data that we do not have in the
                // cache (but it's not a tail read) - this data exists in Storage but our StorageLength has not yet been
                // updated. As such, the only solution we have is to return a FutureRead which will be satisfied when
                // the Writer updates the StorageLength (and trigger future reads). In that scenario, entry we get
                // will likely not be auto-fetched, so we need to request the content.
                entry.requestContent(this.config.getStorageReadDefaultTimeout());
            }

            // Propagate the resolved entry's outcome (success or failure) to the original Future read.
            CompletableFuture<ReadResultEntryContents> entryContent = entry.getContent();
            entryContent.thenAccept(r::complete);
            Futures.exceptionListener(entryContent, r::fail);
        }
    }
}
// NOTE(review): statement excerpt from a larger test; annotating only.
// Issue a read at the segment's current length: no data exists there yet, so a Future read is expected.
ReadResult rr = context.readIndex.read(notSealedId, context.metadata.getStreamSegmentMetadata(notSealedId).getLength(), 1, TIMEOUT);
ReadResultEntry fe = rr.next();
Assert.assertEquals("Expecting a Future Read.", ReadResultEntryType.Future, fe.getType());
Assert.assertFalse("Not expecting Future Read to be completed.", fe.getContent().isDone());
// Closing the ReadIndex must cancel any outstanding Future reads.
context.readIndex.close();
Assert.assertTrue("Expected the Future Read to have been cancelled when the ReadIndex was closed.", fe.getContent().isCancelled());
/**
 * Verifies a single read entry: its contents must be readily available and match the expected entry
 * at the current read position.
 *
 * @param e The entry to verify.
 * @return True to keep processing, false if verification failed.
 */
@Override
public boolean processEntry(ReadResultEntry e) {
    try {
        // By the time we get the entry, its contents must already be fetched and successful.
        Assert.assertTrue("Received Entry that is not ready to serve data yet.", Futures.isSuccessful(e.getContent()));
        ReadResultEntryContents contents = e.getContent().join();
        byte[] actual = new byte[contents.getLength()];
        StreamHelpers.readAll(contents.getData(), actual, 0, actual.length);

        // Compare against the expected entry at this position.
        int idx = readEntryCount.getAndIncrement();
        AssertExtensions.assertLessThan("Read too many entries.", entries.size(), idx);
        byte[] expected = entries.get(idx);
        Assert.assertArrayEquals(String.format("Unexpected read contents after reading %d entries.", idx + 1), expected, actual);
        readCount.incrementAndGet();
        return true;
    } catch (Exception ex) {
        processError(ex);
        return false;
    }
}
private void checkSegmentReads(String segmentName, AtomicLong expectedCurrentOffset, long segmentLength, StreamSegmentStore store, byte[] expectedData) throws Exception { @Cleanup ReadResult readResult = store.read(segmentName, expectedCurrentOffset.get(), (int) (segmentLength - expectedCurrentOffset.get()), TIMEOUT).join(); Assert.assertTrue("Empty read result for segment " + segmentName, readResult.hasNext()); // A more thorough read check is done in StreamSegmentContainerTests; here we just check if the data was merged correctly. while (readResult.hasNext()) { ReadResultEntry readEntry = readResult.next(); AssertExtensions.assertGreaterThan("getRequestedReadLength should be a positive integer for segment " + segmentName, 0, readEntry.getRequestedReadLength()); Assert.assertEquals("Unexpected value from getStreamSegmentOffset for segment " + segmentName, expectedCurrentOffset.get(), readEntry.getStreamSegmentOffset()); if (!readEntry.getContent().isDone()) { readEntry.requestContent(TIMEOUT); } readEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); Assert.assertNotEquals("Unexpected value for isEndOfStreamSegment for non-sealed segment " + segmentName, ReadResultEntryType.EndOfStreamSegment, readEntry.getType()); ReadResultEntryContents readEntryContents = readEntry.getContent().join(); byte[] actualData = new byte[readEntryContents.getLength()]; StreamHelpers.readAll(readEntryContents.getData(), actualData, 0, actualData.length); AssertExtensions.assertArrayEquals("Unexpected data read from segment " + segmentName + " at offset " + expectedCurrentOffset, expectedData, (int) expectedCurrentOffset.get(), actualData, 0, readEntryContents.getLength()); expectedCurrentOffset.addAndGet(readEntryContents.getLength()); } Assert.assertTrue("ReadResult was not closed post-full-consumption for segment" + segmentName, readResult.isClosed()); }
// NOTE(review): incomplete excerpt - firstEntry2 and secondEntry are defined outside this view; annotating only.
// Reading a Segment that does not exist (yet) should surface StreamSegmentNotExistsException on the first entry.
val rr1 = StreamSegmentStorageReader.read(si1, 0, SEGMENT_LENGTH, SEGMENT_APPEND_SIZE - 1, s);
val firstEntry1 = rr1.next();
Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, firstEntry1.getType());
AssertExtensions.assertSuppliedFutureThrows(
        "Unexpected exception when Segment does not exist initially.",
        () -> {
            firstEntry1.requestContent(TIMEOUT);
            return firstEntry1.getContent();
        },
        ex -> ex instanceof StreamSegmentNotExistsException);
// First entry of the second read succeeds...
firstEntry2.requestContent(TIMEOUT);
firstEntry2.getContent().join();
// ...but the Segment is deleted mid-read, so the second entry must fail with StreamSegmentNotExistsException.
Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, secondEntry.getType());
AssertExtensions.assertSuppliedFutureThrows(
        "Unexpected exception when Segment was deleted while reading.",
        () -> {
            secondEntry.requestContent(TIMEOUT);
            return secondEntry.getContent();
        },
        ex -> ex instanceof StreamSegmentNotExistsException);
/**
 * Verifies that the ReadIndex serves back, for every segment touched by the given operations, exactly the
 * data and length those operations are expected to have produced.
 *
 * @param operations The operations whose effects to verify.
 * @param readIndex  The ReadIndex to read from.
 */
void performReadIndexChecks(Collection<OperationWithCompletion> operations, ReadIndex readIndex) throws Exception {
    AbstractMap<Long, Integer> expectedLengths = getExpectedLengths(operations);
    AbstractMap<Long, InputStream> expectedData = getExpectedContents(operations);
    for (Map.Entry<Long, InputStream> e : expectedData.entrySet()) {
        // Snapshot the expected length before the loop below repurposes the map entry as an offset tracker.
        int expectedLength = expectedLengths.getOrDefault(e.getKey(), -1);
        @Cleanup
        ReadResult readResult = readIndex.read(e.getKey(), 0, expectedLength, TIMEOUT);

        int readLength = 0;
        while (readResult.hasNext()) {
            ReadResultEntryContents contents = readResult.next().getContent().join();
            int length = contents.getLength();
            readLength += length;

            // Track the running offset for this segment in the expectedLengths map.
            int streamSegmentOffset = expectedLengths.getOrDefault(e.getKey(), 0);
            expectedLengths.put(e.getKey(), streamSegmentOffset + length);
            AssertExtensions.assertStreamEquals(String.format("Unexpected data returned from ReadIndex. StreamSegmentId = %d, Offset = %d.",
                    e.getKey(), streamSegmentOffset), e.getValue(), contents.getData(), length);
        }

        Assert.assertEquals("Not enough bytes were read from the ReadIndex for StreamSegment " + e.getKey(), expectedLength, readLength);
    }
}