@Test(timeout = 20000) public void testReadSegmentTruncated() { // Set up PravegaRequestProcessor instance to execute read segment request against String streamSegmentName = "testReadSegment"; int readLength = 1000; StreamSegmentStore store = mock(StreamSegmentStore.class); ServerConnection connection = mock(ServerConnection.class); PravegaRequestProcessor processor = new PravegaRequestProcessor(store, mock(TableStore.class), connection); TestReadResultEntry entry1 = new TestReadResultEntry(ReadResultEntryType.Truncated, 0, readLength); List<ReadResultEntry> results = new ArrayList<>(); results.add(entry1); CompletableFuture<ReadResult> readResult = new CompletableFuture<>(); readResult.complete(new TestReadResult(0, readLength, results)); when(store.read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT)).thenReturn(readResult); StreamSegmentInformation info = StreamSegmentInformation.builder() .name(streamSegmentName) .length(1234) .startOffset(123) .build(); when(store.getStreamSegmentInfo(streamSegmentName, PravegaRequestProcessor.TIMEOUT)) .thenReturn(CompletableFuture.completedFuture(info)); // Execute and Verify readSegment calling stack in connection and store is executed as design. processor.readSegment(new WireCommands.ReadSegment(streamSegmentName, 0, readLength, "")); verify(store).read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT); verify(store).getStreamSegmentInfo(streamSegmentName, PravegaRequestProcessor.TIMEOUT); verify(connection).send(new WireCommands.SegmentIsTruncated(0, streamSegmentName, info.getStartOffset(), "")); verifyNoMoreInteractions(connection); verifyNoMoreInteractions(store); }
/**
 * Gets current information about a StreamSegment as stored in HDFS: the length and sealed status
 * are derived from the backing file's {@link FileStatus}. The lookup is retried per HDFS_RETRY.
 *
 * @param streamSegmentName The name of the StreamSegment to query.
 * @return A SegmentProperties describing the segment.
 * @throws StreamSegmentException If the segment could not be located or HDFS access failed
 *                                (converted via HDFSExceptionHelpers).
 */
@Override
public SegmentProperties getStreamSegmentInfo(String streamSegmentName) throws StreamSegmentException {
    ensureInitializedAndNotClosed();
    long traceId = LoggerHelpers.traceEnter(log, "getStreamSegmentInfo", streamSegmentName);
    try {
        return HDFS_RETRY.run(() -> {
            // Locate the file backing this segment; length comes from its FileStatus, sealed
            // status from the file path.
            FileStatus last = findStatusForSegment(streamSegmentName, true);
            boolean isSealed = isSealed(last.getPath());
            StreamSegmentInformation result = StreamSegmentInformation.builder().name(streamSegmentName).length(last.getLen()).sealed(isSealed).build();
            LoggerHelpers.traceLeave(log, "getStreamSegmentInfo", traceId, streamSegmentName, result);
            return result;
        });
    } catch (IOException e) {
        // Translate HDFS IO errors into the appropriate StreamSegmentException.
        throw HDFSExceptionHelpers.convertException(streamSegmentName, e);
    } catch (RetriesExhaustedException e) {
        // When retries are exhausted, translate the underlying cause rather than the wrapper.
        throw HDFSExceptionHelpers.convertException(streamSegmentName, e.getCause());
    }
}
.name(mapOp.getStreamSegmentName()) .startOffset(startOffset) .length(storageLength) .sealed(true) .attributes(createAttributes()) .build()); updateMap.setStreamSegmentId(mapOp.getStreamSegmentId()); txn2.preProcessOperation(updateMap); .name(mapOp.getStreamSegmentName() + "_pinned") .startOffset(startOffset) .length(storageLength) .sealed(true) .attributes(createAttributes()) .build()); pinnedMap.markPinned(); txn2.preProcessOperation(pinnedMap);
final String segmentName = "Segment"; final long segmentId = 123; final SegmentProperties storageInfo = StreamSegmentInformation.builder().name(segmentName).length(123).sealed(true).build(); final long metadataLength = storageInfo.getLength() + 1; val initialSegmentInfo = StreamSegmentInformation .builder() .name(segmentName) .startOffset(0L) .length(1L) .attributes(toAttributes(createAttributeUpdates(ATTRIBUTE_COUNT))) .build(); context.getMetadataStore().updateSegmentInfo(toMetadata(segmentId, initialSegmentInfo), TIMEOUT).join(); Map<UUID, Long> expectedAttributes = initialSegmentInfo.getAttributes();
.name(segmentName) .length(123) .sealed(true) .attributes(toAttributes(createAttributeUpdates(ATTRIBUTE_COUNT))) .build();
.name(segmentName) .length(getSegmentLength.apply(segmentName)) .startOffset(getSegmentStartOffset.apply(segmentName)) .sealed(i % 2 == 0) .attributes(toAttributes(createAttributeUpdates(ATTRIBUTE_COUNT))) .build();
val si1 = StreamSegmentInformation.builder().name(SEGMENT_NAME).length(SEGMENT_LENGTH).startOffset(0).sealed(true).build(); val rr1 = StreamSegmentStorageReader.read(si1, 0, SEGMENT_LENGTH, SEGMENT_APPEND_SIZE - 1, s); val firstEntry1 = rr1.next(); val si2 = StreamSegmentInformation.builder().name(SEGMENT_NAME).length(SEGMENT_LENGTH).startOffset(0).sealed(true).build(); val rr2 = StreamSegmentStorageReader.read(si2, 0, SEGMENT_LENGTH, SEGMENT_APPEND_SIZE - 1, s);
/**
 * Polls Storage until the given Segment's storage-side state catches up with the supplied
 * Metadata properties (same sealed status, at-least-equal length, and the test factory's tracked
 * truncation offset at or past the Metadata StartOffset). Completes when the condition is met,
 * or fails with TimeoutException once TIMEOUT elapses.
 */
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties metadataProps, TestContext context) {
    if (metadataProps.getLength() == 0) {
        // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this.
        return CompletableFuture.completedFuture(null);
    }

    // Storage is considered "caught up" when all three conditions below hold.
    Function<SegmentProperties, Boolean> meetsConditions = storageProps ->
            storageProps.isSealed() == metadataProps.isSealed()
                    && storageProps.getLength() >= metadataProps.getLength()
                    && context.storageFactory.truncationOffsets.getOrDefault(metadataProps.getName(), 0L) >= metadataProps.getStartOffset();

    AtomicBoolean canContinue = new AtomicBoolean(true);
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    return Futures.loop(
            canContinue::get,
            () -> Futures.exceptionallyExpecting(
                    context.storage.getStreamSegmentInfo(metadataProps.getName(), TIMEOUT),
                    ex -> ex instanceof StreamSegmentNotExistsException,
                    // A not-yet-created segment is treated as empty (zero-length) properties.
                    StreamSegmentInformation.builder().name(metadataProps.getName()).build())
                    .thenCompose(storageProps -> {
                        if (meetsConditions.apply(storageProps)) {
                            // Done: stop the loop and complete immediately.
                            canContinue.set(false);
                            return CompletableFuture.completedFuture(null);
                        } else if (!timer.hasRemaining()) {
                            return Futures.failedFuture(new TimeoutException());
                        } else {
                            // Not there yet: back off briefly before re-checking.
                            return Futures.delayedFuture(Duration.ofMillis(10), executorService());
                        }
                    }).thenRun(Runnables.doNothing()),
            executorService());
}
/** * Tests the getStreamSegmentInfo() method. */ @Test public void testGetStreamSegmentInfo() { @Cleanup val context = new TestContext(); context.container.startAsync().awaitRunning(); // Non-existent segment. AssertExtensions.assertSuppliedFutureThrows( "Unexpected exception when the segment does not exist.", () -> context.container.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException); // Create a segment, add some data, set some attributes, "truncate" it and then seal it. val storageInfo = context.storage.create(SEGMENT_NAME, TIMEOUT) .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[10]), 10, TIMEOUT)) .thenCompose(v -> context.storage.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT)).join(); val expectedInfo = StreamSegmentInformation.from(storageInfo) .startOffset(storageInfo.getLength() / 2) .attributes(ImmutableMap.of(UUID.randomUUID(), 100L, Attributes.EVENT_COUNT, 1L)) .build(); // Fetch the SegmentInfo from the ReadOnlyContainer and verify it is as expected. val actual = context.container.getStreamSegmentInfo(SEGMENT_NAME, TIMEOUT).join(); Assert.assertEquals("Unexpected Name.", expectedInfo.getName(), actual.getName()); Assert.assertEquals("Unexpected Length.", expectedInfo.getLength(), actual.getLength()); Assert.assertEquals("Unexpected Sealed status.", expectedInfo.isSealed(), actual.isSealed()); }
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties sp, StreamSegmentStore readOnlyStore) { if (sp.getLength() == 0) { // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this. return CompletableFuture.completedFuture(null); } TimeoutTimer timer = new TimeoutTimer(TIMEOUT); AtomicBoolean tryAgain = new AtomicBoolean(true); return Futures.loop( tryAgain::get, () -> Futures .exceptionallyExpecting(readOnlyStore.getStreamSegmentInfo(sp.getName(), TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException, StreamSegmentInformation.builder().name(sp.getName()).build()) .thenCompose(storageProps -> { if (sp.isSealed()) { tryAgain.set(!storageProps.isSealed()); } else { tryAgain.set(sp.getLength() != storageProps.getLength()); } if (tryAgain.get() && !timer.hasRemaining()) { return Futures.<Void>failedFuture(new TimeoutException( String.format("Segment %s did not complete in Storage in the allotted time.", sp.getName()))); } else { return Futures.delayedFuture(Duration.ofMillis(100), executorService()); } }), executorService()); }
/** * Reads a range of bytes from a Segment in Storage. * * @param handle A SegmentHandle pointing to the Segment to read from. * @param startOffset The first offset within the Segment to read from. * @param maxReadLength The maximum number of bytes to read. * @param readBlockSize The maximum number of bytes to read at once (the returned StreamSegmentReadResult will be * broken down into Entries smaller than or equal to this size). * @param storage A ReadOnlyStorage to execute the reads against. * @return A StreamSegmentReadResult that can be used to process the data. This will be made up of ReadResultEntries * of the following types: Storage, Truncated or EndOfSegment. */ public static StreamSegmentReadResult read(SegmentHandle handle, long startOffset, int maxReadLength, int readBlockSize, ReadOnlyStorage storage) { Exceptions.checkArgument(startOffset >= 0, "startOffset", "startOffset must be a non-negative number."); Exceptions.checkArgument(maxReadLength >= 0, "maxReadLength", "maxReadLength must be a non-negative number."); Preconditions.checkNotNull(handle, "handle"); Preconditions.checkNotNull(storage, "storage"); String traceId = String.format("Read[%s]", handle.getSegmentName()); // Build a SegmentInfo using the information we are given. If startOffset or length are incorrect, the underlying // ReadOnlyStorage will throw appropriate exceptions at the caller. StreamSegmentInformation segmentInfo = StreamSegmentInformation.builder().name(handle.getSegmentName()) .startOffset(startOffset) .length(startOffset + maxReadLength) .build(); return new StreamSegmentReadResult(startOffset, maxReadLength, new SegmentReader(segmentInfo, handle, readBlockSize, storage), traceId); }
/**
 * Test the {@link IndexReader#getLastIndexedOffset(SegmentProperties)} method.
 */
@Test
public void testTableAttributes() {
    val reader = newReader();

    // With no attributes set, both accessors default to 0.
    val emptyInfo = StreamSegmentInformation.builder().name("s").build();
    Assert.assertEquals("Unexpected value for TABLE_INDEX_OFFSET when attribute is not present.",
            0, reader.getLastIndexedOffset(emptyInfo));
    Assert.assertEquals("Unexpected value for TABLE_ENTRY_COUNT when attribute is not present.",
            0, reader.getEntryCount(emptyInfo));

    // With all three table attributes set, each accessor returns its attribute's value.
    val attributes = ImmutableMap.<UUID, Long>builder()
            .put(Attributes.TABLE_INDEX_OFFSET, 123456L)
            .put(Attributes.TABLE_ENTRY_COUNT, 2345L)
            .put(Attributes.TABLE_BUCKET_COUNT, 3456L)
            .build();
    val si = StreamSegmentInformation.builder().name("s").attributes(attributes).build();
    Assert.assertEquals("Unexpected value for TABLE_INDEX_OFFSET when attribute present.",
            123456, reader.getLastIndexedOffset(si));
    Assert.assertEquals("Unexpected value for TABLE_ENTRY_COUNT when attribute present.",
            2345, reader.getEntryCount(si));
    Assert.assertEquals("Unexpected value for TABLE_BUCKET_COUNT when attribute present.",
            3456, reader.getBucketCount(si));
}
/**
 * Builds a batch of Map + Append + Merge operations for the given number of segments, assigns
 * consecutive sequence numbers, feeds them all to the given MemoryStateUpdater, and returns them.
 */
private ArrayList<Operation> populate(MemoryStateUpdater updater, int segmentCount, int operationCountPerType) throws DataCorruptionException {
    ArrayList<Operation> result = new ArrayList<>();
    long nextOffset = 0;
    for (int segmentId = 0; segmentId < segmentCount; segmentId++) {
        for (int opIndex = 0; opIndex < operationCountPerType; opIndex++) {
            // Map the segment (length is arbitrary, derived from the loop counters).
            StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(
                    StreamSegmentInformation.builder().name("a").length(segmentId * opIndex).build());
            mapOp.setStreamSegmentId(segmentId);
            result.add(mapOp);

            // Append some data at the current running offset.
            StreamSegmentAppendOperation appendOp = new StreamSegmentAppendOperation(
                    segmentId, Integer.toString(segmentId).getBytes(), null);
            appendOp.setStreamSegmentOffset(nextOffset);
            nextOffset += appendOp.getData().length;
            result.add(appendOp);

            // Merge another segment into this one.
            result.add(new MergeSegmentOperation(segmentId, opIndex));
        }
    }

    // Sequence numbers follow insertion order; then push everything through the updater.
    for (int i = 0; i < result.size(); i++) {
        result.get(i).setSequenceNumber(i);
    }
    updater.process(result.iterator());
    return result;
}
/**
 * Creates a number of Transaction Segments in the given Metadata and OperationLog.
 *
 * @return A map from each new Transaction's SegmentId to its parent StreamSegmentId.
 */
AbstractMap<Long, Long> createTransactionsWithOperations(Set<Long> streamSegmentIds, int transactionsPerStreamSegment,
                                                         ContainerMetadata containerMetadata, OperationLog durableLog) {
    val transactionToParent = new HashMap<Long, Long>();
    for (long parentId : streamSegmentIds) {
        String parentName = containerMetadata.getStreamSegmentMetadata(parentId).getName();
        for (int i = 0; i < transactionsPerStreamSegment; i++) {
            // Generate a unique transaction name off the parent and map it via the DurableLog.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(parentName, UUID.randomUUID());
            StreamSegmentMapOperation op = new StreamSegmentMapOperation(
                    StreamSegmentInformation.builder().name(transactionName).build());
            durableLog.add(op, TIMEOUT).join();
            transactionToParent.put(op.getStreamSegmentId(), parentId);
        }
    }

    return transactionToParent;
}
/**
 * Tests the read() method on a truncated segment.
 */
@Test
public void testTruncatedSegment() throws Exception {
    @Cleanup
    val s = createStorage();
    val writtenData = populate(s);
    // Report the segment as truncated at SEGMENT_APPEND_SIZE via its StartOffset.
    val si = StreamSegmentInformation.builder().name(SEGMENT_NAME).length(writtenData.length).startOffset(SEGMENT_APPEND_SIZE)
            .sealed(false).build();

    // 1. Read from a truncated offset (before StartOffset).
    val truncatedResult = StreamSegmentStorageReader.read(si, 0, writtenData.length + 1, SEGMENT_APPEND_SIZE - 1, s);
    verifyReadResult(truncatedResult, si, 0, 0, writtenData);

    // 2. Read from a non-truncated offset (at StartOffset): expect the remaining data.
    val nonTruncatedResult = StreamSegmentStorageReader.read(si, SEGMENT_APPEND_SIZE, writtenData.length + 1, SEGMENT_APPEND_SIZE - 1, s);
    verifyReadResult(nonTruncatedResult, si, SEGMENT_APPEND_SIZE, writtenData.length - SEGMENT_APPEND_SIZE, writtenData);
}
/**
 * Builds a StreamSegmentInformation for the given Segment from its backing S3 object: length
 * from the object's content length, last-modified from the object's timestamp, and sealed
 * status derived from the object's ACL (sealed when no grant permits writing).
 *
 * @param streamSegmentName The name of the StreamSegment to query.
 * @return A StreamSegmentInformation describing the segment.
 */
private StreamSegmentInformation doGetStreamSegmentInfo(String streamSegmentName) {
    long traceId = LoggerHelpers.traceEnter(log, "getStreamSegmentInfo", streamSegmentName);
    S3ObjectMetadata result = client.getObjectMetadata(config.getBucket(), config.getRoot() + streamSegmentName);

    AccessControlList acls = client.getObjectAcl(config.getBucket(), config.getRoot() + streamSegmentName);
    // The segment is writable if any grant carries WRITE or a "stronger" permission.
    // NOTE(review): this relies on Permission's compareTo ordering placing WRITE and
    // broader permissions (e.g. FULL_CONTROL) at or above WRITE — confirm against the
    // S3 client's Permission definition.
    boolean canWrite = acls.getGrants().stream().anyMatch(grant -> grant.getPermission().compareTo(Permission.WRITE) >= 0);

    StreamSegmentInformation information = StreamSegmentInformation.builder()
            .name(streamSegmentName)
            .length(result.getContentLength())
            .sealed(!canWrite)
            .lastModified(new ImmutableDate(result.getLastModified().toInstant().toEpochMilli()))
            .build();
    LoggerHelpers.traceLeave(log, "getStreamSegmentInfo", traceId, streamSegmentName);
    return information;
}