/**
 * Gets a value indicating the current length of the Segment, in bytes.
 *
 * @return The length.
 */
synchronized long length() {
    // The Segment's length is defined by the last offset of its final SegmentChunk (0 if there are none).
    SegmentChunk last = lastChunk();
    if (last == null) {
        return 0L;
    }

    return last.getLastOffset();
}
/**
 * Serializes an entry that indicates a number of SegmentChunks are concatenated at a specified offset.
 *
 * @param chunkCount   The number of SegmentChunks to concat.
 * @param concatOffset The concat offset.
 * @return A byte array containing the serialization.
 */
static byte[] serializeConcat(int chunkCount, long concatOffset) {
    // Encode "<count><sep><offset>" as the value for the concat key.
    String value = chunkCount + CONCAT_SEPARATOR + concatOffset;
    return combine(KEY_CONCAT, value);
}
private boolean canTruncate(SegmentChunk segmentChunk, long truncationOffset) { // We should only truncate those SegmentChunks that are entirely before the truncationOffset. An empty SegmentChunk // that starts exactly at the truncationOffset should be spared (this means we truncate the entire Segment), as // we need that SegmentChunk to determine the actual length of the Segment. return segmentChunk.getStartOffset() < truncationOffset && segmentChunk.getLastOffset() <= truncationOffset; }
/**
 * Creates a new instance of the SegmentChunk class with the same information as this one, but with a new offset.
 *
 * @param newOffset The new offset.
 * @return A new SegmentChunk.
 */
SegmentChunk withNewOffset(long newOffset) {
    // Copy over all state (length, sealed, existence); only the offset differs.
    SegmentChunk copy = new SegmentChunk(this.name, newOffset);
    copy.setLength(getLength());
    if (isSealed()) {
        copy.markSealed();
    }

    if (!exists()) {
        copy.markInexistent();
    }

    return copy;
}
private boolean shouldConcatNatively(RollingSegmentHandle source, RollingSegmentHandle target) { if (source.getHeaderHandle() == null) { // Source does not have a Header, hence we cannot do Header concat. return true; } SegmentChunk lastSource = source.lastChunk(); SegmentChunk lastTarget = target.lastChunk(); return lastSource != null && lastSource.getStartOffset() == 0 && lastTarget != null && !lastTarget.isSealed() && lastTarget.getLength() + lastSource.getLength() <= target.getRollingPolicy().getMaxLength(); }
private void createChunk(RollingSegmentHandle handle) throws StreamSegmentException { // Create new active SegmentChunk, only after which serialize the handle update and update the handle. // We ignore if the SegmentChunk exists and is empty - that's most likely due to a previous failed attempt. long segmentLength = handle.length(); SegmentChunk newSegmentChunk = SegmentChunk.forSegment(handle.getSegmentName(), segmentLength); try { this.baseStorage.create(newSegmentChunk.getName()); } catch (StreamSegmentExistsException ex) { checkIfEmptyAndNotSealed(ex, newSegmentChunk.getName()); } serializeNewChunk(handle, newSegmentChunk); val activeHandle = this.baseStorage.openWrite(newSegmentChunk.getName()); handle.addChunk(newSegmentChunk, activeHandle); log.debug("Created new SegmentChunk '{}' for '{}'.", newSegmentChunk, handle); }
private void refreshChunkExistence(RollingSegmentHandle handle) { // We check all SegmentChunks that we assume exist for actual existence (since once deleted, they can't come back). for (SegmentChunk s : handle.chunks()) { if (s.exists() && !this.baseStorage.exists(s.getName())) { s.markInexistent(); } } }
/**
 * Tests the basic Serialization-Deserialization for a Handle with no concat executed on it.
 */
@Test
public void testNormalSerialization() {
    final int chunkCount = 1000;
    val original = newHandle(chunkCount);
    val serializedData = serialize(original);

    // A round-trip through the serializer must preserve everything, and the deserialized handle must
    // record the full serialization length as its header length.
    val deserialized = HandleSerializer.deserialize(serializedData, original.getHeaderHandle());
    assertHandleEquals(original, deserialized, original.getHeaderHandle());
    Assert.assertEquals("getHeaderLength", serializedData.length, deserialized.getHeaderLength());
}
@Override public SegmentHandle openWrite(String segmentName) throws StreamSegmentException { long traceId = LoggerHelpers.traceEnter(log, "openWrite", segmentName); val handle = openHandle(segmentName, false); // Finally, open the Active SegmentChunk for writing. SegmentChunk last = handle.lastChunk(); if (last != null && !last.isSealed()) { val activeHandle = this.baseStorage.openWrite(last.getName()); handle.setActiveChunkHandle(activeHandle); } LoggerHelpers.traceLeave(log, "openWrite", traceId, handle); return handle; }
/**
 * Records a pending concat in the target handle's header: the number of source chunks and the offset
 * at which they will be attached.
 *
 * @param targetHandle The handle of the concat target.
 * @param sourceHandle The handle of the concat source.
 * @throws StreamSegmentException If the handle could not be updated.
 */
private void serializeBeginConcat(RollingSegmentHandle targetHandle, RollingSegmentHandle sourceHandle) throws StreamSegmentException {
    int sourceChunkCount = sourceHandle.chunks().size();
    long concatOffset = targetHandle.length();
    updateHandle(targetHandle, HandleSerializer.serializeConcat(sourceChunkCount, concatOffset));
}
@Override
public SegmentProperties getStreamSegmentInfo(String segmentName) throws StreamSegmentException {
    // Open for read only to gather metadata; name, seal status and length come straight off the handle.
    RollingSegmentHandle handle = (RollingSegmentHandle) openRead(segmentName);
    val infoBuilder = StreamSegmentInformation.builder()
            .name(handle.getSegmentName())
            .sealed(handle.isSealed())
            .length(handle.length());
    return infoBuilder.build();
}
private void checkConcatResult(RollingStorage s, RollingSegmentHandle targetHandle, String sourceSegmentName, int expectedChunkCount, int expectedLength) throws Exception { Assert.assertFalse("Expecting the source segment to not exist anymore.", s.exists(sourceSegmentName)); Assert.assertEquals("Unexpected number of SegmentChunks in target.", expectedChunkCount, targetHandle.chunks().size()); Assert.assertEquals("Unexpected target length.", expectedLength, targetHandle.length()); // Reload the handle and verify nothing strange happened in Storage. val targetHandle2 = (RollingSegmentHandle) s.openWrite(SEGMENT_NAME); Assert.assertEquals("Unexpected number of SegmentChunks in reloaded target handle.", expectedChunkCount, targetHandle2.chunks().size()); Assert.assertEquals("Unexpected reloaded target length.", targetHandle.length(), targetHandle2.length()); }
/**
 * Serializes a single SegmentChunk.
 *
 * @param segmentChunk The SegmentChunk to serialize.
 * @return A byte array containing the serialization.
 */
static byte[] serializeChunk(SegmentChunk segmentChunk) {
    // Entry format: key is the chunk's start offset, value is the chunk's name.
    String startOffset = Long.toString(segmentChunk.getStartOffset());
    return combine(startOffset, segmentChunk.getName());
}
@Override
protected Storage createStorage() {
    // Layer a RollingStorage over an in-memory base, then wrap for async access in tests.
    val rollingStorage = new RollingStorage(new InMemoryStorage(), DEFAULT_ROLLING_POLICY);
    return new AsyncStorageWrapper(rollingStorage, executorService());
}
/**
 * Reads and deserializes the Segment's Header into a RollingSegmentHandle.
 *
 * @param headerInfo   Properties of the Header Segment (provides length and seal status).
 * @param headerHandle A read handle for the Header Segment.
 * @return The deserialized handle, marked sealed if the Header itself is sealed.
 * @throws StreamSegmentException If the Header could not be read or deserialized.
 */
private RollingSegmentHandle readHeader(SegmentProperties headerInfo, SegmentHandle headerHandle) throws StreamSegmentException {
    // Read the entire Header contents in one shot.
    int headerLength = (int) headerInfo.getLength();
    byte[] serialization = new byte[headerLength];
    this.baseStorage.read(headerHandle, 0, serialization, 0, serialization.length);

    RollingSegmentHandle result = HandleSerializer.deserialize(serialization, headerHandle);
    if (headerInfo.isSealed()) {
        result.markSealed();
    }

    return result;
}
/**
 * Appends the serialization of a newly created SegmentChunk to the given handle's Header.
 *
 * @param handle          The handle to update.
 * @param newSegmentChunk The SegmentChunk to serialize.
 * @throws StreamSegmentException If the handle could not be updated.
 */
private void serializeNewChunk(RollingSegmentHandle handle, SegmentChunk newSegmentChunk) throws StreamSegmentException {
    byte[] serialization = HandleSerializer.serializeChunk(newSegmentChunk);
    updateHandle(handle, serialization);
}
@Override
public SegmentHandle create(String streamSegmentName) throws StreamSegmentException {
    // Delegate to the policy-aware overload, applying this Storage's default rolling policy.
    return create(streamSegmentName, this.defaultRollingPolicy);
}
/**
 * Verifies that all written data can be read back, starting from offset 0.
 *
 * @param writtenData The data expected to be in the Segment.
 * @param readHandle  A read handle for the Segment.
 * @param s           The Storage under test.
 * @throws StreamSegmentException If the data could not be read.
 */
private void checkWrittenData(byte[] writtenData, SegmentHandle readHandle, RollingStorage s) throws StreamSegmentException {
    // Delegate to the offset-aware overload, starting at the beginning of the Segment.
    checkWrittenData(writtenData, 0, readHandle, s);
}
/**
 * Creates a new test handle with the given number of chunks, using the default Segment name.
 *
 * @param chunkCount The number of SegmentChunks to populate the handle with.
 * @return The new handle.
 */
private RollingSegmentHandle newHandle(int chunkCount) {
    // Delegate to the name-aware overload, using the shared test Segment name.
    return newHandle(SEGMENT_NAME, chunkCount);
}
/**
 * Populates the given Segment with the default number and sizes of writes.
 *
 * @param s           The Storage under test.
 * @param writeHandle A write handle for the Segment.
 * @param writeStream Collects a copy of everything written, for later verification.
 * @throws Exception If the writes could not be performed.
 */
private void populate(RollingStorage s, SegmentHandle writeHandle, ByteArrayOutputStream writeStream) throws Exception {
    // Delegate to the fully-parameterized overload with the default test write configuration.
    populate(s, writeHandle, WRITE_COUNT, SMALL_WRITE_LENGTH, LARGE_WRITE_LENGTH, writeStream);
}