/**
 * Populates {@code dlm} with three log segments for non-blocking-read tests.
 *
 * <p>Each segment receives {@code segmentSize - 1} records followed by one
 * final record. When {@code recover} is true the segment is left in-progress
 * (the writer is aborted) and then repaired through log-segment recovery;
 * otherwise the segment is cleanly completed.
 *
 * @param conf        configuration (currently unused inside this helper)
 * @param dlm         log manager to write through
 * @param recover     true to abort + recover each segment, false to complete it
 * @param segmentSize number of records per segment
 */
static void writeRecordsForNonBlockingReads(DistributedLogConfiguration conf,
                                            DistributedLogManager dlm,
                                            boolean recover,
                                            long segmentSize)
        throws Exception {
    long txId = 1;
    for (int segment = 0; segment < 3; segment++) {
        BKAsyncLogWriter writer = (BKAsyncLogWriter) dlm.startAsyncLogSegmentNonPartitioned();
        // Fill the segment with (segmentSize - 1) records, awaiting each write.
        for (long recordNum = 1; recordNum < segmentSize; recordNum++) {
            FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
        }
        if (recover) {
            // Write one more record, then abort so the segment stays
            // in-progress, and run recovery through a fresh write handler.
            FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
            TimeUnit.MILLISECONDS.sleep(300);
            writer.abort();
            LOG.debug("Recovering Segments");
            BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
            FutureUtils.result(blplm.recoverIncompleteLogSegments());
            FutureUtils.result(blplm.asyncClose());
            LOG.debug("Recovered Segments");
        } else {
            // Cleanly seal the segment after a final record.
            FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
            writer.closeAndComplete();
        }
        TimeUnit.MILLISECONDS.sleep(300);
    }
}
/**
 * Flushes and commits buffered writes, then records the committed position.
 *
 * @param metadata ignored by this implementation (present for API parity)
 * @throws IOException if the underlying flush fails with an I/O error
 */
public void force(boolean metadata) throws IOException {
    long committedPos = 0;
    try {
        committedPos = Await.result(logWriter.flushAndCommit());
    } catch (IOException ioe) {
        // Let I/O failures propagate with their original type.
        throw ioe;
    } catch (Exception ex) {
        // Anything else is unexpected; log and wrap it.
        LOG.error("unexpected exception in AppendOnlyStreamWriter.force ", ex);
        throw new UnexpectedException("unexpected exception in AppendOnlyStreamWriter.force", ex);
    }
    // Publish the new sync position under the shared lock object.
    synchronized (syncPos) {
        syncPos[0] = committedPos;
    }
}
/**
 * Returns the cached log segment writer, or {@code null} when the cached
 * writer is in error and roll-on-error is enabled (so a new segment will be
 * rolled instead of reusing the faulted one).
 *
 * @throws WriteException if this writer was already closed due to an error
 */
private BKLogSegmentWriter getCachedLogSegmentWriter() throws WriteException {
    if (encounteredError) {
        throw new WriteException(bkDistributedLogManager.getStreamName(),
                "writer has been closed due to error.");
    }
    BKLogSegmentWriter cached = getCachedLogWriter();
    boolean faultedAndRollable = (cached != null)
            && cached.isLogSegmentInError()
            && !disableRollOnSegmentError;
    return faultedAndRollable ? null : cached;
}
/**
 * Flushes and commits through the current (or rolling) log segment writer.
 *
 * @return a future yielding the last transaction id; completes immediately
 *         with the last tx id when there is no segment writer at all
 */
Future<Long> flushAndCommit() {
    Future<BKLogSegmentWriter> pendingWriter;
    synchronized (this) {
        // Prefer the in-flight rolling writer if a roll is underway.
        pendingWriter = (this.rollingFuture != null)
                ? this.rollingFuture
                : getCachedLogWriterFuture();
    }
    if (pendingWriter == null) {
        // Nothing to flush; report the last committed tx id.
        return Future.value(getLastTxId());
    }
    return pendingWriter.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Long>>() {
        @Override
        public Future<Long> apply(BKLogSegmentWriter segmentWriter) {
            return segmentWriter.flushAndCommit();
        }
    });
}
private List<Future<DLSN>> asyncWriteBulk(List<LogRecord> records) { final ArrayList<Future<DLSN>> results = new ArrayList<Future<DLSN>>(records.size()); Iterator<LogRecord> iterator = records.iterator(); while (iterator.hasNext()) { LogRecord record = iterator.next(); Future<DLSN> future = asyncWrite(record, !iterator.hasNext()); results.add(future); // Abort early if an individual write has already failed. Option<Try<DLSN>> result = future.poll(); if (result.isDefined() && result.get().isThrow()) { break; } } if (records.size() > results.size()) { appendCancelledFutures(results, records.size() - results.size()); } return results; }
// Stand up a log manager for this stream, write two records, and seal the
// segment so it becomes a completed (reader-visible) log segment.
DistributedLogManager dlm = createNewDLM(conf, name);
BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
// NOTE(review): these two writes return Futures that are not awaited;
// presumably closeAndComplete() flushes them before sealing -- confirm.
writer.write(DLMTestUtil.getLogRecordInstance(1L));
writer.write(DLMTestUtil.getLogRecordInstance(2L));
writer.closeAndComplete();
// Fragment: interleaves two writers; from the second outer pass onward both
// writers are forced to roll onto a new log segment around one record each.
// NOTE(review): no closing braces are visible in this fragment, so the
// original `if (j > 1)` block may end after the setForceRolling(true) calls;
// confirm the nesting against the full file.
for (long j = 1; j <= 4; j++) {
    if (j > 1) {
        writer0.setForceRolling(true);
        writer1.setForceRolling(true);
        // One record per writer, each landing under forced roll.
        FutureUtils.result(writer1.write(DLMTestUtil.getLogRecordInstance(txid++)));
        FutureUtils.result(writer0.write(DLMTestUtil.getLogRecordInstance(txid++)));
        writer0.setForceRolling(false);
        writer1.setForceRolling(false);
        // Control records carrying txid-1 follow the user records.
        FutureUtils.result(writer1.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        FutureUtils.result(writer0.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        // Lazily open the reader on first use.
        if (null == reader0) {
            reader0 = dlmreader0.getInputStream(1);
// Write one record and await it, then capture the underlying segment writer.
Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txId++)));
BKLogSegmentWriter logWriter = writer.getCachedLogWriter();
// This second write is deliberately not awaited; per the fail() message the
// test expects closeAndComplete() below to throw because the segment's
// ledger has been fenced (presumably earlier in the enclosing test body --
// confirm against the full file).
writer.write(DLMTestUtil.getLogRecordInstance(txId++));
writer.closeAndComplete();
fail("Should fail to complete a log segment when its ledger is fenced");
} catch (IOException ioe) {
// Fragment: on the 5th inner iteration, force both writers to roll and
// override the min-timestamp-to-keep so earlier segments become candidates
// for retention-based cleanup.
for (int k = 1; k <= 10; k++) {
    if (k == 5) {
        writer0.setForceRolling(true);
        writer0.overRideMinTimeStampToKeep(retentionPeriodOverride);
        writer1.setForceRolling(true);
        writer1.overRideMinTimeStampToKeep(retentionPeriodOverride);
        DLSN dlsn1 = FutureUtils.result(writer1.write(DLMTestUtil.getLogRecordInstance(txid++)));
        LOG.info("writer1 write record {}", dlsn1);
        DLSN dlsn0 = FutureUtils.result(writer0.write(DLMTestUtil.getLogRecordInstance(txid++)));
        LOG.info("writer0 write record {}", dlsn0);
        // NOTE(review): this second `if (k == 5)` is redundant as nested
        // here; no closing braces are visible in the fragment, so the
        // original likely closed the first block before this one -- confirm.
        if (k == 5) {
            writer0.setForceRolling(false);
            writer1.setForceRolling(false);
            retentionPeriodOverride = System.currentTimeMillis();
            // Control records (txid-1) follow, then both writers close.
            FutureUtils.result(writer1.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
            FutureUtils.result(writer0.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
            writer0.close();
            writer1.close();
// First log segment: a single control record, then seal the segment.
LogRecord record = DLMTestUtil.getLogRecordInstance(txid++);
record.setControl();
Await.result(writer.writeControlRecord(record));
writer.closeAndComplete();
LOG.info("Completed first log segment");
// Second log segment: one regular (user-visible) record.
Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
LOG.info("Completed second log segment");
// Third log segment: another control record. Reassign the existing local:
// the original re-declared `LogRecord record` in the same scope, which is a
// duplicate-local-variable compile error in Java.
record = DLMTestUtil.getLogRecordInstance(txid++);
record.setControl();
Await.result(writer.write(record));
writer.closeAndComplete();
LOG.info("Completed third log segment");
writer.close();
dlm.close();
Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
// NOTE(review): flushAndCommit() returns a Future that is dropped here;
// presumably the test does not need the flush to have completed -- confirm.
writer.flushAndCommit();
BKLogSegmentWriter perStreamWriter = writer.getCachedLogWriter();
DistributedLock lock = perStreamWriter.getLock();
// Close the segment writer's lock out from under it so later writes fail.
FutureUtils.result(lock.asyncClose());
// The first write below is fired without awaiting; the awaited second write
// is the one expected (per the catch clause) to surface a LockingException.
writer.write(DLMTestUtil.getLogRecordInstance(txid++));
Await.result(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
fail("should have thrown");
} catch (LockingException ex) {
writer.close();
dlm.close();
// Write NUM_RECORDS records, checking the writer's last tx id after each.
// NOTE(review): as extracted, the markEndOfStream calls sit inside the for
// loop and the `}` before the catch has no matching visible `try`; the
// original file most likely closed the loop after the assert and opened a
// `try {` before the failing write -- confirm against the full file.
int i = 1;
for (; i <= NUM_RECORDS; i++) {
    Await.result(writer.write(DLMTestUtil.getLogRecordInstance(i)));
    assertEquals("last tx id should become " + i, i, writer.getLastTxId());
    // Sealing twice exercises markEndOfStream idempotence; the write after
    // sealing must be rejected with EndOfStreamException.
    Await.result(writer.markEndOfStream());
    Await.result(writer.markEndOfStream());
    Await.result(writer.write(DLMTestUtil.getLogRecordInstance(i)));
    fail("Should have thrown");
} catch (EndOfStreamException ex) {
// Each outer pass opens a new log segment whose first entry is a control
// record; verify it lands at entry 0 of the expected segment sequence number.
final long currentLogSegmentSeqNo = i + 1;
BKAsyncLogWriter writer = (BKAsyncLogWriter)(dlm.startAsyncLogSegmentNonPartitioned());
DLSN dlsn = Await.result(writer.writeControlRecord(new LogRecord(txid++, "control".getBytes(UTF_8))));
assertEquals(currentLogSegmentSeqNo, dlsn.getLogSegmentSequenceNo());
assertEquals(0, dlsn.getEntryId());
// Follow the control record with nine large user records.
for (long j = 1; j < 10; j++) {
    final LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
    Await.result(writer.write(record));
    // NOTE(review): closeAndComplete() appears inside the loop as extracted;
    // no closing brace is visible, so the original may have closed the loop
    // first -- confirm against the full file.
    writer.closeAndComplete();
// Attach a completion listener to the async write.
// NOTE(review): the callback shown here blocks on Await.result inside
// onSuccess, and the anonymous class never closes within this fragment --
// confirm the structure (and the safety of blocking in a callback) against
// the full file.
Future<DLSN> writeFuture = writer.write(DLMTestUtil.getLogRecordInstance(entryId)).addEventListener(new FutureEventListener<DLSN>() {
    @Override
    public void onSuccess(DLSN value) {
        // On success, issue a follow-up write and log its DLSN.
        DLSN newDLSN = Await.result(writer.write(DLMTestUtil.getLogRecordInstance(numLogSegments + i)));
        logger.info("Completed entry {} : {}", numLogSegments + i, newDLSN);
        ensureOnlyOneInprogressLogSegments(segments);
        writer.close();
        dlm.close();
// Fragment: on the 3rd inner iteration, force both writers through segment
// recovery around a single record each.
// NOTE(review): no closing braces are visible in this fragment, so the
// original `if (k == 3)` block may end after the setForceRecovery(true)
// calls; confirm the nesting against the full file.
for (int k = 1; k <= 6; k++) {
    if (k == 3) {
        writer0.setForceRecovery(true);
        writer1.setForceRecovery(true);
        DLSN dlsn1 = FutureUtils.result(writer1.write(DLMTestUtil.getLogRecordInstance(txid++)));
        LOG.info("writer1 write record {} - txid = {}", dlsn1, txid-1);
        DLSN dlsn0 = FutureUtils.result(writer0.write(DLMTestUtil.getLogRecordInstance(txid++)));
        LOG.info("writer0 write record {} - txid = {}", dlsn0, txid-1);
        writer0.setForceRecovery(false);
        writer1.setForceRecovery(false);
        // Control records carrying txid-1 follow the user records.
        FutureUtils.result(writer1.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        FutureUtils.result(writer0.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        // Lazily open the reader on first use.
        if (null == reader0) {
            reader0 = dlmreader0.getInputStream(1);
// A single-record write should land in the first log segment.
Future<DLSN> result = writer.write(record);
DLSN dlsn = validateFutureSucceededAndGetResult(result);
assertEquals(1, dlsn.getLogSegmentSequenceNo());
// Build a bulk batch containing one normal record and one record exceeding
// MAX_LOGRECORD_SIZE.
records.add(DLMTestUtil.getLogRecordInstance(txid++, 2048));
records.add(DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1));
// First bulk write: the bulk future itself succeeds, yielding per-record
// futures; only the first per-record future is taken here.
futureResults = writer.writeBulk(records);
results = validateFutureSucceededAndGetResult(futureResults);
result = results.get(0);
// Re-submitting the same batch must fail with a WriteException --
// presumably because the oversized record put the writer in an error state;
// confirm against the writer's error-handling semantics.
futureResults = writer.writeBulk(records);
validateFutureFailed(futureResults, WriteException.class);
writer.closeAndComplete();
dlm.close();
// Write ten large records, then truncate and inspect segment metadata.
// NOTE(review): as extracted, writer.close()/truncate appear inside the loop
// and the per-record future `dlsn` is never awaited; no closing brace is
// visible, so the original likely closed the loop before writer.close() --
// confirm against the full file.
for (long j = 1; j <= 10; j++) {
    LogRecord record = DLMTestUtil.getLargeLogRecordInstance(txid++);
    Future<DLSN> dlsn = writer.write(record);
    writer.close();
    // Truncate the log up to truncDLSN and assert the operation succeeded.
    Assert.assertTrue(Await.result(writer.truncate(truncDLSN)));
    // Walk the cached segment metadata after truncation.
    BKLogWriteHandler handler = writer.getCachedWriteHandler();
    List<LogSegmentMetadata> cachedSegments = handler.getFullLedgerList(false, false);
    for (LogSegmentMetadata segment: cachedSegments) {
// Fragment: two writers interleave ten records per outer pass, each pair of
// user records followed by control records carrying the latest txid (txid-1).
for (long j = 1; j <= 4; j++) {
    for (int k = 1; k <= 10; k++) {
        FutureUtils.result(writer1.write(DLMTestUtil.getLogRecordInstance(txid++)));
        FutureUtils.result(writer0.write(DLMTestUtil.getLogRecordInstance(txid++)));
        FutureUtils.result(writer1.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        FutureUtils.result(writer0.writeControlRecord(DLMTestUtil.getLogRecordInstance(txid-1)));
        // Lazily open the reader once records are flowing.
        if (null == reader0) {
            reader0 = dlmreader0.getInputStream(1);
/**
 * Verifies that marking end-of-stream on a brand-new (empty) log segment
 * causes both subsequent writes and reads to fail with
 * {@code EndOfStreamException}.
 */
@Test(timeout = 60000)
public void testMarkEndOfStreamAtBeginningOfSegment() throws Exception {
    String streamName = runtime.getMethodName();
    DistributedLogConfiguration localConf = new DistributedLogConfiguration();
    localConf.addConfiguration(testConf);
    // Flush every record immediately so the end-of-stream marker is durable
    // before the reader opens the stream.
    localConf.setOutputBufferSize(0);
    localConf.setImmediateFlushEnabled(true);
    localConf.setPeriodicFlushFrequencyMilliSeconds(0);

    DistributedLogManager manager = createNewDLM(localConf, streamName);
    BKAsyncLogWriter asyncWriter = (BKAsyncLogWriter) manager.startAsyncLogSegmentNonPartitioned();
    // Seal the stream before any record has been written.
    Await.result(asyncWriter.markEndOfStream());

    // A write after sealing must be rejected.
    try {
        Await.result(asyncWriter.write(DLMTestUtil.getLogRecordInstance(1)));
        fail("Should have thrown");
    } catch (EndOfStreamException ex) {
    }

    // A reader on the sealed, empty stream must also observe end-of-stream.
    BKAsyncLogReaderDLSN asyncReader = (BKAsyncLogReaderDLSN) manager.getAsyncLogReader(DLSN.InitialDLSN);
    try {
        LogRecord record = Await.result(asyncReader.readNext());
        fail("Should have thrown");
    } catch (EndOfStreamException ex) {
    }
}