@Test(timeout = 60000)
public void testSkipToSkipsBytesWithLargerLogRecords() throws Exception {
    String name = testNames.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(1024 * 100);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(1000 * 60);
    skipForwardThenSkipBack(name, confLocal);
}
public Builder conf(DistributedLogConfiguration conf) {
    // Only the digest password is picked up from the configuration by this builder.
    this.digestpw = conf.getBKDigestPW();
    return this;
}
public static void unbind(URI uri) throws IOException {
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    ZooKeeperClient zkc = ZooKeeperClientBuilder.newBuilder()
            .sessionTimeoutMs(conf.getZKSessionTimeoutMilliseconds())
            .retryThreadCount(conf.getZKClientNumberRetryThreads())
            .requestRateLimit(conf.getZKRequestRateLimit())
            .zkAclId(conf.getZkAclId())
            .uri(uri)
            .build();
    byte[] data = new byte[0];
    try {
        zkc.get().setData(uri.getPath(), data, -1);
    } catch (KeeperException ke) {
        throw new IOException("Failed to unbind dl metadata on uri " + uri, ke);
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted when unbinding dl metadata on uri " + uri, ie);
    } finally {
        zkc.close();
    }
}
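// A minimal usage sketch of the unbind helper above (the namespace URI here is a
// placeholder, not taken from the surrounding code): it clears whatever metadata is
// bound under the namespace path by overwriting it with empty data.
URI namespaceUri = URI.create("distributedlog://127.0.0.1:2181/messaging/distributedlog/sample-namespace");
unbind(namespaceUri);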
/**
 * Get the quorum config for each log segment (ledger).
 *
 * @return quorum config used by log segments
 * @see #getEnsembleSize()
 * @see #getWriteQuorumSize()
 * @see #getAckQuorumSize()
 */
public QuorumConfig getQuorumConfig() {
    return new QuorumConfig(
            getEnsembleSize(),
            getWriteQuorumSize(),
            getAckQuorumSize());
}
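// A minimal sketch of how the quorum config above derives from the configuration
// (assumes the corresponding DistributedLogConfiguration setters; the sizes are
// illustrative only):
DistributedLogConfiguration quorumConf = new DistributedLogConfiguration();
quorumConf.setEnsembleSize(3);       // bookies per ledger
quorumConf.setWriteQuorumSize(3);    // bookies each entry is written to
quorumConf.setAckQuorumSize(2);      // acks required before a write completes
QuorumConfig quorum = quorumConf.getQuorumConfig();  // carries (3, 3, 2)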
protected PerDLCommand(String name, String description) {
    super(name, description);
    dlConf = new DistributedLogConfiguration();
    // Tools are allowed to read old metadata as long as they can interpret it.
    dlConf.setDLLedgerMetadataSkipMinVersionCheck(true);
    options.addOption("u", "uri", true, "DistributedLog URI");
    options.addOption("c", "conf", true, "DistributedLog Configuration File");
    options.addOption("a", "zk-acl-id", true, "ZooKeeper ACL ID");
    options.addOption("f", "force", false, "Force command (no warnings or prompts)");
}
public TestAsyncReaderWriter() {
    this.testConf = new DistributedLogConfiguration();
    this.testConf.loadConf(conf);
    this.testConf.setReaderIdleErrorThresholdMillis(1200000);
}
@Test(timeout = 60000)
public void testReadBrokenEntries() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadBatchSize(1);
    confLocal.setPositionGapDetectionEnabled(false);
    confLocal.setReadAheadSkipBrokenEntries(true);
    confLocal.setEIInjectReadAheadBrokenEntries(true);

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 10;
    long txid = 1L;
    txid = writeRecords(dlm, numLogSegments, numRecordsPerLogSegment, txid, false);

    AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InvalidDLSN);

    // 3 segments, 10 records each, immediate flush, batch size 1, so just the first
    // record in each ledger is discarded, for 30 - 3 = 27 records.
    for (int i = 0; i < 27; i++) {
        LogRecordWithDLSN record = Await.result(reader.readNext());
        assertFalse(record.getDlsn().getEntryId() % 10 == 0);
    }

    Utils.close(reader);
    dlm.close();
}
@Test(timeout = 60000)
public void testImmediateFlush() throws Exception {
    String name = "distrlog-immediate-flush";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setOutputBufferSize(0);
    testNonPartitionedWritesInternal(name, confLocal);
}
private DistributedLogConfiguration newLocalConf() {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    return confLocal;
}
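// Typical use of newLocalConf() in the tests below (a sketch; the particular overrides
// are illustrative only): copy the shared conf into a local one, then tweak it so other
// tests keep seeing the original settings.
DistributedLogConfiguration confLocal = newLocalConf();
confLocal.setImmediateFlushEnabled(true);
confLocal.setOutputBufferSize(0);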
int zkSessionTimeout = conf.getBKClientZKSessionTimeoutMilliSeconds();
RetryPolicy retryPolicy = null;
if (conf.getBKClientZKNumRetries() > 0) {
    retryPolicy = new BoundExponentialBackoffRetryPolicy(
            conf.getBKClientZKRetryBackoffStartMillis(),
            conf.getBKClientZKRetryBackoffMaxMillis(),
            conf.getBKClientZKNumRetries());
}
if (conf.getZkAclId() != null) {
    credentials = new DigestCredentials(conf.getZkAclId(), conf.getZkAclId());
}
// The dedicated zookeeper client is constructed from the retry policy, stats scope and
// credentials above; its leading constructor arguments are elided in this excerpt.
zkc = new ZooKeeperClient(/* ... */
        retryPolicy, statsLogger.scope("bkc_zkc"), conf.getZKClientNumberRetryThreads(),
        conf.getBKClientZKRequestRateLimit(), credentials);
registerExpirationHandler = conf.getBKClientZKNumRetries() <= 0;
LOG.info("BookKeeper Client created {} with its own zookeeper client : ledgersPath = {}, numRetries = {}, " +
        "sessionTimeout = {}, backoff = {}, maxBackoff = {}, dnsResolver = {}, registerExpirationHandler = {}",
        new Object[] { name, ledgersPath, conf.getBKClientZKNumRetries(),
                conf.getBKClientZKSessionTimeoutMilliSeconds(), conf.getBKClientZKRetryBackoffStartMillis(),
                conf.getBKClientZKRetryBackoffMaxMillis(), conf.getBkDNSResolverOverrides(),
                registerExpirationHandler });
} else {
    LOG.info("BookKeeper Client created {} with shared zookeeper client : ledgersPath = {}, numRetries = {}, " +
            "sessionTimeout = {}, backoff = {}, maxBackoff = {}, dnsResolver = {}, registerExpirationHandler = {}",
            new Object[] { name, ledgersPath, conf.getZKNumRetries(), conf.getZKSessionTimeoutMilliseconds(),
                    conf.getZKRetryBackoffStartMillis(), conf.getZKRetryBackoffMaxMillis(),
                    conf.getBkDNSResolverOverrides(), registerExpirationHandler });
protected AuditCommand(String name, String description) {
    super(name, description);
    dlConf = new DistributedLogConfiguration();
    options.addOption("u", "uris", true, "List of distributedlog uris, separated by comma");
    options.addOption("c", "conf", true, "DistributedLog Configuration File");
    options.addOption("a", "zk-acl-id", true, "ZooKeeper ACL ID");
    options.addOption("f", "force", false, "Force command (no warnings or prompts)");
}
@Test(timeout = 20000)
public void loadStreamConfGoodOverrideAccepted() throws Exception {
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    assertEquals(conf.getPeriodicFlushFrequencyMilliSeconds(),
            DistributedLogConfiguration.BKDL_PERIODIC_FLUSH_FREQUENCY_MILLISECONDS_DEFAULT);
    assertEquals(conf.getReaderIdleErrorThresholdMillis(),
            DistributedLogConfiguration.BKDL_READER_IDLE_ERROR_THRESHOLD_MILLIS_DEFAULT);
    DistributedLogConfiguration override = new DistributedLogConfiguration();
    override.setPeriodicFlushFrequencyMilliSeconds(
            DistributedLogConfiguration.BKDL_PERIODIC_FLUSH_FREQUENCY_MILLISECONDS_DEFAULT + 1);
    override.setReaderIdleErrorThresholdMillis(
            DistributedLogConfiguration.BKDL_READER_IDLE_ERROR_THRESHOLD_MILLIS_DEFAULT - 1);
    conf.loadStreamConf(Optional.of(override));
    assertEquals(conf.getPeriodicFlushFrequencyMilliSeconds(),
            DistributedLogConfiguration.BKDL_PERIODIC_FLUSH_FREQUENCY_MILLISECONDS_DEFAULT + 1);
    assertEquals(conf.getReaderIdleErrorThresholdMillis(),
            DistributedLogConfiguration.BKDL_READER_IDLE_ERROR_THRESHOLD_MILLIS_DEFAULT - 1);
}
@Test(timeout = 60000)
public void testBulkAsyncReadWithWriteBatch() throws Exception {
    String name = "distrlog-bulkasyncread-with-writebatch";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setOutputBufferSize(1024000);
    confLocal.setReadAheadWaitTime(10);
    confLocal.setReadAheadMaxRecords(10000);
    confLocal.setReadAheadBatchSize(10);

    DistributedLogManager dlm = createNewDLM(confLocal, name);

    int numLogSegments = 3;
    int numRecordsPerLogSegment = 20;
    writeRecords(dlm, numLogSegments, numRecordsPerLogSegment, 1L, false);

    final AsyncLogReader reader = dlm.getAsyncLogReader(DLSN.InitialDLSN);
    int expectedTxID = 1;
    for (long i = 0; i < 3; i++) {
        // Since the 20 records were batched into a single bookkeeper entry,
        // we should be able to read all 20 records as a batch.
        List<LogRecordWithDLSN> records = Await.result(reader.readBulk(20));
        assertEquals(20, records.size());
        for (LogRecordWithDLSN record : records) {
            assertEquals(expectedTxID, record.getTransactionId());
            ++expectedTxID;
        }
    }
    Utils.close(reader);
    dlm.close();
}
private static ZooKeeperClientBuilder createDLZKClientBuilder(String zkcName,
                                                              DistributedLogConfiguration conf,
                                                              String zkServers,
                                                              StatsLogger statsLogger) {
    RetryPolicy retryPolicy = null;
    if (conf.getZKNumRetries() > 0) {
        retryPolicy = new BoundExponentialBackoffRetryPolicy(
                conf.getZKRetryBackoffStartMillis(),
                conf.getZKRetryBackoffMaxMillis(),
                conf.getZKNumRetries());
    }
    ZooKeeperClientBuilder builder = ZooKeeperClientBuilder.newBuilder()
            .name(zkcName)
            .sessionTimeoutMs(conf.getZKSessionTimeoutMilliseconds())
            .retryThreadCount(conf.getZKClientNumberRetryThreads())
            .requestRateLimit(conf.getZKRequestRateLimit())
            .zkServers(zkServers)
            .retryPolicy(retryPolicy)
            .statsLogger(statsLogger)
            .zkAclId(conf.getZkAclId());
    LOG.info("Created shared zooKeeper client builder {}: zkServers = {}, numRetries = {}, sessionTimeout = {}, " +
            "retryBackoff = {}, maxRetryBackoff = {}, zkAclId = {}.",
            new Object[] { zkcName, zkServers, conf.getZKNumRetries(), conf.getZKSessionTimeoutMilliseconds(),
                    conf.getZKRetryBackoffStartMillis(), conf.getZKRetryBackoffMaxMillis(), conf.getZkAclId() });
    return builder;
}
@Test(timeout = 60000)
public void testOutstandingWriteLimitBlockAllLimitWithDarkmode() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setPerWriterOutstandingWriteLimit(0);
    confLocal.setOutstandingWriteLimitDarkmode(true);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    ArrayList<Future<DLSN>> results = new ArrayList<Future<DLSN>>(1000);
    for (int i = 0; i < 1000; i++) {
        results.add(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    }
    for (Future<DLSN> result : results) {
        Await.result(result);
    }
    writer.closeAndComplete();
    dlm.close();
}
/**
 * A non-durable write should fail if the writer is marked as end of stream.
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNondurableWriteAfterEndOfStream() throws Exception {
    DistributedLogConfiguration confLocal = newLocalConf();
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(Integer.MAX_VALUE);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setDurableWriteEnabled(false);

    ZKDistributedLock lock = createLock("/test/lock-" + runtime.getMethodName(), zkc, true);
    BKLogSegmentWriter writer = createLogSegmentWriter(confLocal, 0L, -1L, lock);

    FutureUtils.result(writer.markEndOfStream());
    try {
        Await.result(writer.asyncWrite(DLMTestUtil.getLogRecordInstance(1)));
        fail("Should fail the write if the writer is marked as end of stream");
    } catch (EndOfStreamException we) {
        // expected
    }
    closeWriterAndLock(writer, lock);
}
@Test(timeout = 60000)
public void testWriteFutureDoesNotCompleteUntilWritePersisted() throws Exception {
    String name = testNames.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setPeriodicFlushFrequencyMilliSeconds(Integer.MAX_VALUE);
    conf.setImmediateFlushEnabled(false);

    DistributedLogManager dlmwriter = createNewDLM(conf, name);
    DistributedLogManager dlmreader = createNewDLM(conf, name);
    byte[] byteStream = DLMTestUtil.repeatString("abc", 51).getBytes();

    // Can't reliably test the future is not completed until fsync is called, since writer.force may just
    // happen very quickly. But we can test that the mechanics of the future write and api are basically
    // correct.
    AppendOnlyStreamWriter writer = dlmwriter.getAppendOnlyStreamWriter();
    Future<DLSN> dlsnFuture = writer.write(DLMTestUtil.repeatString("abc", 11).getBytes());

    // The real problem is the fsync completes before writes are submitted, so it never takes effect.
    Thread.sleep(1000);
    assertFalse(dlsnFuture.isDefined());
    writer.force(false);
    // Must not throw.
    Await.result(dlsnFuture, Duration.fromSeconds(5));
    writer.close();
    dlmwriter.close();

    AppendOnlyStreamReader reader = dlmreader.getAppendOnlyStreamReader();
    byte[] bytesIn = new byte[byteStream.length];
    int read = reader.read(bytesIn, 0, 31);
    assertEquals(31, read);
    reader.close();
    dlmreader.close();
}
LedgerHandle lh = bkc.get().createLedger(conf.getEnsembleSize(), conf.getWriteQuorumSize(),
        conf.getAckQuorumSize(), BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes());
String inprogressZnodeName = writeHandler.inprogressZNodeName(lh.getId(), startTxID, logSegmentSeqNo);
String znodePath = writeHandler.inprogressZNode(lh.getId(), startTxID, logSegmentSeqNo);
LogSegmentMetadata l =
        new LogSegmentMetadata.LogSegmentMetadataBuilder(znodePath,
                conf.getDLLedgerMetadataLayoutVersion(), lh.getId(), startTxID)
                .setLogSegmentSequenceNo(logSegmentSeqNo)
                .build();
// A log segment writer is then constructed over the new ledger; only the middle of its
// argument list survives in this excerpt.
new BKLogSegmentWriter(/* ... */
        inprogressZnodeName,
        conf,
        conf.getDLLedgerMetadataLayoutVersion(),
        new BKLogSegmentEntryWriter(lh),
        writeHandler.lock,
        /* ... */);
@Test(timeout = 60000)
public void testAsyncWritePendingWritesAbortedWhenLedgerRollTriggerFails() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setOutputBufferSize(1024);
    confLocal.setMaxLogSegmentBytes(1024);
    confLocal.setLogSegmentRollingIntervalMinutes(0);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());

    // Write one record larger than max seg size. Ledger doesn't roll until next write.
    int txid = 1;
    LogRecord record = DLMTestUtil.getLogRecordInstance(txid++, 2048);
    Future<DLSN> result = writer.write(record);
    DLSN dlsn = Await.result(result, Duration.fromSeconds(10));
    assertEquals(1, dlsn.getLogSegmentSequenceNo());

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, LogRecordTooLongException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    record = DLMTestUtil.getLogRecordInstance(txid++, MAX_LOGRECORD_SIZE + 1);
    result = writer.write(record);
    validateFutureFailed(result, WriteException.class);

    writer.closeAndComplete();
    dlm.close();
}
@Test(timeout = 60000)
public void testSimpleAsyncReadWriteWithMonitoredFuturePool() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(testConf);
    confLocal.setTaskExecutionWarnTimeMicros(1000);
    confLocal.setEnableTaskExecutionStats(true);
    simpleAsyncReadTest(name, confLocal);
}