/**
 * Builds a MetricsConfig from every entry in CONFIG whose key begins with METRIC_PATH,
 * stripping that prefix from the key before handing the value to the builder.
 *
 * @return A MetricsConfig populated with the metric-related properties found in CONFIG.
 */
public static MetricsConfig getMetricsConfig() {
    val builder = MetricsConfig.builder();
    for (Map.Entry<String, ConfigValue> entry : CONFIG.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(METRIC_PATH)) {
            // Use substring rather than replaceFirst: replaceFirst interprets METRIC_PATH as a
            // regex, which can mis-strip if the prefix contains metacharacters (e.g. '.').
            // startsWith has already been verified, so a plain prefix cut is exact.
            builder.with(Property.named(key.substring(METRIC_PATH.length())), entry.getValue().unwrapped());
        }
    }
    return builder.build();
}
}
/**
 * Creates a DurableLogConfig for testing. Null arguments are replaced with MAX_VALUE,
 * which effectively disables the corresponding checkpoint trigger.
 *
 * NOTE(review): the parameter named checkpointMinCommitCount is applied to
 * CHECKPOINT_COMMIT_COUNT, while CHECKPOINT_MIN_COMMIT_COUNT receives the class-level
 * constant CHECKPOINT_MIN_COMMIT_COUNT. This looks intentional, but the parameter
 * naming is misleading — confirm against DurableLogConfig's property definitions.
 *
 * @param checkpointMinCommitCount       Value applied to CHECKPOINT_COMMIT_COUNT; null means Integer.MAX_VALUE (disabled).
 * @param checkpointMinTotalCommitLength Value applied to CHECKPOINT_TOTAL_COMMIT_LENGTH; null means Long.MAX_VALUE (disabled).
 * @return A DurableLogConfig built with the values above.
 */
static DurableLogConfig createDurableLogConfig(Integer checkpointMinCommitCount, Long checkpointMinTotalCommitLength) {
    if (checkpointMinCommitCount == null) {
        checkpointMinCommitCount = Integer.MAX_VALUE;
    }
    if (checkpointMinTotalCommitLength == null) {
        checkpointMinTotalCommitLength = Long.MAX_VALUE;
    }
    return DurableLogConfig
            .builder()
            .with(DurableLogConfig.CHECKPOINT_MIN_COMMIT_COUNT, CHECKPOINT_MIN_COMMIT_COUNT)
            .with(DurableLogConfig.CHECKPOINT_COMMIT_COUNT, checkpointMinCommitCount)
            .with(DurableLogConfig.CHECKPOINT_TOTAL_COMMIT_LENGTH, checkpointMinTotalCommitLength)
            .with(DurableLogConfig.START_RETRY_DELAY_MILLIS, START_RETRY_DELAY_MILLIS)
            .build();
}
}
/** * Gets a default set of configuration values, in absence of any real configuration. * These configuration values are the default ones from all component configurations, except that it will * create only one container to host segments. */ public static ServiceBuilderConfig getDefaultConfig() { // All component configs should have defaults built-in, so no need to override them here. return new Builder() .include(ServiceConfig.builder().with(ServiceConfig.CONTAINER_COUNT, 1)) .build(); }
/**
 * Per-test setup: allocates a temp base directory, spins up a MiniDFSCluster over it,
 * and builds an HDFS storage adapter config pointing at the cluster's NameNode port.
 */
@Before
public void setUp() throws Exception {
    this.baseDir = Files.createTempDirectory("test_hdfs").toFile().getAbsoluteFile();
    this.hdfsCluster = HDFSClusterHelpers.createMiniDFSCluster(this.baseDir.getAbsolutePath());
    String nameNodeUrl = String.format("hdfs://localhost:%d/", this.hdfsCluster.getNameNodePort());
    this.adapterConfig = HDFSStorageConfig.builder()
            .with(HDFSStorageConfig.REPLICATION, 1)
            .with(HDFSStorageConfig.URL, nameNodeUrl)
            .build();
}
/**
 * Per-test setup: creates a fresh temp directory and configures the FileSystem
 * storage adapter to use it as its root.
 */
@Before
public void setUp() throws Exception {
    this.baseDir = Files.createTempDirectory("test_nfs").toFile().getAbsoluteFile();
    String rootPath = this.baseDir.getAbsolutePath();
    this.adapterConfig = FileSystemStorageConfig.builder()
            .with(FileSystemStorageConfig.ROOT, rootPath)
            .build();
}
/**
 * Per-test setup: allocates a temp directory for RocksDB, builds a RocksDBConfig
 * pointing at it, and creates the cache factory from that config.
 */
@Before
public void setUp() {
    this.tempDir.set(Files.createTempDir());
    RocksDBConfig dbConfig = RocksDBConfig.builder()
            .with(RocksDBConfig.DATABASE_DIR, this.tempDir.get().getAbsolutePath())
            .build();
    this.config.set(dbConfig);
    this.factory.set(new RocksDBCacheFactory(this.config.get()));
}
/**
 * Per-test setup: creates the temp base directory and a MiniDFSCluster on top of it,
 * then points the HDFS storage adapter config at the cluster's NameNode.
 */
@Before
public void setUp() throws Exception {
    this.baseDir = Files.createTempDirectory("test_hdfs").toFile().getAbsoluteFile();
    this.hdfsCluster = HDFSClusterHelpers.createMiniDFSCluster(this.baseDir.getAbsolutePath());
    String hdfsEndpoint = String.format("hdfs://localhost:%d/", this.hdfsCluster.getNameNodePort());
    this.adapterConfig = HDFSStorageConfig.builder()
            .with(HDFSStorageConfig.URL, hdfsEndpoint)
            .with(HDFSStorageConfig.REPLICATION, 1)
            .build();
}
/**
 * Per-test setup: runs the base class setup (which starts BookKeeper — see superclass),
 * then configures ExtendedS3 storage against a local filesystem-backed S3 endpoint.
 *
 * NOTE(review): the local {@code URI uri = URI.create(endpoint)} is never used — dead
 * code; consider removing it.
 */
@Before
public void setUp() throws Exception {
    super.setUp();
    endpoint = "http://127.0.0.1:" + TestUtils.getAvailableListenPort();
    URI uri = URI.create(endpoint);
    filesystemS3 = new S3FileSystemImpl(getBaseDir().toString());
    this.configBuilder.include(ExtendedS3StorageConfig.builder()
            .with(ExtendedS3StorageConfig.BUCKET, "kanpravegatest")
            .with(ExtendedS3StorageConfig.ACCESS_KEY_ID, "x")
            .with(ExtendedS3StorageConfig.SECRET_KEY, "x")
            .with(ExtendedS3StorageConfig.URI, endpoint));
}
/**
 * Per-test setup: creates the temp root directory, initializes the metrics provider
 * with statistics enabled, and configures the FileSystem storage adapter.
 */
@Before
public void setUp() throws Exception {
    this.baseDir = Files.createTempDirectory("test_nfs").toFile().getAbsoluteFile();
    // Enable statistics so metric-reporting paths are exercised during the test.
    MetricsProvider.initialize(MetricsConfig.builder()
            .with(MetricsConfig.ENABLE_STATISTICS, true)
            .build());
    this.adapterConfig = FileSystemStorageConfig.builder()
            .with(FileSystemStorageConfig.ROOT, this.baseDir.getAbsolutePath())
            .build();
}
/**
 * Verifies that BookKeeperConfig rejects an ack quorum size (3) larger than the
 * write quorum size (2) when the config is built.
 */
@Test
public void testQuorumSize() {
    AssertExtensions.assertThrows("BookKeeperConfig did not throw InvalidPropertyValueException",
            () -> {
                BookKeeperConfig.builder()
                        .with(BookKeeperConfig.BK_ACK_QUORUM_SIZE, 3)
                        .with(BookKeeperConfig.BK_WRITE_QUORUM_SIZE, 2)
                        .build();
            },
            ex -> ex instanceof InvalidPropertyValueException);
}
private MetadataCheckpointPolicy getNoOpCheckpointPolicy() { // Turn off any MetadataCheckpointing. In these tests, we are doing that manually. DurableLogConfig dlConfig = DurableLogConfig .builder() .with(DurableLogConfig.CHECKPOINT_COMMIT_COUNT, Integer.MAX_VALUE) .with(DurableLogConfig.CHECKPOINT_TOTAL_COMMIT_LENGTH, Long.MAX_VALUE) .build(); return new MetadataCheckpointPolicy(dlConfig, Runnables.doNothing(), executorService()); }
/**
 * Creates a ServiceBuilderConfig based on the given builder, attaching the RocksDB
 * file path appropriate for the given instance.
 *
 * @param configBuilder The ServiceBuilderConfig.Builder to base from (this builder will not be touched).
 * @param instanceId    The instance id of the Service to build (for least interference, different instances
 *                      should have different Ids so that shared resources (i.e., RocksDB) can be set up appropriately).
 * @return A ServiceBuilderConfig instance.
 */
protected ServiceBuilderConfig getBuilderConfig(ServiceBuilderConfig.Builder configBuilder, int instanceId) {
    String id = Integer.toString(instanceId);
    // Each instance gets its own RocksDB subdirectory, keyed by instance id.
    String dbDir = Paths.get(getRocksDBDir().toString(), id).toString();
    return configBuilder
            .makeCopy()
            .include(ServiceConfig.builder().with(ServiceConfig.INSTANCE_ID, id))
            .include(RocksDBConfig.builder().with(RocksDBConfig.DATABASE_DIR, dbDir))
            .build();
}
/**
 * Starts BookKeeper (via the base class setup) and an HDFS MiniCluster, then
 * registers the HDFS storage config pointing at the cluster's NameNode.
 */
@Before
public void setUp() throws Exception {
    super.setUp();
    this.hdfsCluster = HDFSClusterHelpers.createMiniDFSCluster(getBaseDir().getAbsolutePath());
    String hdfsUrl = String.format("hdfs://localhost:%d/", this.hdfsCluster.getNameNodePort());
    this.configBuilder.include(HDFSStorageConfig.builder()
            .with(HDFSStorageConfig.REPLICATION, 1)
            .with(HDFSStorageConfig.URL, hdfsUrl));
}
/**
 * Runs the base class setup (presumably starting BookKeeper — confirm in superclass)
 * and registers a FileSystem storage config rooted at the test's base directory.
 */
@Before
public void setUp() throws Exception {
    super.setUp();
    String root = getBaseDir().getAbsolutePath();
    this.configBuilder.include(FileSystemStorageConfig.builder()
            .with(FileSystemStorageConfig.ROOT, root));
}
/**
 * Verifies that a negative ZooKeeper hierarchy depth is rejected when the config is built.
 */
@Test
public void testZkHierarchyDepth() {
    AssertExtensions.assertThrows("BookKeeperConfig did not throw InvalidPropertyValueException",
            () -> {
                BookKeeperConfig.builder()
                        .with(BookKeeperConfig.ZK_HIERARCHY_DEPTH, -1)
                        .build();
            },
            ex -> ex instanceof InvalidPropertyValueException);
}
}
/**
 * Per-test setup: initializes the metrics provider with statistics collection enabled.
 */
@Before
public void setUp() {
    MetricsConfig config = MetricsConfig.builder()
            .with(MetricsConfig.ENABLE_STATISTICS, true)
            .build();
    MetricsProvider.initialize(config);
}
/**
 * Derives a read-only ServiceBuilderConfig: snapshots every property from the base
 * config into a Properties object and rebuilds with READONLY_SEGMENT_STORE enabled.
 */
private static ServiceBuilderConfig getReadOnlyBuilderConfig() {
    val props = new Properties();
    getBuilderConfig().forEach(props::put);
    return ServiceBuilderConfig.builder()
            .include(props)
            .include(ServiceConfig.builder().with(ServiceConfig.READONLY_SEGMENT_STORE, true))
            .build();
}
private ServiceBuilder createReadOnlyBuilder(int instanceId) throws Exception { // Copy base config properties to a new object. val props = new Properties(); this.configBuilder.build().forEach(props::put); // Create a new config (so we don't alter the base one) and set the ReadOnlySegmentStore to true). val configBuilder = ServiceBuilderConfig.builder() .include(props) .include(ServiceConfig.builder() .with(ServiceConfig.READONLY_SEGMENT_STORE, true)); val builder = createBuilder(configBuilder, instanceId); builder.initialize(); return builder; }
/**
 * Wires up an in-memory test fixture: storage, container metadata, a read index
 * backed by an infinite-policy cache manager, and a MemoryStateUpdater over a
 * sequenced in-memory log.
 */
TestContext() {
    this.cacheFactory = new InMemoryCacheFactory();
    this.storage = InMemoryStorageFactory.newStorage(executorService());
    // Storage must be initialized (epoch 1) before the read index can use it.
    this.storage.initialize(1);
    this.metadata = new MetadataBuilder(CONTAINER_ID).build();
    this.cacheManager = new CacheManager(CachePolicy.INFINITE, executorService());
    ReadIndexConfig config = ReadIndexConfig.builder()
            .with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, 1024)
            .build();
    this.readIndex = new ContainerReadIndex(config, this.metadata, this.cacheFactory, this.storage,
            this.cacheManager, executorService());
    this.memoryLog = new SequencedItemList<>();
    this.stateUpdater = new MemoryStateUpdater(this.memoryLog, this.readIndex, Runnables.doNothing());
}