/**
 * Computes the per-task RocksDB block cache size by splitting the container-wide
 * cache budget evenly across the container's tasks.
 *
 * @param storeConfig store configuration; reads "container.cache.size.bytes"
 *                    (default 100 MB) as the container-wide cache budget
 * @param numTasksForContainer number of tasks sharing the container; must be positive
 * @return the per-task block cache size in bytes
 * @throws IllegalArgumentException if {@code numTasksForContainer} is not positive
 *         (previously a zero task count surfaced as an opaque ArithmeticException,
 *         and a negative count produced a negative cache size)
 */
public static Long getBlockCacheSize(Config storeConfig, int numTasksForContainer) {
  if (numTasksForContainer <= 0) {
    throw new IllegalArgumentException(
        "numTasksForContainer must be positive, got: " + numTasksForContainer);
  }
  long cacheSize = storeConfig.getLong("container.cache.size.bytes", 100 * 1024 * 1024L);
  return cacheSize / numTasksForContainer;
} }
/**
 * Returns each task's share of the container-wide RocksDB block cache budget.
 * The budget is read from "container.cache.size.bytes" (default 100 MB) and
 * divided evenly by the number of tasks in the container.
 */
public static Long getBlockCacheSize(Config storeConfig, int numTasksForContainer) {
  final long containerCacheBytes =
      storeConfig.getLong("container.cache.size.bytes", 100 * 1024 * 1024L);
  final long perTaskCacheBytes = containerCacheBytes / numTasksForContainer;
  return perTaskCacheBytes;
} }
/**
 * Divides the container-level cache budget ("container.cache.size.bytes",
 * default 100 MB) evenly among the container's tasks and returns the
 * per-task slice in bytes.
 */
public static Long getBlockCacheSize(Config storeConfig, int numTasksForContainer) {
  return storeConfig.getLong("container.cache.size.bytes", 100 * 1024 * 1024L)
      / numTasksForContainer;
} }
/**
 * Builds the SQL application's runtime configuration: resolves every input/output
 * system stream to its IO config, wires up per-source schema providers and
 * Samza-rel converters, loads the UDF metadata, and reads the group-by window
 * duration. Statement order matters: the rel-converter map is built AFTER the
 * schema-provider map because each converter factory is handed the provider for
 * its source.
 *
 * @param staticConfig the job's static configuration
 * @param inputSystemStreams fully qualified input sources ("system.stream" form)
 * @param outputSystemStreams fully qualified output sinks ("system.stream" form)
 */
public SamzaSqlApplicationConfig(Config staticConfig, Set<String> inputSystemStreams, Set<String> outputSystemStreams) {
  ioResolver = createIOResolver(staticConfig);
  // Map each source/sink name to its resolved SqlIOConfig.
  inputSystemStreamConfigBySource = inputSystemStreams.stream()
      .collect(Collectors.toMap(Function.identity(), src -> ioResolver.fetchSourceInfo(src)));
  outputSystemStreamConfigsBySource = outputSystemStreams.stream()
      .collect(Collectors.toMap(Function.identity(), x -> ioResolver.fetchSinkInfo(x)));
  // Combined view over inputs and outputs (outputs overwrite inputs on key collision).
  systemStreamConfigsBySource = new HashMap<>(inputSystemStreamConfigBySource);
  systemStreamConfigsBySource.putAll(outputSystemStreamConfigsBySource);
  Set<SqlIOConfig> systemStreamConfigs = new HashSet<>(systemStreamConfigsBySource.values());
  // One RelSchemaProvider per source, instantiated via the configured factory.
  relSchemaProvidersBySource = systemStreamConfigs.stream()
      .collect(Collectors.toMap(SqlIOConfig::getSource,
          x -> initializePlugin("RelSchemaProvider", x.getRelSchemaProviderName(), staticConfig,
              CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN,
              (o, c) -> ((RelSchemaProviderFactory) o).create(x.getSystemStream(), c))));
  // One SamzaRelConverter per source; depends on relSchemaProvidersBySource built above.
  samzaRelConvertersBySource = systemStreamConfigs.stream()
      .collect(Collectors.toMap(SqlIOConfig::getSource,
          x -> initializePlugin("SamzaRelConverter", x.getSamzaRelConverterName(), staticConfig,
              CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN,
              (o, c) -> ((SamzaRelConverterFactory) o).create(x.getSystemStream(),
                  relSchemaProvidersBySource.get(x.getSource()), c))));
  udfResolver = createUdfResolver(staticConfig);
  udfMetadata = udfResolver.getUdfs();
  windowDurationMs = staticConfig.getLong(CFG_GROUPBY_WINDOW_DURATION_MS, DEFAULT_GROUPBY_WINDOW_DURATION_MS);
  // remove the SqlIOConfigs of outputs whose system is "log" out of systemStreamConfigsBySource
  // (the "system" is the token before the first '.' of the source name)
  outputSystemStreamConfigsBySource.forEach((k, v) -> {
      if (k.split("\\.")[0].equals(SamzaSqlApplicationConfig.SAMZA_SYSTEM_LOG)) {
        systemStreamConfigsBySource.remove(k);
      }
    });
}
// NOTE(review): isolated fragment — its enclosing method/constructor is not visible here.
// Reads the group-by window duration (ms) from config, falling back to the default.
windowDurationMs = staticConfig.getLong(CFG_GROUPBY_WINDOW_DURATION_MS, DEFAULT_GROUPBY_WINDOW_DURATION_MS);
public TestAvroSystemConsumer(String systemName, Config config) { numMessages = config.getInt(String.format("systems.%s.%s", systemName, CFG_NUM_MESSAGES), DEFAULT_NUM_EVENTS); includeNullForeignKeys = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_FOREIGN_KEYS), false); includeNullSimpleRecords = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_SIMPLE_RECORDS), false); sleepBetweenPollsMs = config.getLong(String.format("systems.%s.%s", systemName, CFG_SLEEP_BETWEEN_POLLS_MS), 0); }
// NOTE(review): this snippet is truncated mid-method (no closing brace visible); code kept as-is.
// Builds RocksDB Options from the store config: write-buffer size (default 32 MB, read into
// writeBufSize — presumably applied further down in the truncated remainder; verify),
// max log file size (default 64 MB) and number of log files to keep (default 2).
public static Options options(Config storeConfig, int numTasksForContainer) { Options options = new Options(); Long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024); options.setErrorIfExists(false); options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L)); options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
public ConfigManager(Config config) { //get rm address and port if (!config.containsKey(rmAddressOpt) || !config.containsKey(rmPortOpt)) { throw new IllegalArgumentException("Missing config: the config file does not contain the rm host or port."); } String rmAddress = config.get(rmAddressOpt); int rmPort = config.getInt(rmPortOpt); //get job name and id; if (!config.containsKey(JobConfig.JOB_NAME())) { throw new IllegalArgumentException("Missing config: the config does not contain the job name"); } jobName = config.get(JobConfig.JOB_NAME()); jobID = config.getInt(JobConfig.JOB_ID(), 1); //set polling interval if (config.containsKey(pollingIntervalOpt)) { long pollingInterval = config.getLong(pollingIntervalOpt); if (pollingInterval <= 0) { throw new IllegalArgumentException("polling interval cannot be a negative value"); } this.interval = pollingInterval; } else { this.interval = defaultPollingInterval; } this.config = config; this.coordinatorStreamConsumer = new CoordinatorStreamSystemConsumer(config, new MetricsRegistryMap()); this.yarnUtil = new YarnUtil(rmAddress, rmPort); }
// NOTE(review): this snippet is truncated mid-method (no closing brace visible); code kept as-is.
// Overload taking the store directory and store mode (neither used in the visible portion —
// presumably consumed in the truncated remainder; verify). Builds RocksDB Options from config:
// write-buffer size (default 32 MB), max log file size (default 64 MB), log files kept (default 2).
public static Options options(Config storeConfig, int numTasksForContainer, File storeDir, StorageEngineFactory.StoreMode storeMode) { Options options = new Options(); Long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024); options.setErrorIfExists(false); options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L)); options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
// NOTE(review): this snippet is truncated mid-method (no closing brace visible); code kept as-is.
// Same visible logic as the options(Config, int) snippet above in this file: reads the
// write-buffer size (default 32 MB) into writeBufSize, then sets errorIfExists=false,
// max log file size (default 64 MB), and the number of log files to keep (default 2).
public static Options options(Config storeConfig, int numTasksForContainer) { Options options = new Options(); Long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024); options.setErrorIfExists(false); options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L)); options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));