/**
 * Computes the job model for this processor: starts the system admins long
 * enough to read stream metadata, then delegates grouping to JobModelManager.
 * The admins are always stopped, even if metadata reads fail.
 */
@Override
public JobModel getJobModel() {
  final SystemAdmins admins = new SystemAdmins(config);
  final StreamMetadataCache metadataCache = new StreamMetadataCache(admins, 5000, SystemClock.instance());
  admins.start();
  try {
    final String processorId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    // Only this processor's location is known here; the remaining grouper inputs are empty.
    final GrouperMetadata grouperMetadata =
        new GrouperMetadataImpl(
            ImmutableMap.of(String.valueOf(processorId), locationId),
            Collections.emptyMap(),
            Collections.emptyMap(),
            Collections.emptyMap());
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), metadataCache, grouperMetadata);
  } finally {
    admins.stop();
  }
}
/**
 * Creates a {@link DiskQuotaPolicy} from config: a no-throttling policy when no
 * watermark entries are configured, otherwise a watermark policy built from the
 * per-index lowWaterMark/highWaterMark/workFactor properties.
 *
 * @param config job configuration holding the policy entry count and entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  // Use <= 0 so a negative (misconfigured) count falls back to no throttling
  // instead of silently producing a WatermarkDiskQuotaPolicy with zero entries.
  if (entryCount <= 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<WatermarkDiskQuotaPolicy.Entry>();
  for (int i = 0; i < entryCount; ++i) {
    // Missing per-entry keys intentionally fail fast via config.getDouble.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
/**
 * Creates a {@link DiskQuotaPolicy} from config: a no-throttling policy when no
 * watermark entries are configured, otherwise a watermark policy built from the
 * per-index lowWaterMark/highWaterMark/workFactor properties.
 *
 * @param config job configuration holding the policy entry count and entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  // Use <= 0 so a negative (misconfigured) count falls back to no throttling
  // instead of silently producing a WatermarkDiskQuotaPolicy with zero entries.
  if (entryCount <= 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<WatermarkDiskQuotaPolicy.Entry>();
  for (int i = 0; i < entryCount; ++i) {
    // Missing per-entry keys intentionally fail fast via config.getDouble.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
/**
 * Creates a {@link DiskQuotaPolicy} from config: a no-throttling policy when no
 * watermark entries are configured, otherwise a watermark policy built from the
 * per-index lowWaterMark/highWaterMark/workFactor properties.
 *
 * @param config job configuration holding the policy entry count and entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  // Use <= 0 so a negative (misconfigured) count falls back to no throttling
  // instead of silently producing a WatermarkDiskQuotaPolicy with zero entries.
  if (entryCount <= 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<WatermarkDiskQuotaPolicy.Entry>();
  for (int i = 0; i < entryCount; ++i) {
    // Missing per-entry keys intentionally fail fast via config.getDouble.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
/**
 * Creates a {@link DiskQuotaPolicy} from config: a no-throttling policy when no
 * watermark entries are configured, otherwise a watermark policy built from the
 * per-index lowWaterMark/highWaterMark/workFactor properties.
 *
 * @param config job configuration holding the policy entry count and entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  // Use <= 0 so a negative (misconfigured) count falls back to no throttling
  // instead of silently producing a WatermarkDiskQuotaPolicy with zero entries.
  if (entryCount <= 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<WatermarkDiskQuotaPolicy.Entry>();
  for (int i = 0; i < entryCount; ++i) {
    // Missing per-entry keys intentionally fail fast via config.getDouble.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
/**
 * Creates a {@link DiskQuotaPolicy} from config: a no-throttling policy when no
 * watermark entries are configured, otherwise a watermark policy built from the
 * per-index lowWaterMark/highWaterMark/workFactor properties.
 *
 * @param config job configuration holding the policy entry count and entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  // Use <= 0 so a negative (misconfigured) count falls back to no throttling
  // instead of silently producing a WatermarkDiskQuotaPolicy with zero entries.
  if (entryCount <= 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<WatermarkDiskQuotaPolicy.Entry>();
  for (int i = 0; i < entryCount; ++i) {
    // Missing per-entry keys intentionally fail fast via config.getDouble.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
public TestAvroSystemConsumer(String systemName, Config config) { numMessages = config.getInt(String.format("systems.%s.%s", systemName, CFG_NUM_MESSAGES), DEFAULT_NUM_EVENTS); includeNullForeignKeys = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_FOREIGN_KEYS), false); includeNullSimpleRecords = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_SIMPLE_RECORDS), false); sleepBetweenPollsMs = config.getLong(String.format("systems.%s.%s", systemName, CFG_SLEEP_BETWEEN_POLLS_MS), 0); }
/**
 * Computes the job model for this processor. System admins are started only for
 * the duration of the metadata read and are stopped in a finally block so an
 * exception from readJobModel (or config lookup) cannot leak running admins —
 * the original called stop() unconditionally after the read, skipping it on error.
 */
@Override
public JobModel getJobModel() {
  SystemAdmins systemAdmins = new SystemAdmins(config);
  StreamMetadataCache streamMetadataCache = new StreamMetadataCache(systemAdmins, 5000, SystemClock.instance());
  systemAdmins.start();
  try {
    String containerId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    /*
     * TODO: LocalityManager seems to be required in JC for reading locality info and grouping
     * tasks intelligently, and also in SamzaContainer for writing locality info to the
     * coordinator stream. This closely couples TaskNameGrouper with the LocalityManager!
     * Hence, groupers should be a property of the job coordinator
     * (job.coordinator.task.grouper, instead of task.systemstreampartition.grouper).
     */
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), null, streamMetadataCache,
        Collections.singletonList(containerId));
  } finally {
    // Always release admin resources, even when the metadata read throws.
    systemAdmins.stop();
  }
}
/**
 * Computes the job model for this processor. System admins are started only for
 * the duration of the metadata read and are stopped in a finally block so an
 * exception from readJobModel (or config lookup) cannot leak running admins —
 * the original called stop() unconditionally after the read, skipping it on error.
 */
@Override
public JobModel getJobModel() {
  SystemAdmins systemAdmins = new SystemAdmins(config);
  StreamMetadataCache streamMetadataCache = new StreamMetadataCache(systemAdmins, 5000, SystemClock.instance());
  systemAdmins.start();
  try {
    String containerId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    /*
     * TODO: LocalityManager seems to be required in JC for reading locality info and grouping
     * tasks intelligently, and also in SamzaContainer for writing locality info to the
     * coordinator stream. This closely couples TaskNameGrouper with the LocalityManager!
     * Hence, groupers should be a property of the job coordinator
     * (job.coordinator.task.grouper, instead of task.systemstreampartition.grouper).
     */
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), null, streamMetadataCache,
        Collections.singletonList(containerId));
  } finally {
    // Always release admin resources, even when the metadata read throws.
    systemAdmins.stop();
  }
}
/**
 * Computes the job model for this processor. System admins are started only for
 * the duration of the metadata read and are stopped in a finally block so an
 * exception from readJobModel (or config lookup) cannot leak running admins —
 * the original called stop() unconditionally after the read, skipping it on error.
 */
@Override
public JobModel getJobModel() {
  SystemAdmins systemAdmins = new SystemAdmins(config);
  StreamMetadataCache streamMetadataCache = new StreamMetadataCache(systemAdmins, 5000, SystemClock.instance());
  systemAdmins.start();
  try {
    String containerId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    /*
     * TODO: LocalityManager seems to be required in JC for reading locality info and grouping
     * tasks intelligently, and also in SamzaContainer for writing locality info to the
     * coordinator stream. This closely couples TaskNameGrouper with the LocalityManager!
     * Hence, groupers should be a property of the job coordinator
     * (job.coordinator.task.grouper, instead of task.systemstreampartition.grouper).
     */
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), null, streamMetadataCache,
        Collections.singletonList(containerId));
  } finally {
    // Always release admin resources, even when the metadata read throws.
    systemAdmins.stop();
  }
}
/**
 * Computes the job model for this processor. System admins are started only for
 * the duration of the metadata read and are stopped in a finally block so an
 * exception from readJobModel (or config lookup) cannot leak running admins —
 * the original called stop() unconditionally after the read, skipping it on error.
 */
@Override
public JobModel getJobModel() {
  SystemAdmins systemAdmins = new SystemAdmins(config);
  StreamMetadataCache streamMetadataCache = new StreamMetadataCache(systemAdmins, 5000, SystemClock.instance());
  systemAdmins.start();
  try {
    String containerId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    /*
     * TODO: LocalityManager seems to be required in JC for reading locality info and grouping
     * tasks intelligently, and also in SamzaContainer for writing locality info to the
     * coordinator stream. This closely couples TaskNameGrouper with the LocalityManager!
     * Hence, groupers should be a property of the job coordinator
     * (job.coordinator.task.grouper, instead of task.systemstreampartition.grouper).
     */
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), null, streamMetadataCache,
        Collections.singletonList(containerId));
  } finally {
    // Always release admin resources, even when the metadata read throws.
    systemAdmins.stop();
  }
}
// Read the default partition count; PARTITIONS_UNKNOWN sentinel means "not configured".
// NOTE(review): the if-body continues beyond this snippet — presumably it infers the
// partition count some other way; confirm against the full method.
int partitions = config.getInt(defaultPartitionsConfigProperty, StreamEdge.PARTITIONS_UNKNOWN);
if (partitions == StreamEdge.PARTITIONS_UNKNOWN) {
// Read the default partition count; PARTITIONS_UNKNOWN sentinel means "not configured".
// NOTE(review): the if-body continues beyond this snippet — presumably it infers the
// partition count some other way; confirm against the full method.
int partitions = config.getInt(defaultPartitionsConfigProperty, StreamEdge.PARTITIONS_UNKNOWN);
if (partitions == StreamEdge.PARTITIONS_UNKNOWN) {
// Read the default partition count; PARTITIONS_UNKNOWN sentinel means "not configured".
// NOTE(review): the if-body continues beyond this snippet — presumably it infers the
// partition count some other way; confirm against the full method.
int partitions = config.getInt(defaultPartitionsConfigProperty, StreamEdge.PARTITIONS_UNKNOWN);
if (partitions == StreamEdge.PARTITIONS_UNKNOWN) {
// Read the default partition count; PARTITIONS_UNKNOWN sentinel means "not configured".
// NOTE(review): the if-body continues beyond this snippet — presumably it infers the
// partition count some other way; confirm against the full method.
int partitions = config.getInt(defaultPartitionsConfigProperty, StreamEdge.PARTITIONS_UNKNOWN);
if (partitions == StreamEdge.PARTITIONS_UNKNOWN) {
// Read the default partition count; PARTITIONS_UNKNOWN sentinel means "not configured".
// NOTE(review): the if-body continues beyond this snippet — presumably it infers the
// partition count some other way; confirm against the full method.
int partitions = config.getInt(defaultPartitionsConfigProperty, StreamEdge.PARTITIONS_UNKNOWN);
if (partitions == StreamEdge.PARTITIONS_UNKNOWN) {
public ConfigManager(Config config) { //get rm address and port if (!config.containsKey(rmAddressOpt) || !config.containsKey(rmPortOpt)) { throw new IllegalArgumentException("Missing config: the config file does not contain the rm host or port."); } String rmAddress = config.get(rmAddressOpt); int rmPort = config.getInt(rmPortOpt); //get job name and id; if (!config.containsKey(JobConfig.JOB_NAME())) { throw new IllegalArgumentException("Missing config: the config does not contain the job name"); } jobName = config.get(JobConfig.JOB_NAME()); jobID = config.getInt(JobConfig.JOB_ID(), 1); //set polling interval if (config.containsKey(pollingIntervalOpt)) { long pollingInterval = config.getLong(pollingIntervalOpt); if (pollingInterval <= 0) { throw new IllegalArgumentException("polling interval cannot be a negative value"); } this.interval = pollingInterval; } else { this.interval = defaultPollingInterval; } this.config = config; this.coordinatorStreamConsumer = new CoordinatorStreamSystemConsumer(config, new MetricsRegistryMap()); this.yarnUtil = new YarnUtil(rmAddress, rmPort); }
/**
 * Initializes this task from job config: the expected message count is required
 * ({@code app.messageCount}); output topic and system have defaults.
 */
@Override
public void init(Context context) throws Exception {
  final Config jobConfig = context.getJobContext().getConfig();
  // app.messageCount has no default — init fails fast if it is missing.
  expectedMessageCount = jobConfig.getInt("app.messageCount");
  outputTopic = jobConfig.get("app.outputTopic", "output");
  outputSystem = jobConfig.get("app.outputSystem", "test-system");
}
// Configure the RocksDB block-based table format; block size defaults to 4 KB.
int blockSize = storeConfig.getInt(ROCKSDB_BLOCK_SIZE_BYTES, 4096);
BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
// NOTE(review): blockCacheSize and options are defined outside this snippet —
// presumably the cache size is in bytes; confirm against the enclosing method.
tableOptions.setBlockCacheSize(blockCacheSize).setBlockSize(blockSize);
// Allow up to N memtables (default 3) before writes stall on flushes.
options.setMaxWriteBufferNumber(storeConfig.getInt(ROCKSDB_NUM_WRITE_BUFFERS, 3));
// Create the DB directory on first open, but reuse an existing one without error.
options.setCreateIfMissing(true);
options.setErrorIfExists(false);
// Configure the RocksDB block-based table format; block size defaults to 4 KB.
int blockSize = storeConfig.getInt(ROCKSDB_BLOCK_SIZE_BYTES, 4096);
BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
// NOTE(review): blockCacheSize and options are defined outside this snippet —
// presumably the cache size is in bytes; confirm against the enclosing method.
tableOptions.setBlockCacheSize(blockCacheSize).setBlockSize(blockSize);
// Allow up to N memtables (default 3) before writes stall on flushes.
options.setMaxWriteBufferNumber(storeConfig.getInt(ROCKSDB_NUM_WRITE_BUFFERS, 3));
// Create the DB directory on first open, but reuse an existing one without error.
options.setCreateIfMissing(true);
options.setErrorIfExists(false);