public GitFlowGraphMonitor(Config config, Optional<FSFlowCatalog> flowCatalog, FlowGraph graph,
    Map<URI, TopologySpec> topologySpecMap, CountDownLatch initComplete) {
  super(config.getConfig(GIT_FLOWGRAPH_MONITOR_PREFIX).withFallback(DEFAULT_FALLBACK));
  this.flowCatalog = flowCatalog;
  this.flowGraph = graph;
  this.topologySpecMap = topologySpecMap;
  this.initComplete = initComplete;
}
public Builder withDataFlowTopologyConfig(Config config) {
  Preconditions.checkArgument(config.hasPath(DATA_FLOW_TOPOLOGY),
      "missing required config entry " + DATA_FLOW_TOPOLOGY);
  this.dataFlowTopologyConfig = config.getConfig(DATA_FLOW_TOPOLOGY);
  return this;
}
public Builder withDefaultDataFlowTopologyConfig_PushMode(Config config) {
  if (config.hasPath(DEFAULT_DATA_FLOW_TOPOLOGIES_PUSHMODE)) {
    this.defaultDataFlowTopology_PushModeConfig = Optional.of(config.getConfig(DEFAULT_DATA_FLOW_TOPOLOGIES_PUSHMODE));
  } else {
    this.defaultDataFlowTopology_PushModeConfig = Optional.absent();
  }
  return this;
}

public Builder withDefaultDataFlowTopologyConfig_PullMode(Config config) {
  if (config.hasPath(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE)) {
    this.defaultDataFlowTopology_PullModeConfig = Optional.of(config.getConfig(DEFAULT_DATA_FLOW_TOPOLOGIES_PULLMODE));
  } else {
    this.defaultDataFlowTopology_PullModeConfig = Optional.absent();
  }
  return this;
}
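// A minimal, self-contained sketch of the hasPath-guarded optional-section pattern the two
// builder methods above share. The key name and demo values are hypothetical, and the sketch
// uses java.util.Optional where the originals use Guava's Optional (Optional.absent()).
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.util.Optional;

public class OptionalSectionDemo {
  private static final String PUSH_MODE_KEY = "defaultDataFlowTopologies.pushMode"; // hypothetical key

  static Optional<Config> optionalSection(Config config, String path) {
    // Same guard as the builder methods above: absent when the path is missing.
    return config.hasPath(path) ? Optional.of(config.getConfig(path)) : Optional.empty();
  }

  public static void main(String[] args) {
    Config cfg = ConfigFactory.parseString(PUSH_MODE_KEY + " { routes = [ clusterA, clusterB ] }");
    System.out.println(optionalSection(cfg, PUSH_MODE_KEY).isPresent());  // true
    System.out.println(optionalSection(cfg, "missing.path").isPresent()); // false
  }
}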
@Override
protected Config getFactoryConfig(Config sysConfig) {
  return sysConfig.hasPath(CONFIG_PREFIX) ? sysConfig.getConfig(CONFIG_PREFIX) : ConfigFactory.empty();
}
@Override
public Config getPreferredRoutes(Config allTopologies, EndPoint source) {
  Preconditions.checkArgument(source instanceof HadoopFsEndPoint,
      "source is not the expected class " + HadoopFsEndPoint.class.getCanonicalName());
  HadoopFsEndPoint hadoopFsSource = (HadoopFsEndPoint) source;
  String clusterName = hadoopFsSource.getClusterName();
  Preconditions.checkArgument(allTopologies.hasPath(clusterName),
      "Cannot find preferred topology for cluster name " + clusterName);
  return allTopologies.getConfig(clusterName);
}
public HadoopConfigLoader(Config rootConfig) {
  if (rootConfig.hasPath(HADOOP_CONF_OVERRIDES_ROOT)) {
    addOverrides(_conf, rootConfig.getConfig(HADOOP_CONF_OVERRIDES_ROOT));
  }
}
public Builder withReplicationSource(Config config)
    throws InstantiationException, IllegalAccessException, ClassNotFoundException {
  Preconditions.checkArgument(config.hasPath(REPLICATION_SOURCE),
      "missing required config entry " + REPLICATION_SOURCE);
  Config sourceConfig = config.getConfig(REPLICATION_SOURCE);
  String endPointFactory = sourceConfig.hasPath(END_POINT_FACTORY_CLASS)
      ? sourceConfig.getString(END_POINT_FACTORY_CLASS)
      : DEFAULT_END_POINT_FACTORY_CLASS;
  EndPointFactory factory = endPointFactoryResolver.resolveClass(endPointFactory).newInstance();
  this.source = factory.buildSource(sourceConfig, this.selectionConfig);
  return this;
}
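// Hedged sketch of the default-vs-configured factory lookup in withReplicationSource(), with
// plain reflection standing in for Gobblin's endPointFactoryResolver (an assumption); the key
// name and the stand-in default class below are for illustration only.
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class FactoryLookupDemo {
  private static final String END_POINT_FACTORY_CLASS = "endPointFactoryClass";        // hypothetical key
  private static final String DEFAULT_END_POINT_FACTORY_CLASS = "java.util.ArrayList"; // stand-in default

  public static void main(String[] args) throws Exception {
    Config sourceConfig = ConfigFactory.empty(); // no override configured, so the default wins
    String factoryClass = sourceConfig.hasPath(END_POINT_FACTORY_CLASS)
        ? sourceConfig.getString(END_POINT_FACTORY_CLASS)
        : DEFAULT_END_POINT_FACTORY_CLASS;
    Object factory = Class.forName(factoryClass).getDeclaredConstructor().newInstance();
    System.out.println(factory.getClass().getName()); // java.util.ArrayList
  }
}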
public static ConfigAccessor createFromGlobalConfig(Config cfg) {
  Config localCfg = cfg.hasPath(CONFIG_PREFIX) ? cfg.getConfig(CONFIG_PREFIX) : ConfigFactory.empty();
  return new ConfigAccessor(localCfg);
}
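// The same scope-or-empty idiom appears in both getFactoryConfig() and createFromGlobalConfig():
// narrow the global config to a prefix when present, otherwise hand back an empty config so
// downstream defaults apply. A minimal sketch with a hypothetical prefix:
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class ScopedConfigDemo {
  private static final String CONFIG_PREFIX = "gobblin.instance"; // hypothetical prefix

  static Config scoped(Config sysConfig) {
    return sysConfig.hasPath(CONFIG_PREFIX) ? sysConfig.getConfig(CONFIG_PREFIX) : ConfigFactory.empty();
  }

  public static void main(String[] args) {
    Config present = ConfigFactory.parseString(CONFIG_PREFIX + ".threads = 4");
    System.out.println(scoped(present).getInt("threads"));       // 4
    System.out.println(scoped(ConfigFactory.empty()).isEmpty()); // true
  }
}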
@Override
public MultiAccessControlAction createRetentionAction(Config config, FileSystem fs, Config jobConfig) {
  Preconditions.checkArgument(this.canCreateWithConfig(config),
      "Cannot create MultiAccessControlAction with config " + config.root().render(ConfigRenderOptions.concise()));
  if (config.hasPath(LEGACY_ACCESS_CONTROL_KEY)) {
    return new MultiAccessControlAction(config.getConfig(LEGACY_ACCESS_CONTROL_KEY), fs, jobConfig);
  } else if (config.hasPath(ACCESS_CONTROL_KEY)) {
    return new MultiAccessControlAction(config.getConfig(ACCESS_CONTROL_KEY), fs, jobConfig);
  }
  throw new IllegalStateException(
      "RetentionActionFactory.canCreateWithConfig returned true but could not create MultiAccessControlAction");
}
@Override
public HadoopFsEndPoint buildSource(Config sourceConfig, Config selectionConfig) {
  Preconditions.checkArgument(sourceConfig.hasPath(HADOOP_FS_CONFIG_KEY),
      "missing required config entry " + HADOOP_FS_CONFIG_KEY);
  return new SourceHadoopFsEndPoint(new HadoopFsReplicaConfig(sourceConfig.getConfig(HADOOP_FS_CONFIG_KEY)),
      selectionConfig);
}

@Override
public HadoopFsEndPoint buildReplica(Config replicaConfig, String replicaName, Config selectionConfig) {
  Preconditions.checkArgument(replicaConfig.hasPath(HADOOP_FS_CONFIG_KEY),
      "missing required config entry " + HADOOP_FS_CONFIG_KEY);
  return new ReplicaHadoopFsEndPoint(new HadoopFsReplicaConfig(replicaConfig.getConfig(HADOOP_FS_CONFIG_KEY)),
      replicaName, selectionConfig);
}
/** Adds the cache entry listener settings. */
private void addListeners() {
  for (String path : merged.getStringList("listeners")) {
    Config listener = root.getConfig(path);
    Factory<? extends CacheEntryListener<? super K, ? super V>> listenerFactory =
        factoryCreator.factoryOf(listener.getString("class"));
    Factory<? extends CacheEntryEventFilter<? super K, ? super V>> filterFactory = null;
    if (listener.hasPath("filter")) {
      filterFactory = factoryCreator.factoryOf(listener.getString("filter"));
    }
    boolean oldValueRequired = listener.getBoolean("old-value-required");
    boolean synchronous = listener.getBoolean("synchronous");
    configuration.addCacheEntryListenerConfiguration(
        new MutableCacheEntryListenerConfiguration<>(
            listenerFactory, filterFactory, oldValueRequired, synchronous));
  }
}
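// An illustrative config in the shape addListeners() expects: "listeners" names paths that are
// resolved against the root config, and each listener block carries "class", an optional
// "filter", plus the two boolean flags. The listener class name below is hypothetical.
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class ListenerConfigDemo {
  public static void main(String[] args) {
    Config root = ConfigFactory.parseString(
        "listeners = [ my-listener ]\n"
            + "my-listener {\n"
            + "  class = com.example.MyCacheListener\n"
            + "  old-value-required = false\n"
            + "  synchronous = false\n"
            + "}\n");
    Config listener = root.getConfig(root.getStringList("listeners").get(0));
    System.out.println(listener.getString("class"));               // com.example.MyCacheListener
    System.out.println(listener.getBoolean("old-value-required")); // false
  }
}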
@Override
public JobSpecMonitor forJobCatalog(GobblinInstanceDriver instanceDriver, MutableJobCatalog jobCatalog)
    throws IOException {
  Config config = instanceDriver.getSysConfig().getConfig().getConfig(CONFIG_PREFIX).withFallback(DEFAULTS);
  return forConfig(config, jobCatalog);
}
/** Config accessor from a non-namespaced Typesafe config. */
public ConfigAccessor(Config cfg) {
  Config effectiveCfg = cfg.withFallback(getDefaultConfig().getConfig(CONFIG_PREFIX));
  this.startTimeoutMs = effectiveCfg.getLong(START_TIMEOUT_MS);
  this.shutdownTimeoutMs = effectiveCfg.getLong(SHUTDOWN_TIMEOUT_MS);
}
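// withFallback() gives user-supplied settings precedence over defaults, which is how the
// ConfigAccessor constructor above resolves its timeouts. A sketch with hypothetical key names
// standing in for START_TIMEOUT_MS and SHUTDOWN_TIMEOUT_MS:
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class FallbackDemo {
  public static void main(String[] args) {
    Config defaults = ConfigFactory.parseString("startTimeoutMs = 5000\nshutdownTimeoutMs = 10000");
    Config user = ConfigFactory.parseString("startTimeoutMs = 250");
    Config effective = user.withFallback(defaults);
    System.out.println(effective.getLong("startTimeoutMs"));    // 250: explicit value wins
    System.out.println(effective.getLong("shutdownTimeoutMs")); // 10000: falls back to default
  }
}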
@Test
public void testFlattenedOrcConfig() throws Exception {
  String testConfFilePath = "convertibleHiveDatasetTest/flattenedOrc.conf";
  Config config = ConfigFactory.parseResources(testConfFilePath).getConfig("hive.conversion.avro");
  ConvertibleHiveDataset cd = createTestConvertibleDataset(config);
  Assert.assertEquals(cd.getDestFormats(), ImmutableSet.of("flattenedOrc"));
  Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
  validateFlattenedConfig(cd.getConversionConfigForFormat("flattenedOrc").get());
}
public Simulator() {
  Config config = context().system().settings().config().getConfig("caffeine.simulator");
  settings = new BasicSettings(config);
  List<Routee> routes = makeRoutes();
  router = new Router(new BroadcastRoutingLogic(), routes);
  remaining = routes.size();
  batchSize = settings.batchSize();
  stopwatch = Stopwatch.createStarted();
  reporter = settings.report().format().create(config);
}
@Test
public void testFlattenedAndNestedOrcConfig() throws Exception {
  String testConfFilePath = "convertibleHiveDatasetTest/flattenedAndNestedOrc.conf";
  Config config = ConfigFactory.parseResources(testConfFilePath).getConfig("hive.conversion.avro");
  ConvertibleHiveDataset cd = createTestConvertibleDataset(config);
  Assert.assertEquals(cd.getDestFormats(), ImmutableSet.of("flattenedOrc", "nestedOrc"));
  Assert.assertTrue(cd.getConversionConfigForFormat("flattenedOrc").isPresent());
  Assert.assertTrue(cd.getConversionConfigForFormat("nestedOrc").isPresent());
  validateFlattenedConfig(cd.getConversionConfigForFormat("flattenedOrc").get());
  validateNestedOrc(cd.getConversionConfigForFormat("nestedOrc").get());
}