/**
 * Subscriber for time and stream-size scheduler events.
 *
 * <p>Fetches from the time-event topic non-transactionally, since these events
 * are published outside of any transaction.
 */
SchedulerEventSubscriberService() {
  // Time and stream size events are non-transactional
  super("scheduler.event", cConf.get(Constants.Scheduler.TIME_EVENT_TOPIC),
      cConf.getInt(Constants.Scheduler.TIME_EVENT_FETCH_SIZE), false);
}
/**
 * Constructs a SchedulerQueueResolver from the given configuration and namespace admin.
 *
 * @param cConf CDAP configuration supplying the app and system scheduler queue names
 * @param namespaceQueryAdmin admin used to query namespace-level settings
 */
@Inject
public SchedulerQueueResolver(CConfiguration cConf, NamespaceQueryAdmin namespaceQueryAdmin) {
  this.namespaceQueryAdmin = namespaceQueryAdmin;
  // Empty string is the fallback when no queue has been configured.
  this.defaultQueue = cConf.get(Constants.AppFabric.APP_SCHEDULER_QUEUE, "");
  this.systemQueue = cConf.get(Constants.Service.SCHEDULER_QUEUE, "");
}
/**
 * Flags a configuration conflict between transaction pruning and HBase replication.
 *
 * <p>When a custom HBase DDL executor extension directory is configured (used for
 * replication setups) and automatic invalid-transaction-list pruning is enabled at the
 * same time, logs an error and records both conflicting configuration keys.
 *
 * @param problemKeys mutable set collecting the names of conflicting configuration keys
 */
private void checkPruningAndReplication(Set<String> problemKeys) {
  String hbaseDDLExtensionDir = cConf.get(Constants.HBaseDDLExecutor.EXTENSIONS_DIR);
  boolean pruningEnabled = cConf.getBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE);
  if (hbaseDDLExtensionDir != null && pruningEnabled) {
    // Fixed: dropped the stray leading space that was at the start of this log message.
    LOG.error("Invalid transaction list cannot be automatically pruned when replication is in use. "
            + "Please disable pruning by setting {} to false, or remove your custom HBase DDL executor from {}.",
        TxConstants.TransactionPruning.PRUNE_ENABLE,
        Constants.HBaseDDLExecutor.EXTENSIONS_DIR);
    problemKeys.add(Constants.HBaseDDLExecutor.EXTENSIONS_DIR);
    problemKeys.add(TxConstants.TransactionPruning.PRUNE_ENABLE);
  }
}
/**
 * Guice setter injection of the configuration object into this singleton.
 *
 * <p>Reads the LevelDB base directory (required), the block size, the cache size
 * and the fsync-on-write flag, using the documented defaults where a key is unset.
 *
 * @param config CDAP configuration to read the LevelDB settings from
 */
@Inject
public void setConfiguration(CConfiguration config) {
  basePath = config.get(Constants.CFG_DATA_LEVELDB_DIR);
  Preconditions.checkNotNull(basePath, "No base directory configured for LevelDB.");

  boolean fsync = config.getBoolean(Constants.CFG_DATA_LEVELDB_FSYNC,
      Constants.DEFAULT_DATA_LEVELDB_FSYNC);
  writeOptions = new WriteOptions().sync(fsync);

  blockSize = config.getInt(Constants.CFG_DATA_LEVELDB_BLOCKSIZE,
      Constants.DEFAULT_DATA_LEVELDB_BLOCKSIZE);
  cacheSize = config.getLong(Constants.CFG_DATA_LEVELDB_CACHESIZE,
      Constants.DEFAULT_DATA_LEVELDB_CACHESIZE);
}
@Inject DefaultAuditPublisher(CConfiguration cConf, MessagingService messagingService) { this.messagingService = messagingService; this.auditTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Audit.TOPIC)); this.retryStrategy = RetryStrategies.timeLimit( cConf.getLong(Constants.Audit.PUBLISH_TIMEOUT_MS), TimeUnit.MILLISECONDS, RetryStrategies.exponentialDelay(10, 200, TimeUnit.MILLISECONDS)); }
/**
 * Checks whether the configured secure store provider is "none".
 *
 * @param cConf configuration to read the provider setting from
 * @return {@code true} if the provider value equals {@code NONE}, ignoring case
 */
public static boolean isNone(final CConfiguration cConf) {
  String provider = cConf.get(Constants.Security.Store.PROVIDER);
  return NONE.equalsIgnoreCase(provider);
}
/**
 * Subscriber for dataset partition events.
 *
 * <p>Fetches transactionally: partition events are published from within a
 * transaction, so consuming them must be transactional as well.
 */
DataEventSubscriberService() {
  // Dataset partition events are published transactionally, hence fetch need to be transactional too.
  super("scheduler.data.event", cConf.get(Constants.Dataset.DATA_EVENT_TOPIC),
      cConf.getInt(Constants.Scheduler.DATA_EVENT_FETCH_SIZE), true);
}
/**
 * Creates a state writer that also emits periodic heartbeats for a running program.
 *
 * <p>Delegates to the main constructor with the heartbeat interval read from
 * configuration and a messaging-based publisher on the system program-status topic.
 *
 * @param programRunId run whose state is being reported
 * @param programStateWriter underlying writer receiving state transitions
 * @param messagingService transport used by the state publisher
 * @param cConf supplies the heartbeat interval and the program-status event topic
 */
public ProgramStateWriterWithHeartBeat(ProgramRunId programRunId,
                                       ProgramStateWriter programStateWriter,
                                       MessagingService messagingService,
                                       CConfiguration cConf) {
  this(programRunId, programStateWriter,
      cConf.getLong(Constants.ProgramHeartbeat.HEARTBEAT_INTERVAL_SECONDS),
      new MessagingProgramStatePublisher(messagingService,
          NamespaceId.SYSTEM.topic(cConf.get(Constants.AppFabric.PROGRAM_STATUS_EVENT_TOPIC)),
          RetryStrategies.fromConfiguration(cConf, "system.program.state.")));
}
/**
 * Loads program runtime provider extensions from the configured runtime
 * extensions directory; an empty string (no extensions) is used when unset.
 *
 * @param cConf configuration supplying the runtime extensions directory
 */
@VisibleForTesting
@Inject
public ProgramRuntimeProviderLoader(CConfiguration cConf) {
  super(cConf.get(Constants.AppFabric.RUNTIME_EXT_DIR, ""));
  this.cConf = cConf;
}
/**
 * Subscriber for program status record events.
 *
 * <p>Fetches transactionally, because these events are published
 * transactionally from the AppMetadataStore.
 */
ProgramStatusEventSubscriberService() {
  // Fetch transactionally since publishing from AppMetadataStore is transactional.
  super("scheduler.program.event",
      cConf.get(Constants.AppFabric.PROGRAM_STATUS_RECORD_EVENT_TOPIC),
      cConf.getInt(Constants.Scheduler.PROGRAM_STATUS_EVENT_FETCH_SIZE), true);
}
/**
 * Removes the persisted shutdown-time marker file, if it exists.
 * Logs an error when the file is present but could not be deleted.
 */
private void resetShutdownTime() {
  File marker = new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR),
      Constants.Replication.CDAP_SHUTDOWN_TIME_FILENAME).getAbsoluteFile();
  if (marker.exists()) {
    if (!marker.delete()) {
      LOG.error("Failed to reset shutdown time file {}", marker);
    }
  }
}
/**
 * Publishes serialized log events to TMS, spread across the configured number of
 * topic partitions.
 *
 * @param cConf supplies the topic prefix, partition count and partition-key strategy
 * @param messagingService transport used to publish log events
 * @param queueSize maximum number of buffered log events
 */
private TMSLogPublisher(CConfiguration cConf, MessagingService messagingService, int queueSize) {
  super(queueSize, RetryStrategies.fromConfiguration(cConf, "system.log.process."));
  this.topicPrefix = cConf.get(Constants.Logging.TMS_TOPIC_PREFIX);
  this.numPartitions = cConf.getInt(Constants.Logging.NUM_PARTITIONS);
  this.loggingEventSerializer = new LoggingEventSerializer();
  // Use locale-independent upper-casing: with a locale-sensitive default (e.g. Turkish,
  // where 'i' upper-cases to a dotted capital I) the result would not match the enum
  // constant names and valueOf would throw.
  this.logPartitionType = LogPartitionType.valueOf(
      cConf.get(Constants.Logging.LOG_PUBLISH_PARTITION_KEY).toUpperCase(java.util.Locale.ROOT));
  this.messagingContext = new MultiThreadMessagingContext(messagingService);
}
/**
 * Reads the list of provisioner extension directories from configuration.
 *
 * <p>The configured value is a semicolon-separated list of directory paths;
 * empty entries are dropped and entries are trimmed.
 *
 * @param cConf configuration supplying the provisioner extensions directory list
 */
@Inject
public DefaultProvisionerConfigProvider(CConfiguration cConf) {
  String extDirectory = cConf.get(Constants.Provisioner.EXTENSIONS_DIR);
  // Guard against an unset property: Splitter.split(null) would throw an NPE.
  this.extDirs = extDirectory == null
      ? ImmutableList.of()
      : ImmutableList.copyOf(Splitter.on(';').omitEmptyStrings().trimResults().split(extDirectory));
}
/**
 * Provides the bind address for master services by resolving the configured
 * address, falling back to the localhost address when resolution fails.
 *
 * @param cConf configuration supplying the bind-address setting
 * @return the resolved {@link InetAddress} to bind master services to
 */
@Provides
@Named(Constants.Service.MASTER_SERVICES_BIND_ADDRESS)
@SuppressWarnings("unused")
public InetAddress providesHostname(CConfiguration cConf) {
  String configured = cConf.get(Constants.Service.MASTER_SERVICES_BIND_ADDRESS);
  InetAddress fallback = new InetSocketAddress("localhost", 0).getAddress();
  return Networks.resolve(configured, fallback);
}
/**
 * Creates an announcer whose ZK client is namespaced under the Twill
 * application path of the Spark client program.
 */
@Inject
SparkServiceAnnouncer(CConfiguration cConf, ZKClient zKClient, ProgramId programId) {
  // The namespace points at the Twill application of the Spark client.
  String namespace = String.format("%s/%s",
      cConf.get(Constants.CFG_TWILL_ZK_NAMESPACE), ServiceDiscoverable.getName(programId));
  this.zkClient = ZKClients.namespace(zKClient, namespace);
}
/**
 * Writes workflow state by publishing to the system metadata topic via TMS.
 *
 * @param cConf supplies the metadata messaging topic and retry settings
 * @param messagingService transport used for publishing
 */
@Inject
MessagingWorkflowStateWriter(CConfiguration cConf, MessagingService messagingService) {
  this.messagingService = messagingService;
  this.retryStrategy = RetryStrategies.fromConfiguration(cConf, "system.metadata.");
  this.topic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Metadata.MESSAGING_TOPIC));
}
/**
 * Gets or creates the metrics-consumer meta table named in configuration and
 * wraps it in a {@link MetricsConsumerMetaTable}.
 *
 * @return the consumer meta table backed by the configured metrics table
 */
@Override
public MetricsConsumerMetaTable createConsumerMeta() {
  MetricsTable metaTable = getOrCreateMetricsTable(
      cConf.get(Constants.Metrics.METRICS_META_TABLE), DatasetProperties.EMPTY);
  return new MetricsConsumerMetaTable(metaTable);
}
/**
 * Writes program state by publishing to the system program-status event topic
 * through the messaging service.
 *
 * @param cConf supplies the status-event topic and retry settings
 * @param messagingService transport used by the underlying publisher
 */
@Inject
public MessagingProgramStateWriter(CConfiguration cConf, MessagingService messagingService) {
  String topicName = cConf.get(Constants.AppFabric.PROGRAM_STATUS_EVENT_TOPIC);
  this.programStatePublisher = new MessagingProgramStatePublisher(
      messagingService,
      NamespaceId.SYSTEM.topic(topicName),
      RetryStrategies.fromConfiguration(cConf, "system.program.state."));
}
/**
 * Renders a table id as its HBase table name string.
 *
 * <p>Tables in the default namespace use the bare prefixed name; any other
 * namespace is rendered as {@code <namespace>:<prefixed-name>}.
 *
 * @param tableId the table id to convert; must not be null
 * @return the HBase table name string
 */
@Override
protected String getTableNameAsString(TableId tableId) {
  Preconditions.checkArgument(tableId != null, "TableId should not be null.");
  String tablePrefix = cConf.get(Constants.Dataset.TABLE_PREFIX);
  // The prefixed HBase name is needed on both paths, so compute it once.
  String hBaseName = HTableNameConverter.toHBaseTableName(tablePrefix, tableId);
  if (NamespaceId.DEFAULT.getNamespace().equals(tableId.getNamespace())) {
    return hBaseName;
  }
  return tableId.getNamespace() + ':' + hBaseName;
}
@Test(expected = DataSetException.class) public void testAbsolutePathInsideCDAPDouble() throws IOException, DatasetManagementException { // test that it rejects also paths that have // in them String absolutePath = dsFrameworkUtil.getConfiguration() .get(Constants.CFG_LOCAL_DATA_DIR).replace("/", "//").concat("/hello"); dsFrameworkUtil.createInstance("fileSet", DatasetFrameworkTestUtil.NAMESPACE_ID.dataset("badFileSet"), FileSetProperties.builder().setBasePath(absolutePath).build()); }