/**
 * Returns the string value at <code>path</code> if <code>config</code> has that path; otherwise returns an empty string.
 *
 * @param config config in which the path may be present
 * @param path   key to look for in the config object
 * @return string value at <code>path</code> if <code>config</code> has the path, otherwise an empty string
 */
public static String emptyIfNotPresent(Config config, String path) {
  return getString(config, path, StringUtils.EMPTY);
}
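// Illustration (not from the original source): a minimal, hedged usage sketch of emptyIfNotPresent above, assuming
// ConfigUtils is org.apache.gobblin.util.ConfigUtils and that com.typesafe.config.ConfigFactory and Guava's
// ImmutableMap are on the classpath. The key names and values are made up.
static void emptyIfNotPresentExample() {
  Config config = ConfigFactory.parseMap(ImmutableMap.of("source.path", "/data/input"));

  // Path present: returns the configured value.
  String present = ConfigUtils.emptyIfNotPresent(config, "source.path");   // "/data/input"
  // Path absent: returns StringUtils.EMPTY instead of throwing ConfigException.Missing.
  String missing = ConfigUtils.emptyIfNotPresent(config, "source.format"); // ""
}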
private String getAzkabanPassword(Config config) {
  // A password set as a JVM system property takes precedence over the value in the config.
  if (StringUtils.isNotBlank(System.getProperty(ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_SYSTEM_KEY))) {
    return System.getProperty(ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_SYSTEM_KEY);
  }
  return ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PASSWORD_KEY, StringUtils.EMPTY);
}
@VisibleForTesting
AccessControlAction(Config actionConfig, FileSystem fs, Config jobConfig) {
  super(actionConfig, fs, jobConfig);
  this.permission = actionConfig.hasPath(MODE_KEY)
      ? Optional.of(new FsPermission(actionConfig.getString(MODE_KEY)))
      : Optional.<FsPermission>absent();
  this.owner = Optional.fromNullable(ConfigUtils.getString(actionConfig, OWNER_KEY, null));
  this.group = Optional.fromNullable(ConfigUtils.getString(actionConfig, GROUP_KEY, null));
  this.selectionPolicy = createSelectionPolicy(actionConfig, jobConfig);
}
/**
 * Build the {@link HelixManager} for the Application Master.
 */
protected static HelixManager buildHelixManager(Config config, String zkConnectionString, String clusterName,
    InstanceType type) {
  String helixInstanceName = ConfigUtils.getString(config, GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_KEY,
      GobblinClusterManager.class.getSimpleName());
  // Note: clusterName is the config key under which the Helix cluster name is stored, not the cluster name itself.
  return HelixManagerFactory.getZKHelixManager(config.getString(clusterName), helixInstanceName, type,
      zkConnectionString);
}
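// Illustration (not from the original source): a hedged call sketch for buildHelixManager above, assuming it is
// invoked from code that can see the protected static method. The third argument is a config *key*: the actual
// Helix cluster name is read from the Config under that key. The ZooKeeper address and cluster name values are
// made up, and GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY is assumed to be the usual key.
static HelixManager connectControllerExample() throws Exception {
  Config clusterConfig = ConfigFactory.parseMap(ImmutableMap.of(
      GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, "GobblinYarnCluster"));
  HelixManager manager = GobblinClusterManager.buildHelixManager(clusterConfig, "zk-host:2181",
      GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY, InstanceType.CONTROLLER);
  manager.connect(); // establishes the ZooKeeper session for this CONTROLLER instance
  return manager;
}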
@Override
protected AsyncRequestBuilder<GenericRecord, RestRequest> createRequestBuilder(Config config) {
  String urlTemplate = config.getString(HttpConstants.URL_TEMPLATE);
  String verb = config.getString(HttpConstants.VERB);
  String protocolVersion = ConfigUtils.getString(config, HttpConstants.PROTOCOL_VERSION, DEFAULT_PROTOCOL_VERSION);
  return new R2RestRequestBuilder(urlTemplate, verb, protocolVersion);
}
@Override
public DatasetAwareMetadataProvider createMetadataProvider(Config metaConfig) {
  String permission = ConfigUtils.getString(metaConfig, ALL_PERMISSOIN, "");
  log.info("User defined permission is: " + permission);
  return new SimpleConfigMetadataProvider(permission);
}
private static ErrorType getType(Config config) {
  String type = ConfigUtils.getString(config, ERROR_TYPE_CONFIGURATION_KEY, "");
  ErrorType errorType;
  if (!type.isEmpty()) {
    errorType = ErrorType.valueOf(type.toUpperCase());
  } else {
    errorType = DEFAULT_ERROR_TYPE;
  }
  return errorType;
}
public FSDatasetDescriptor(Config config) {
  Preconditions.checkArgument(config.hasPath(DatasetDescriptorConfigKeys.PLATFORM_KEY),
      "Dataset descriptor config must specify platform");
  this.platform = config.getString(DatasetDescriptorConfigKeys.PLATFORM_KEY);
  this.path = PathUtils.getPathWithoutSchemeAndAuthority(new Path(ConfigUtils.getString(config,
      DatasetDescriptorConfigKeys.PATH_KEY, DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY))).toString();
  this.formatConfig = new FormatConfig(config);
  this.isRetentionApplied = ConfigUtils.getBoolean(config, DatasetDescriptorConfigKeys.IS_RETENTION_APPLIED_KEY, false);
  this.description = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.DESCRIPTION_KEY, "");
  this.rawConfig = config.withFallback(this.formatConfig.getRawConfig()).withFallback(DEFAULT_FALLBACK);
}
/**
 * Build the {@link HelixManager} for the Service Master.
 */
private HelixManager buildHelixManager(Config config, String zkConnectionString) {
  String helixClusterName = config.getString(ServiceConfigKeys.HELIX_CLUSTER_NAME_KEY);
  String helixInstanceName = ConfigUtils.getString(config, ServiceConfigKeys.HELIX_INSTANCE_NAME_KEY,
      GobblinServiceManager.class.getSimpleName());

  LOGGER.info("Creating Helix cluster if not already present [overwrite = false]: " + zkConnectionString);
  HelixUtils.createGobblinHelixCluster(zkConnectionString, helixClusterName, false);

  return HelixUtils.buildHelixManager(helixInstanceName, helixClusterName, zkConnectionString);
}
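// Illustration (not from the original source): a hedged sketch of the config the Service Master builder above
// expects. The cluster-name key is required; the instance-name key is optional and defaults to
// GobblinServiceManager.class.getSimpleName(). The literal values are made up.
static Config exampleServiceMasterConfig() {
  return ConfigFactory.parseMap(ImmutableMap.of(
      ServiceConfigKeys.HELIX_CLUSTER_NAME_KEY, "GobblinServiceCluster",
      ServiceConfigKeys.HELIX_INSTANCE_NAME_KEY, "GobblinServiceManager-0"));
}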
public static String constructProjectName(JobSpec jobSpec, Config config) {
  String projectNamePrefix =
      ConfigUtils.getString(config, ServiceAzkabanConfigKeys.AZKABAN_PROJECT_NAME_PREFIX_KEY, "");
  String projectNamePostfix = null == jobSpec.getUri() ? ""
      : jobSpec.getUri().toString().replaceAll("_", "-").replaceAll("[^A-Za-z0-9\\-]", "_");
  return trimProjectName(String.format("%s_%s", projectNamePrefix, projectNamePostfix));
}
private ConversionConfig(Config config, Table table, String destinationFormat) {
  super(config, table);

  // Required
  this.destinationFormat = destinationFormat;

  // Optional
  this.destinationViewName =
      Optional.fromNullable(resolveTemplate(ConfigUtils.getString(config, DESTINATION_VIEW_KEY, null), table));
  this.updateViewAlwaysEnabled = ConfigUtils.getBoolean(config, UPDATE_VIEW_ALWAYS_ENABLED, true);
}
public FsAuditSink(Config config, ValueAuditRuntimeMetadata auditMetadata) throws IOException {
  this.auditDirPath =
      new Path(ConfigUtils.getString(config, FS_SINK_AUDIT_OUTPUT_PATH_KEY, FS_SINK_AUDIT_OUTPUT_DEFAULT_PATH));
  this.fs = this.auditDirPath.getFileSystem(new Configuration());
  this.auditMetadata = auditMetadata;
  this.auditFileOutputStream = closer.register(fs.create(getAuditFilePath()));

  DataFileWriter<GenericRecord> dataFileWriter =
      this.closer.register(new DataFileWriter<GenericRecord>(new GenericDatumWriter<GenericRecord>()));
  this.writer = this.closer.register(
      dataFileWriter.create(this.auditMetadata.getTableMetadata().getTableSchema(), this.auditFileOutputStream));
}
/**
 * Returns an identifier for the data source based on the configuration.
 *
 * @param config configuration
 * @return a {@link String} that identifies the data source
 */
public static String getDataSourceId(Config config) {
  PasswordManager passwordManager = PasswordManager.getInstance(ConfigUtils.configToProperties(config));

  return ConfigUtils.getString(config, ConfigurationKeys.STATE_STORE_DB_JDBC_DRIVER_KEY,
      ConfigurationKeys.DEFAULT_STATE_STORE_DB_JDBC_DRIVER) + "::"
      + config.getString(ConfigurationKeys.STATE_STORE_DB_URL_KEY) + "::"
      + passwordManager.readPassword(config.getString(ConfigurationKeys.STATE_STORE_DB_USER_KEY));
}
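// Illustration (not from the original source): a hedged usage sketch for getDataSourceId above, reusing the same
// ConfigurationKeys constants the method reads. The JDBC URL and user are made up. With no JDBC driver key set,
// the default driver is used, so the identifier has the shape
// "<driver>::<url>::<user after PasswordManager.readPassword>".
static String exampleDataSourceId() {
  Config dbConfig = ConfigFactory.parseMap(ImmutableMap.of(
      ConfigurationKeys.STATE_STORE_DB_URL_KEY, "jdbc:mysql://localhost:3306/gobblin",
      ConfigurationKeys.STATE_STORE_DB_USER_KEY, "gobblin"));
  return getDataSourceId(dbConfig);
}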
public FormatConfig(Config config) {
  this.format = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.FORMAT_KEY,
      DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);
  this.codecType = ConfigUtils.getString(config, DatasetDescriptorConfigKeys.CODEC_KEY,
      DatasetDescriptorConfigKeys.DATASET_DESCRIPTOR_CONFIG_ANY);
  this.encryptionConfig = new EncryptionConfig(
      ConfigUtils.getConfig(config, DatasetDescriptorConfigKeys.ENCYPTION_PREFIX, ConfigFactory.empty()));
  this.rawConfig = config
      .withFallback(this.encryptionConfig.getRawConfig().atPath(DatasetDescriptorConfigKeys.ENCYPTION_PREFIX))
      .withFallback(DEFAULT_FALLBACK);
}
public SimpleKafkaSpecProducer(Config config, Optional<Logger> log) {
  _kafkaProducerClassName = ConfigUtils.getString(config, KAFKA_DATA_WRITER_CLASS_KEY, DEFAULT_KAFKA_DATA_WRITER_CLASS);

  try {
    _serializer = new AvroBinarySerializer<>(AvroJobSpec.SCHEMA$, new FixedSchemaVersionWriter());
    _config = config;
  } catch (IOException e) {
    throw new RuntimeException("Could not create AvroBinarySerializer", e);
  }
}
@Override
public Schema convertSchema(Schema inputSchema, WorkUnitState workUnit) throws SchemaConversionException {
  Preconditions.checkArgument(inputSchema.getFields().equals(gobblinTrackingEventSchema.getFields()));

  Schema outputSchema = Schema.createRecord(ConfigUtils.getString(config, NEW_SCHEMA_NAME, inputSchema.getName()),
      inputSchema.getDoc(), inputSchema.getNamespace(), inputSchema.isError());
  outputSchema.setFields(newFields);
  return outputSchema;
}
public TimeBasedDatasetStoreDataset(Key key, List<DatasetStateStoreEntryManager> entries, Properties props) {
  super(key, entries);
  this.versionFinder = new TimestampedDatasetStateStoreVersionFinder();
  Config propsAsConfig = ConfigUtils.propertiesToConfig(props);

  // Strip the retention config namespace since the selection policy looks for configuration without the namespace.
  Config retentionConfig = ConfigUtils.getConfigOrEmpty(propsAsConfig,
      ConfigurableCleanableDataset.RETENTION_CONFIGURATION_KEY);
  Config retentionConfigWithFallback = retentionConfig.withFallback(propsAsConfig);

  this.versionSelectionPolicy = createSelectionPolicy(
      ConfigUtils.getString(retentionConfigWithFallback, SELECTION_POLICY_CLASS_KEY, DEFAULT_SELECTION_POLICY_CLASS),
      retentionConfigWithFallback, props);
}
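// Illustration (not from the original source): a hedged sketch of the namespace stripping performed above, assuming
// ConfigurableCleanableDataset.RETENTION_CONFIGURATION_KEY resolves to "gobblin.retention" and that
// ConfigUtils.getConfigOrEmpty returns the sub-config at that path (or an empty config). Key and class names are
// illustrative only.
static String exampleStrippedRetentionKey() {
  Config propsAsConfig = ConfigFactory.parseString(
      "gobblin.retention.selection.policy.class = com.example.MySelectionPolicy");
  Config retentionConfig = ConfigUtils.getConfigOrEmpty(propsAsConfig, "gobblin.retention");
  // The namespaced property is now addressable under the bare key the selection policy looks for.
  return retentionConfig.getString("selection.policy.class"); // "com.example.MySelectionPolicy"
}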
static FailureOption getFailureOption(Dag<JobExecutionPlan> dag) {
  if (dag.isEmpty()) {
    return null;
  }
  DagNode<JobExecutionPlan> dagNode = dag.getStartNodes().get(0);
  String failureOption = ConfigUtils.getString(getJobConfig(dagNode), ConfigurationKeys.FLOW_FAILURE_OPTION,
      DagManager.DEFAULT_FLOW_FAILURE_OPTION);
  return FailureOption.valueOf(failureOption);
}
private Kafka08ConsumerClient(Config config) {
  super(config);
  bufferSize = ConfigUtils.getInt(config, CONFIG_KAFKA_BUFFER_SIZE_BYTES, CONFIG_KAFKA_BUFFER_SIZE_BYTES_DEFAULT);
  clientName = ConfigUtils.getString(config, CONFIG_KAFKA_CLIENT_NAME, CONFIG_KAFKA_CLIENT_NAME_DEFAULT);
  fetchCorrelationId = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID,
      CONFIG_KAFKA_FETCH_REQUEST_CORRELATION_ID_DEFAULT);
  fetchTopicRetries = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES,
      CONFIG_KAFKA_FETCH_TOPIC_NUM_TRIES_DEFAULT);
  fetchOffsetRetries = ConfigUtils.getInt(config, CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES,
      CONFIG_KAFKA_FETCH_OFFSET_NUM_TRIES_DEFAULT);
}
@Override
public FileSystem instrumentFileSystem(FileSystem fs, SharedResourcesBroker<S> broker,
    ConfigView<S, FileSystemKey> config) {
  try {
    String serviceName = ConfigUtils.getString(config.getConfig(), SERVICE_NAME_CONF_KEY, "");
    Limiter limiter =
        broker.getSharedResource(new SharedLimiterFactory<S>(), new FileSystemLimiterKey(config.getKey().getUri()));
    return new ThrottledFileSystem(fs, limiter, serviceName);
  } catch (NotConfiguredException nce) {
    throw new RuntimeException(nce);
  }
}