Refine search
private static Configuration getHiveSiteContentsFromClasspath() { Configuration configuration = new Configuration(false); // Don't load defaults. configuration.addResource("hive-site.xml"); // NOTE: hive-site.xml is only available on client, not AM. return configuration; }
/**
 * Builds a Hadoop configuration, layering core-site.xml and hdfs-site.xml
 * from the supplied directory when one is given; otherwise the defaults stand.
 *
 * @param hadoopConfPath directory containing the site files; may be null/empty
 * @return the populated configuration
 */
private org.apache.hadoop.conf.Configuration getConf(String hadoopConfPath) {
    final org.apache.hadoop.conf.Configuration hadoopConf =
        new org.apache.hadoop.conf.Configuration();
    if (!Strings.isNullOrEmpty(hadoopConfPath)) {
        hadoopConf.addResource(new Path(hadoopConfPath, "core-site.xml"));
        hadoopConf.addResource(new Path(hadoopConfPath, "hdfs-site.xml"));
    } else {
        LOGGER.warn("no hadoop conf path is provided, will rely on default config");
    }
    return hadoopConf;
}
}
/**
 * Populates {@code conf} with the Hadoop XML files found in {@code hadoopConfDir},
 * preferring the legacy (pre-split) file names when either of them exists.
 *
 * @param hadoopConfDir directory holding the Hadoop configuration files
 * @param conf configuration to add the resources to
 * @return the same {@code conf} instance, for chaining
 */
public static Configuration grabConfiguration(String hadoopConfDir, Configuration conf) {
    final boolean legacyLayout = new File(hadoopConfDir, "hadoop-default.xml").exists()
        || new File(hadoopConfDir, "hadoop-site.xml").exists();
    if (legacyLayout) {
        // Old single-file Hadoop layout.
        conf.addResource(new Path(hadoopConfDir, "hadoop-default.xml"));
        conf.addResource(new Path(hadoopConfDir, "hadoop-site.xml"));
    } else {
        // Modern layout: one file per component.
        conf.addResource(new Path(hadoopConfDir, "mapred-site.xml"));
        conf.addResource(new Path(hadoopConfDir, "hdfs-site.xml"));
        conf.addResource(new Path(hadoopConfDir, "core-site.xml"));
    }
    return conf;
}
}
/**
 * Builds a Hadoop configuration; when a conf directory is supplied, its
 * core-site.xml and hdfs-site.xml are layered on top of the defaults.
 *
 * @param hadoopConfPath directory containing the site files; may be null/empty
 * @return the populated configuration
 */
private Configuration getConf(String hadoopConfPath) {
    final Configuration hadoopConf = new Configuration();
    if (!Strings.isNullOrEmpty(hadoopConfPath)) {
        hadoopConf.addResource(new Path(hadoopConfPath, "core-site.xml"));
        hadoopConf.addResource(new Path(hadoopConfPath, "hdfs-site.xml"));
    } else {
        LOGGER.warn("no hadoop conf path is provided, will rely on default config");
    }
    return hadoopConf;
}
/**
 * Reads the {@link Configuration} stored in the byte stream.
 *
 * @param bytes serialized configuration XML to read from
 * @return a valid configuration, populated only from the bytes (no defaults)
 * @throws IOException declared for caller compatibility
 */
private static Configuration readConfiguration(byte[] bytes) throws IOException {
    final Configuration conf = new Configuration(false);
    // Configuration consumes the stream when the resource is loaded.
    conf.addResource(new ByteArrayInputStream(bytes));
    return conf;
}
/**
 * Adds every Hadoop config file named by a runner property whose key starts
 * with {@code HADOOP_CONFIGFILE_} as a resource of {@code conf}.
 *
 * @param conf configuration to extend with the discovered files
 */
private static void addHadoopConfigPropertiesToConf(Configuration conf) {
    for (String propertyName : CompactionRunner.properties.stringPropertyNames()) {
        if (!propertyName.startsWith(HADOOP_CONFIGFILE_)) {
            continue; // unrelated property
        }
        String hadoopConfigFile = CompactionRunner.properties.getProperty(propertyName);
        conf.addResource(new Path(hadoopConfigFile));
        LOG.info("Added Hadoop Config File: " + hadoopConfigFile);
    }
}
/**
 * Merges the given resource files into one {@link Configuration}: each file is
 * loaded in isolation (no defaults) and its entries copied into the result,
 * so later files override earlier ones.
 *
 * @param resourcePaths paths of the XML resources to merge, in order
 * @return the merged configuration
 */
private static Configuration readConfiguration(List<String> resourcePaths) {
    final Configuration merged = new Configuration(false);
    for (String resourcePath : resourcePaths) {
        final Configuration single = new Configuration(false);
        single.addResource(new Path(resourcePath));
        copy(single, merged);
    }
    return merged;
}
/**
 * Builds the parser; locates hive-site.xml via {@link HiveConf} and, when
 * found, loads it into the parser's configuration.
 */
public HiveSiteHS2ConnectionFileParser() {
    hiveSiteURI = HiveConf.getHiveSiteLocation();
    conf = new Configuration();
    if (hiveSiteURI != null) {
        log.info("Using hive-site.xml at " + hiveSiteURI);
        conf.addResource(hiveSiteURI);
    } else {
        // Not fatal: the connection URL is simply built without site settings.
        log.debug("hive-site.xml not found for constructing the connection URL");
    }
}
/**
 * Builds an HBase configuration, layering each file from the comma-separated
 * list on top of the HBase defaults.
 *
 * @param configFiles comma-separated config file paths; may be null or blank
 * @return the populated configuration
 */
protected Configuration getConfigurationFromFiles(final String configFiles) {
    final Configuration hbaseConfig = HBaseConfiguration.create();
    if (StringUtils.isNotBlank(configFiles)) {
        for (final String configFile : configFiles.split(",")) {
            final String trimmed = configFile.trim();
            // FIX: skip empty segments (e.g. "a.xml,,b.xml" or a trailing
            // comma) — new Path("") throws IllegalArgumentException.
            if (!trimmed.isEmpty()) {
                hbaseConfig.addResource(new Path(trimmed));
            }
        }
    }
    return hbaseConfig;
}
/**
 * Guice provider: builds a Hadoop {@link Configuration} from the directory
 * named by {@code HADOOP_CONF_DIR_PATH}, loading only core-site.xml and
 * hdfs-site.xml (no built-in defaults).
 *
 * @return the configured Hadoop configuration
 */
@Inject
@Provides
@Singleton
public Configuration createHadoopConfiguration() {
    final String confDirPath = requireNonNull(this.props.get(HADOOP_CONF_DIR_PATH));
    final File confDir = new File(requireNonNull(confDirPath));
    checkArgument(confDir.exists() && confDir.isDirectory());

    final Configuration hadoopConf = new Configuration(false);
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(confDirPath, "core-site.xml"));
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(confDirPath, "hdfs-site.xml"));
    // Pin the hdfs:// scheme to DistributedFileSystem explicitly.
    hadoopConf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    return hadoopConf;
}
/**
 * Loads the SSL configuration resource (client- or server-side, per
 * {@code mode}) into a new defaults-free {@link Configuration}, carrying over
 * the require-client-cert flag from {@code conf}.
 *
 * @param conf source configuration holding the resource names and flag
 * @param mode whether to read the client or the server SSL resource
 * @return the SSL configuration
 */
public static Configuration readSSLConfiguration(Configuration conf, Mode mode) {
    final Configuration sslConf = new Configuration(false);
    sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
        conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, SSL_REQUIRE_CLIENT_CERT_DEFAULT));
    final String sslConfResource = (mode == Mode.CLIENT)
        ? conf.get(SSL_CLIENT_CONF_KEY, SSL_CLIENT_CONF_DEFAULT)
        : conf.get(SSL_SERVER_CONF_KEY, SSL_SERVER_CONF_DEFAULT);
    sslConf.addResource(sslConfResource);
    return sslConf;
}
/**
 * Builds an HBase configuration, layering each file from the comma-separated
 * list on top of the HBase defaults.
 *
 * @param configFiles comma-separated config file paths; may be null or blank
 * @return the populated configuration
 */
protected Configuration getConfigurationFromFiles(final String configFiles) {
    final Configuration hbaseConfig = HBaseConfiguration.create();
    if (StringUtils.isNotBlank(configFiles)) {
        for (final String configFile : configFiles.split(",")) {
            final String trimmed = configFile.trim();
            // FIX: skip empty segments (e.g. "a.xml,,b.xml" or a trailing
            // comma) — new Path("") throws IllegalArgumentException.
            if (!trimmed.isEmpty()) {
                hbaseConfig.addResource(new Path(trimmed));
            }
        }
    }
    return hbaseConfig;
}
/**
 * Loads the configuration at {@code confLocation}, resolves the configured
 * delegation token store class, instantiates it reflectively, and initializes
 * it for this server mode.
 *
 * @throws Exception when no token store implementation is configured, or the
 *         configured class cannot be loaded
 */
private void init() throws Exception {
    final Configuration conf = new Configuration();
    conf.addResource(new Path(confLocation));
    final String tokenStoreClassName =
        MetastoreConf.getVar(conf, MetastoreConf.ConfVars.DELEGATION_TOKEN_STORE_CLS, "");
    if (StringUtils.isBlank(tokenStoreClassName)) {
        throw new Exception("Could not find Delegation TokenStore implementation.");
    }
    final Class<? extends DelegationTokenStore> storeClass =
        Class.forName(tokenStoreClassName).asSubclass(DelegationTokenStore.class);
    delegationTokenStore = ReflectionUtils.newInstance(storeClass, conf);
    delegationTokenStore.init(null, serverMode);
}
public void refresh(Configuration conf, PolicyProvider provider) { // Get the system property 'hadoop.policy.file' String policyFile = System.getProperty("hadoop.policy.file", HADOOP_POLICY_FILE); // Make a copy of the original config, and load the policy file Configuration policyConf = new Configuration(conf); policyConf.addResource(policyFile); refreshWithLoadedConfiguration(policyConf, provider); }
/**
 * Creates the master, layering jstorm-yarn.xml on top of the YARN defaults.
 */
public JstormMaster() {
    conf = new YarnConfiguration();
    conf.addResource(new Path("jstorm-yarn.xml"));
}
// NOTE(review): this snippet appears garbled/truncated by extraction — the
// `if` headers for the hdfs-default/hdfs-site branches and several braces are
// missing, so it does not parse as-is. Recover the original method before
// making any code changes; intent seems to be layering hdfs-default,
// hdfs-site, then core-site/hdfs-site from a probed Hadoop conf dir — TODO confirm.
Configuration retConf = new Configuration(); .HDFS_DEFAULT_CONFIG, null); if (hdfsDefaultPath != null) { retConf.addResource(new org.apache.hadoop.fs.Path(hdfsDefaultPath)); } else { LOG.debug("Cannot find hdfs-default configuration file"); retConf.addResource(new org.apache.hadoop.fs.Path(hdfsSitePath)); } else { LOG.debug("Cannot find hdfs-site configuration file"); if (new File(possibleHadoopConfPath).exists()) { if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) { retConf.addResource(new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/core-site.xml")); retConf.addResource(new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/hdfs-site.xml"));
/**
 * Loads the LLAP daemon config resources into {@code conf}, then overrides a
 * set of YARN/IPC/ZK timeout settings from LLAP-specific config keys, and
 * finally snapshots the result for the registry.
 */
private void setupConf() {
    // Layer every daemon config file, then force a reload so they take effect.
    for (String f : LlapDaemonConfiguration.DAEMON_CONFIGS) {
        conf.addResource(f);
    }
    conf.reloadConfiguration();
    // Setup timeouts for various services.
    // Once we move to a Hadoop-2.8 dependency, the following parameter can be used.
    // conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC);
    // Until then the timeline-service retry policy key is set by its literal name.
    conf.set("yarn.timeline-service.entity-group-fs-store.retry-policy-spec",
        conf.get(CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC,
            CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC_DEFAULT));
    // RM connection timeouts, sourced from LLAP-level keys with defaults.
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
        conf.getLong(CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS,
            CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS_DEFAULT));
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
        conf.getLong(CONFIG_YARN_RM_RETRY_INTERVAL_MS,
            CONFIG_YARN_RM_RETRY_INTERVAL_MS_DEFAULT));
    // IPC client retry behaviour.
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
        conf.getInt(CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES,
            CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT));
    conf.setLong(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
        conf.getLong(CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS,
            CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS_DEFAULT));
    // ZK session timeout expects a time-unit suffix, hence the "ms" append.
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, (
        conf.getLong(CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS,
            CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS_DEFAULT) + "ms"));
    // Snapshot the fully-populated conf for the LLAP registry.
    llapRegistryConf = new Configuration(conf);
}
/**
 * Builds a Hive configuration, layering each file from the comma-separated
 * list on top of the HiveConf defaults.
 *
 * @param configFiles comma-separated config file paths; may be null or blank
 * @return the populated configuration
 */
public static Configuration getConfigurationFromFiles(final String configFiles) {
    final Configuration hiveConfig = new HiveConf();
    if (StringUtils.isNotBlank(configFiles)) {
        for (final String configFile : configFiles.split(",")) {
            final String trimmed = configFile.trim();
            // FIX: skip empty segments (e.g. "a.xml,,b.xml" or a trailing
            // comma) — new Path("") throws IllegalArgumentException.
            if (!trimmed.isEmpty()) {
                hiveConfig.addResource(new Path(trimmed));
            }
        }
    }
    return hiveConfig;
}
}
/**
 * Loads the beeline-site file and returns the connection properties for the
 * named JDBC URL whose configured value matches {@code propertyValue}
 * (case-insensitively). Returns empty properties when the file is missing.
 *
 * @param propertyValue the named-URL value to match against
 * @return the matching connection properties (keys with the named-URL prefix
 *         stripped), possibly empty
 * @throws BeelineSiteParseException if the config file cannot be parsed
 */
public Properties getConnectionProperties(String propertyValue) throws BeelineSiteParseException {
    Properties props = new Properties();
    String fileLocation = getFileLocation();
    if (fileLocation == null) {
        log.debug("Could not find Beeline configuration file: {}", DEFAULT_BEELINE_SITE_FILE_NAME);
        return props;
    }
    log.info("Beeline configuration file at: {}", fileLocation);
    // load the properties from config file (no Hadoop defaults wanted)
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(new File(fileLocation).toURI()));
    try {
        for (Entry<String, String> kv : conf) {
            String key = kv.getKey();
            // Keep only named-URL entries whose value matches the requested one;
            // the stored property key drops the common prefix.
            if (key.startsWith(BEELINE_CONNECTION_NAMED_JDBC_URL_PREFIX)
                && (propertyValue.equalsIgnoreCase(kv.getValue()))) {
                props.setProperty(key.substring(BEELINE_CONNECTION_NAMED_JDBC_URL_PREFIX.length()),
                    kv.getValue());
            }
        }
    } catch (Exception e) {
        throw new BeelineSiteParseException(e.getMessage(), e);
    }
    return props;
}
private Properties loadKafkaConsumerProperties() { File propFile = getKafkaConsumerFile(); if (propFile == null || !propFile.exists()) { logger.warn("fail to locate {}, use empty kafka consumer properties", KAFKA_CONSUMER_FILE); return new Properties(); } Properties properties = new Properties(); try (FileInputStream is = new FileInputStream(propFile)) { Configuration conf = new Configuration(); conf.addResource(is); properties.putAll(extractKafkaConfigToProperties(conf)); File propOverrideFile = new File(propFile.getParentFile(), propFile.getName() + ".override"); if (propOverrideFile.exists()) { try (FileInputStream ois = new FileInputStream(propOverrideFile)) { Configuration oconf = new Configuration(); oconf.addResource(ois); properties.putAll(extractKafkaConfigToProperties(oconf)); } } } catch (FileNotFoundException fne) { throw new IllegalArgumentException(fne); } catch (IOException e) { // close inputStream quietly } return properties; }