public void updateConfiguration(Configuration config) {
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class, DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class, SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);
    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
}
@Inject
@Provides
@Singleton
public Configuration createHadoopConfiguration() {
    final String hadoopConfDirPath = requireNonNull(this.props.get(HADOOP_CONF_DIR_PATH));
    final File hadoopConfDir = new File(requireNonNull(hadoopConfDirPath));
    checkArgument(hadoopConfDir.exists() && hadoopConfDir.isDirectory());

    final Configuration hadoopConf = new Configuration(false);
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "core-site.xml"));
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "hdfs-site.xml"));
    hadoopConf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    return hadoopConf;
}
@Test
public void testConsumerProperties() {
    Configuration configuration = new Configuration();
    configuration.set("kafka.bootstrap.servers", "localhost:9090");
    configuration.set("kafka.consumer.fetch.max.wait.ms", "40");
    configuration.set("kafka.consumer.my.new.wait.ms", "400");

    Properties properties = KafkaUtils.consumerProperties(configuration);

    Assert.assertEquals("localhost:9090", properties.getProperty("bootstrap.servers"));
    Assert.assertEquals("40", properties.getProperty("fetch.max.wait.ms"));
    Assert.assertEquals("400", properties.getProperty("my.new.wait.ms"));
}
@Test
public void testGetZooKeeperClusterKey() {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n");
    conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333");
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase");
    String clusterKey = ZKConfig.getZooKeeperClusterKey(conf, "test");
    assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n"));
    assertEquals("localhost:3333:hbase,test", clusterKey);
}
/**
 * Tests that HdfsUtils#setFullFileStatus does not throw an exception when setting
 * permissions recursively.
 */
@Test
public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.acls.enabled", "false");

    Path fakeTarget = new Path("fakePath");
    HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
    FileStatus mockSourceStatus = mock(FileStatus.class);
    FsShell mockFsShell = mock(FsShell.class);

    when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
    when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
    doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));

    HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell);
    verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()});
}
@Test
public void testNoCodec() {
    Configuration c = new Configuration();
    c.set("hbase.client.default.rpc.codec", "");
    String codec = AbstractRpcClient.getDefaultCodec(c);
    assertTrue(codec == null || codec.length() == 0);
}
@Test
public void testCreateBasedOnConfig() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName());
    StripeStoreEngine se = createEngine(conf);
    assertTrue(se.getCompactionPolicy() instanceof StripeCompactionPolicy);
}
@Test
public void testFileBucketCacheConfig() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility(this.conf);
    try {
        Path p = new Path(htu.getDataTestDir(), "bc.txt");
        FileSystem fs = FileSystem.get(this.conf);
        fs.create(p).close();
        this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + p);
        doBucketCacheConfigTest();
    } finally {
        htu.cleanupTestDir();
    }
}
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Error creating an instance of .*")
public void testCustomCredentialsClassCannotBeFound() throws Exception {
    Configuration config = new Configuration();
    config.set(S3_USE_INSTANCE_CREDENTIALS, "false");
    config.set(S3_CREDENTIALS_PROVIDER, "com.example.DoesNotExist");

    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        fs.initialize(new URI("s3n://test-bucket/"), config);
    }
}
static void setWorkOutputPath(TaskAttemptContext context) throws IOException {
    String outputPath = context.getConfiguration().get("mapred.output.dir");
    // we need to do this to get the task path and set it for mapred implementation
    // since it can't be done automatically because of mapreduce->mapred abstraction
    if (outputPath != null) {
        context.getConfiguration().set("mapred.work.output.dir",
            new FileOutputCommitter(new Path(outputPath), context).getWorkPath().toString());
    }
}
@SuppressWarnings({"OverlyStrongTypeCast", "ConstantConditions"}) @Test public void testGetMetadataRetryCounter() { int maxRetries = 2; try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) { MockAmazonS3 s3 = new MockAmazonS3(); s3.setGetObjectMetadataHttpCode(SC_INTERNAL_SERVER_ERROR); Configuration configuration = new Configuration(); configuration.set(S3_MAX_BACKOFF_TIME, "1ms"); configuration.set(S3_MAX_RETRY_TIME, "5s"); configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries); fs.initialize(new URI("s3n://test-bucket/"), configuration); fs.setS3Client(s3); fs.getS3ObjectMetadata(new Path("s3n://test-bucket/test")); } catch (Throwable expected) { assertInstanceOf(expected, AmazonS3Exception.class); assertEquals(((AmazonS3Exception) expected).getStatusCode(), SC_INTERNAL_SERVER_ERROR); assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetMetadataRetries().getTotalCount(), maxRetries); } }
@VisibleForTesting
static void serializeJobState(FileSystem fs, Path mrJobDir, Configuration conf, JobState jobState, Job job)
        throws IOException {
    Path jobStateFilePath = new Path(mrJobDir, JOB_STATE_FILE_NAME);
    // Write the job state with an empty task set (work units are read by the mapper from a different file)
    try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath))) {
        jobState.write(dataOutputStream, false,
            conf.getBoolean(SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY, DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES));
    }

    job.getConfiguration().set(ConfigurationKeys.JOB_STATE_FILE_PATH_KEY, jobStateFilePath.toString());

    DistributedCache.addCacheFile(jobStateFilePath.toUri(), job.getConfiguration());
    job.getConfiguration().set(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME, jobStateFilePath.getName());
}
private static void addInputPath(Job job, Iterable<String> pathStrings, Class<? extends InputFormat> inputFormatClass) {
    Configuration conf = job.getConfiguration();
    StringBuilder inputFormats = new StringBuilder(
        StringUtils.nullToEmptyNonDruidDataString(conf.get(MultipleInputs.DIR_FORMATS))
    );

    String[] paths = Iterables.toArray(pathStrings, String.class);
    for (int i = 0; i < paths.length - 1; i++) {
        if (inputFormats.length() > 0) {
            inputFormats.append(',');
        }
        inputFormats.append(paths[i]).append(';').append(inputFormatClass.getName());
    }
    if (inputFormats.length() > 0) {
        conf.set(MultipleInputs.DIR_FORMATS, inputFormats.toString());
    }
    // add last one separately for possible initialization in MultipleInputs
    MultipleInputs.addInputPath(job, new Path(paths[paths.length - 1]), inputFormatClass);
}
private void setupConf(Configuration conf) {
    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
    conf.set("hbase.balancer.tablesOnMaster", "none");
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
    conf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 3);
    conf.setBoolean("hbase.split.writer.creation.bounded", true);
    conf.setInt("hbase.regionserver.hlog.splitlog.writer.threads", 8);
    LOG.info("WAL splitting coordinated by zk? {}", splitWALCoordinatedByZK);
    conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, splitWALCoordinatedByZK);
}
@Before
public void initConfig() {
    hadoopConfig = new org.apache.hadoop.conf.Configuration();
    hadoopConfig.set(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
}
public static void setUpBaseConf(Configuration conf) {
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    conf.setInt("hbase.regionserver.msginterval", 100);
    conf.setInt("hbase.client.pause", 250);
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf.setBoolean("hbase.master.enabletable.roundrobin", true);
    conf.setInt("mapreduce.map.maxattempts", 10);
    conf.set(HConstants.HBASE_DIR, testDir.toString());
}
public static Path registerFile(DataCache cache, Path path, Object fileKey,
        TreeMap<Long, Long> index, Configuration conf, String tag) throws IOException {
    long splitId = currentSplitId.incrementAndGet();
    CacheAwareInputStream stream = new CacheAwareInputStream(cache, conf, index, path, fileKey, -1, tag);
    if (files.putIfAbsent(splitId, stream) != null) {
        throw new IOException("Record already exists for " + splitId);
    }
    // Register the cache-aware FileSystem implementation for its scheme in the job conf,
    // then return a path under that scheme that resolves to the stream registered above.
    conf.set("fs." + LlapCacheAwareFs.SCHEME + ".impl", LlapCacheAwareFs.class.getCanonicalName());
    return new Path(SCHEME + "://" + SCHEME + "/" + splitId);
}
@Before
public void setUp() throws Exception {
    baseConf = new Configuration();
    baseConf.set("A", "1");
    baseConf.setInt("B", 2);
    baseConf.set("C", "3");
    baseConfSize = baseConf.size();
}