private static Configuration readConfiguration(List<String> resourcePaths) {
  Configuration result = new Configuration(false);
  for (String resourcePath : resourcePaths) {
    Configuration resourceProperties = new Configuration(false);
    resourceProperties.addResource(new Path(resourcePath));
    copy(resourceProperties, result);
  }
  return result;
}

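// The copy(...) helper above is not shown here. A minimal sketch, assuming it
// simply overlays every entry of the source Configuration onto the target
// (Configuration implements Iterable<Map.Entry<String, String>>):
private static void copy(Configuration from, Configuration to) {
  for (java.util.Map.Entry<String, String> entry : from) {
    to.set(entry.getKey(), entry.getValue());
  }
}
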
/**
 * Create FileSystem object.
 */
public FileSystem createFs() throws IOException {
  fs = FileSystem.get(new Configuration());
  return fs;
}

public static String runTask(String[] args) throws Exception {
  String workingPath = args[0];
  log.info("Deleting indexing hadoop working path [%s].", workingPath);
  Path p = new Path(workingPath);
  FileSystem fs = p.getFileSystem(new Configuration());
  fs.delete(p, true);
  return null;
}

@Test
public void testOldStyleConfiguration() {
  Configuration conf = new Configuration();
  conf.set("dfs.web.ugi", "joe,group1,group2");
  assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}

@Test
public void parseZkUriWithPlusDelimiters() throws Exception {
  org.apache.hadoop.fs.FileSystem fs = FileSystem.get(URI.create("alluxio://zk@a:0+b:1+c:2/"),
      new org.apache.hadoop.conf.Configuration());
  assertTrue(fs instanceof AbstractFileSystem);
  AbstractFileSystem afs = (AbstractFileSystem) fs;
  assertTrue(afs.mFsContext.getConf().getBoolean(PropertyKey.ZOOKEEPER_ENABLED));
  assertEquals("a:0,b:1,c:2", afs.mFsContext.getConf().get(PropertyKey.ZOOKEEPER_ADDRESS));
}

public static void main(String[] args) throws Exception {
  if (args.length != 2) {
    usage();
    System.exit(1);
  }
  Configuration conf = new Configuration();
  Path path = new Path(args[0]);
  FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    System.err.println("The specified path exists, aborting!");
    System.exit(1);
  }
  try {
    doSmokeTest(fs, path, args[1]);
  } finally {
    fs.delete(path, false);
  }
  System.out.println("SUCCESS");
}

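// doSmokeTest(...) is defined elsewhere and is not shown in this snippet. A
// minimal hypothetical stand-in that round-trips a few bytes through the
// target FileSystem; the real method presumably uses the second CLI argument
// as an extra option (e.g. a codec name):
private static void doSmokeTest(FileSystem fs, Path path, String option) throws Exception {
  byte[] payload = option.getBytes(java.nio.charset.StandardCharsets.UTF_8);
  try (org.apache.hadoop.fs.FSDataOutputStream out = fs.create(path)) {
    out.write(payload);
  }
  byte[] readBack = new byte[payload.length];
  try (org.apache.hadoop.fs.FSDataInputStream in = fs.open(path)) {
    in.readFully(readBack);
  }
  if (!java.util.Arrays.equals(payload, readBack)) {
    throw new java.io.IOException("Smoke test failed: data read back did not match");
  }
}
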
@Test
public void testNoCodec() {
  Configuration c = new Configuration();
  c.set("hbase.client.default.rpc.codec", "");
  String codec = AbstractRpcClient.getDefaultCodec(c);
  assertTrue(codec == null || codec.length() == 0);
}

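// A plausible sketch of getDefaultCodec's contract, matching the assertion
// above (hedged; the real HBase implementation may differ in details): an
// explicitly empty override means "no codec", otherwise the configured class
// name is returned, falling back to KeyValueCodec.
static String getDefaultCodecSketch(Configuration c) {
  String codec = c.get("hbase.client.default.rpc.codec",
      org.apache.hadoop.hbase.codec.KeyValueCodec.class.getCanonicalName());
  return (codec == null || codec.length() == 0) ? null : codec;
}
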
@Test
public void testConfigure() throws Exception {
  ConfigurableDummyOutputFormat outputFormat = mock(ConfigurableDummyOutputFormat.class);
  HadoopOutputFormat<String, Long> hadoopOutputFormat =
      setupHadoopOutputFormat(outputFormat, Job.getInstance(), null, null, new Configuration());
  hadoopOutputFormat.configure(new org.apache.flink.configuration.Configuration());
  verify(outputFormat, times(1)).setConf(any(Configuration.class));
}

public void printFile(String path) throws Exception {
  FileSystem fileSystem = FileUtil.getFileSystem(path);
  Path fsPath = new Path(path);
  // Close the reader even if iteration fails part-way through.
  try (SequenceFile.Reader reader = new SequenceFile.Reader(fileSystem, fsPath, new Configuration())) {
    LongWritable key = (LongWritable) reader.getKeyClass().newInstance();
    BytesWritable value = (BytesWritable) reader.getValueClass().newInstance();
    System.out.println("reading file " + path);
    while (reader.next(key, value)) {
      if (mPrintOffsetsOnly) {
        System.out.println(Long.toString(key.get()));
      } else {
        // The BytesWritable backing array may be padded; copy out only the valid bytes.
        byte[] nonPaddedBytes = new byte[value.getLength()];
        System.arraycopy(value.getBytes(), 0, nonPaddedBytes, 0, value.getLength());
        System.out.println(Long.toString(key.get()) + ": " + new String(nonPaddedBytes));
      }
    }
  }
}

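// FileUtil.getFileSystem(...) is an application-level helper not shown in this
// snippet. A minimal sketch, assuming it simply resolves the FileSystem from
// the path's URI with a default Configuration:
public static FileSystem getFileSystem(String path) throws java.io.IOException {
  return new Path(path).getFileSystem(new Configuration());
}
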
/**
 * Test for the {@link HadoopConfigurationUtils#mergeHadoopConfiguration} method.
 */
@Test
public void mergeHadoopConfiguration() {
  org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration();
  hadoopConfig.set(PropertyKey.S3A_ACCESS_KEY.toString(), TEST_S3_ACCCES_KEY);
  hadoopConfig.set(PropertyKey.S3A_SECRET_KEY.toString(), TEST_S3_SECRET_KEY);
  hadoopConfig.set(TEST_ALLUXIO_PROPERTY, TEST_ALLUXIO_VALUE);
  hadoopConfig.setBoolean(PropertyKey.ZOOKEEPER_ENABLED.getName(), true);
  hadoopConfig.set(PropertyKey.ZOOKEEPER_ADDRESS.getName(), "host1:port1,host2:port2;host3:port3");
  // This hadoop config will not be loaded into Alluxio configuration.
  hadoopConfig.set("hadoop.config.parameter", "hadoop config value");
  mConf = HadoopConfigurationUtils.mergeHadoopConfiguration(hadoopConfig, mConf.copyProperties());
  Assert.assertEquals(TEST_S3_ACCCES_KEY, mConf.get(PropertyKey.S3A_ACCESS_KEY));
  Assert.assertEquals(TEST_S3_SECRET_KEY, mConf.get(PropertyKey.S3A_SECRET_KEY));
  Assert.assertEquals(Source.RUNTIME, mConf.getSource(PropertyKey.S3A_ACCESS_KEY));
  Assert.assertEquals(Source.RUNTIME, mConf.getSource(PropertyKey.S3A_SECRET_KEY));
  Assert.assertTrue(mConf.getBoolean(PropertyKey.ZOOKEEPER_ENABLED));
  Assert.assertEquals("host1:port1,host2:port2;host3:port3",
      mConf.get(PropertyKey.ZOOKEEPER_ADDRESS));
}

@Test
public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf = new Configuration();
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
  // requires admin access to instrumentation, FALSE by default
  Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
  // requires admin access to instrumentation, TRUE
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
}

/**
 * Test for the {@link HadoopConfigurationUtils#mergeHadoopConfiguration} method for an empty
 * configuration.
 */
@Test
public void mergeEmptyHadoopConfiguration() {
  org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration();
  long beforeSize = mConf.toMap().size();
  mConf = HadoopConfigurationUtils.mergeHadoopConfiguration(hadoopConfig, mConf.copyProperties());
  long afterSize = mConf.toMap().size();
  Assert.assertEquals(beforeSize, afterSize);
  Assert.assertFalse(mConf.getBoolean(PropertyKey.ZOOKEEPER_ENABLED));
}

@Test
public void testWarnStuckTasks() throws Exception {
  final int INTERVAL = 1000;
  Configuration conf = new Configuration();
  conf.setLong(TaskMonitor.RPC_WARN_TIME_KEY, INTERVAL);
  conf.setLong(TaskMonitor.MONITOR_INTERVAL_KEY, INTERVAL);
  final TaskMonitor tm = new TaskMonitor(conf);
  MonitoredRPCHandler t = tm.createRPCStatus("test task");
  long then = EnvironmentEdgeManager.currentTime();
  t.setRPC("testMethod", new Object[0], then);
  Thread.sleep(INTERVAL * 2);
  assertTrue("We did not warn", t.getWarnTime() > then);
  tm.shutdown();
}

@Test
public void testDatabaseLocation() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  String dbName = "locationDB";
  client.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE);
  HCatCreateDBDesc dbDesc = HCatCreateDBDesc.create(dbName)
      .ifNotExists(true).location("/tmp/" + dbName).build();
  client.createDatabase(dbDesc);
  HCatDatabase newDB = client.getDatabase(dbName);
  assertTrue(newDB.getLocation().matches(".*/tmp/" + dbName));
  client.close();
}

@AfterClass
public void tearDown() throws IOException {
  FileSystem fs = FileSystem.getLocal(new Configuration(false));
  Path rootDir = new Path(TEST_STATE_STORE_ROOT_DIR);
  if (fs.exists(rootDir)) {
    fs.delete(rootDir, true);
  }
}

private List<Path> getFilesRecursively(String fileBackupDir)
    throws IllegalArgumentException, IOException {
  FileSystem fs = FileSystem.get(new Path(fileBackupDir).toUri(), new Configuration());
  List<Path> list = new ArrayList<>();
  RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(fileBackupDir), true);
  while (it.hasNext()) {
    Path p = it.next().getPath();
    if (HFile.isHFileFormat(fs, p)) {
      list.add(p);
    }
  }
  return list;
}

@Inject
@Provides
@Singleton
public Configuration createHadoopConfiguration() {
  final String hadoopConfDirPath = requireNonNull(this.props.get(HADOOP_CONF_DIR_PATH));
  final File hadoopConfDir = new File(hadoopConfDirPath);
  checkArgument(hadoopConfDir.exists() && hadoopConfDir.isDirectory());
  final Configuration hadoopConf = new Configuration(false);
  hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "core-site.xml"));
  hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "hdfs-site.xml"));
  hadoopConf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
  return hadoopConf;
}

@Before
public void initConfig() {
  hadoopConfig = new org.apache.hadoop.conf.Configuration();
  hadoopConfig.set(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
}

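// A hypothetical follow-up test (not from the source) illustrating the effect
// of the fs.defaultFS setting above: FileSystem.get(...) now resolves to the
// configured HDFS root rather than the local file system.
@Test
public void defaultFsResolvesToHdfsRoot() throws java.io.IOException {
  org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(hadoopConfig);
  org.junit.Assert.assertEquals(hdfsRootPath.toUri().getScheme(), fs.getUri().getScheme());
}
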
@SuppressWarnings({"OverlyStrongTypeCast", "ConstantConditions"}) @Test public void testGetMetadataRetryCounter() { int maxRetries = 2; try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) { MockAmazonS3 s3 = new MockAmazonS3(); s3.setGetObjectMetadataHttpCode(SC_INTERNAL_SERVER_ERROR); Configuration configuration = new Configuration(); configuration.set(S3_MAX_BACKOFF_TIME, "1ms"); configuration.set(S3_MAX_RETRY_TIME, "5s"); configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries); fs.initialize(new URI("s3n://test-bucket/"), configuration); fs.setS3Client(s3); fs.getS3ObjectMetadata(new Path("s3n://test-bucket/test")); } catch (Throwable expected) { assertInstanceOf(expected, AmazonS3Exception.class); assertEquals(((AmazonS3Exception) expected).getStatusCode(), SC_INTERNAL_SERVER_ERROR); assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetMetadataRetries().getTotalCount(), maxRetries); } }
@Test
public void testFromConfigurationOverride() throws Exception {
  Configuration configuration = new Configuration();
  configuration.set("fs.file.impl", InstrumentedLocalFileSystem.class.getName());
  FileSystem fs = FileSystem.newInstance(new URI("file:///"), configuration);
  Assert.assertTrue(fs instanceof InstrumentedLocalFileSystem);
  Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
  Assert.assertEquals(fs.getFileStatus(new Path("/tmp")).getPath(), new Path("file:///tmp"));
  Assert.assertEquals(fs.getUri().getScheme(), "file");
}