/**
 * Starts an embedded Presto server with a file-backed Hive metastore
 * (SQL-standard authorization) and seeds the schemas used by the tests.
 */
@BeforeClass
public void setupServer()
        throws Exception
{
    Logging.initialize();

    server = new TestingPrestoServer();
    server.installPlugin(new HiveHadoop2Plugin());

    // File-based metastore rooted under the server's data directory;
    // SQL-standard security so GRANT/REVOKE statements are honored.
    ImmutableMap<String, String> catalogProperties = ImmutableMap.<String, String>builder()
            .put("hive.metastore", "file")
            .put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString())
            .put("hive.security", "sql-standard")
            .build();
    server.createCatalog("hive", "hive-hadoop2", catalogProperties);

    // Pre-create the schemas the test queries target.
    try (Connection connection = createConnection();
            Statement statement = connection.createStatement()) {
        statement.execute("CREATE SCHEMA default");
        statement.execute("CREATE SCHEMA fruit");
    }
}
// Register the TPCH connector so test tables can be populated from it.
queryRunner.createCatalog("tpch", "tpch");
// Directory where Hive table data will live on disk.
// NOTE(review): baseDir is presumably consumed further down in the enclosing
// method (outside this view) — confirm against the full method body.
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
/**
 * Creates a 4-node DistributedQueryRunner backed by a file-based Hive
 * metastore containing a single "default" database, with the Geo plugin
 * installed for spatial-join tests.
 *
 * @return a fully initialized query runner with the "hive" catalog registered
 * @throws Exception if server startup or metastore initialization fails
 */
private static DistributedQueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = new DistributedQueryRunner(
            testSessionBuilder()
                    .setSource(TestSpatialJoins.class.getSimpleName())
                    .setCatalog("hive")
                    .setSchema("default")
                    .build(),
            4);
    try {
        queryRunner.installPlugin(new GeoPlugin());

        // File-based metastore rooted under the coordinator's data directory.
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());

        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        metastore.createDatabase(Database.builder()
                .setDatabaseName("default")
                .setOwnerName("public")
                .setOwnerType(PrincipalType.ROLE)
                .build());

        queryRunner.installPlugin(new HivePlugin("hive", Optional.of(metastore)));
        queryRunner.createCatalog("hive", "hive");
        return queryRunner;
    }
    catch (Exception e) {
        // Fix: close the runner on setup failure so the embedded servers and
        // temp directories are not leaked when a later init step throws.
        queryRunner.close();
        throw e;
    }
}
/**
 * Boots a TestingPrestoServer with a file-backed Hive catalog using
 * SQL-standard security, then creates the test schemas over JDBC.
 */
@BeforeClass
public void setupServer()
        throws Exception
{
    Logging.initialize();
    server = new TestingPrestoServer();
    server.installPlugin(new HiveHadoop2Plugin());
    server.createCatalog(
            "hive",
            "hive-hadoop2",
            ImmutableMap.<String, String>builder()
                    .put("hive.metastore", "file")
                    .put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString())
                    .put("hive.security", "sql-standard")
                    .build());
    // Seed the schemas the tests run against.
    try (Connection conn = createConnection();
            Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE SCHEMA default");
        stmt.execute("CREATE SCHEMA fruit");
    }
}
// Make the TPCH connector available as a data source for test setup.
queryRunner.createCatalog("tpch", "tpch");
// On-disk root for Hive table data; used by later setup statements
// (outside this view — verify against the enclosing method).
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
// Register the sampled TPCH connector variant.
queryRunner.createCatalog("tpch_sampled", "tpch_sampled");
// On-disk root for Hive table data.
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
// In-memory metastore seeded with a "tpch" database; presumably registered
// with a Hive plugin further down in the enclosing method — outside this view.
InMemoryHiveMetastore metastore = new InMemoryHiveMetastore(baseDir);
metastore.createDatabase(createDatabaseMetastoreObject(baseDir, "tpch"));
/**
 * Builds the distributed query runner used by the spatial-join tests:
 * four nodes, the Geo plugin, and a Hive catalog backed by a file-based
 * metastore that contains only the "default" database.
 *
 * @return the initialized runner, with the "hive" catalog created
 * @throws Exception on any startup or metastore failure
 */
private static DistributedQueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = new DistributedQueryRunner(
            testSessionBuilder()
                    .setSource(TestSpatialJoins.class.getSimpleName())
                    .setCatalog("hive")
                    .setSchema("default")
                    .build(),
            4);
    try {
        queryRunner.installPlugin(new GeoPlugin());

        // Metastore data lives under the coordinator's base data directory.
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());

        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        metastore.createDatabase(Database.builder()
                .setDatabaseName("default")
                .setOwnerName("public")
                .setOwnerType(PrincipalType.ROLE)
                .build());

        queryRunner.installPlugin(new HivePlugin("hive", Optional.of(metastore)));
        queryRunner.createCatalog("hive", "hive");
        return queryRunner;
    }
    catch (Exception e) {
        // Fix: without this, a failure after construction leaks the runner's
        // embedded servers and temp directories. Close, then rethrow.
        queryRunner.close();
        throw e;
    }
}