/**
 * Lazily builds and caches the {@link Warehouse} for the current conf.
 *
 * @return the cached Warehouse instance, created on first call
 * @throws MetaException if the Warehouse cannot be constructed
 */
private Warehouse getWh() throws MetaException {
  if (wh != null) {
    return wh;
  }
  wh = new Warehouse(conf);
  return wh;
}
/**
 * Returns the shared {@link Warehouse}, constructing it on first use.
 *
 * @return the Warehouse built from {@code conf}
 * @throws MetaException if Warehouse construction fails
 */
private Warehouse getWh() throws MetaException {
  Warehouse cached = wh;
  if (cached == null) {
    cached = new Warehouse(conf);
    wh = cached;
  }
  return cached;
}
/**
 * Creates a helper bound to one table location and its partition columns.
 * The {@code partitions} map is seeded with every partition column mapped to
 * {@code null}, preserving column order (hence the LinkedHashMap).
 *
 * @param configuration    Hadoop configuration used to build the Warehouse
 * @param tablePath        filesystem location of the table
 * @param partitionColumns ordered partition column names
 * @throws MetaException if the Warehouse cannot be constructed
 */
WarehousePartitionHelper(Configuration configuration, Path tablePath,
    List<String> partitionColumns) throws MetaException {
  this.tablePath = tablePath;
  this.partitionColumns = partitionColumns;
  this.warehouse = new Warehouse(configuration);
  this.partitions = new LinkedHashMap<>(partitionColumns.size());
  for (String column : partitionColumns) {
    this.partitions.put(column, null);
  }
}
/**
 * Builds a replication context for the given dump directory.
 * Both the Warehouse and the PathInfo are derived from {@code hiveConf}.
 *
 * @param dumpDirectory root directory of the repl dump
 * @param hiveConf      session configuration
 * @param hiveDb        Hive metadata accessor
 * @param lineageState  lineage state carried over from the session
 * @param nestedContext enclosing query context
 * @throws MetaException if the Warehouse cannot be constructed
 */
public Context(String dumpDirectory, HiveConf hiveConf, Hive hiveDb,
    LineageState lineageState, org.apache.hadoop.hive.ql.Context nestedContext)
    throws MetaException {
  this.dumpDirectory = dumpDirectory;
  this.hiveConf = hiveConf;
  this.hiveDb = hiveDb;
  this.nestedContext = nestedContext;
  this.warehouse = new Warehouse(hiveConf);
  this.pathInfo = new PathInfo(hiveConf);
  this.sessionStateLineageState = lineageState;
}
}
/**
 * Recursively deletes {@code dir} via a fresh Warehouse, using the database
 * to resolve trash behavior. MetaException is rethrown as HiveException.
 *
 * @param dir directory to remove
 * @param db  database owning the directory
 * @throws HiveException wrapping any MetaException from the Warehouse
 */
private void deleteDir(Path dir, Database db) throws HiveException {
  try {
    new Warehouse(conf).deleteDir(dir, true, db);
  } catch (MetaException e) {
    throw new HiveException(e);
  }
}
/**
 * Recursively deletes {@code dir} via a fresh Warehouse, translating
 * MetaException into HiveException for callers.
 *
 * @param dir directory to remove
 * @throws HiveException wrapping any MetaException from the Warehouse
 */
private void deleteDir(Path dir) throws HiveException {
  try {
    new Warehouse(conf).deleteDir(dir, true);
  } catch (MetaException e) {
    throw new HiveException(e);
  }
}
/**
 * Resolves the expected base directory as the warehouse root's URI path.
 *
 * @return path component of the warehouse root URI
 * @throws MetaException if the Warehouse cannot be constructed
 */
@Override
protected String expectedBaseDir() throws MetaException {
  java.net.URI rootUri = new Warehouse(conf).getWhRoot().toUri();
  return rootUri.getPath();
}
}
/**
 * Per-test setup: creates the Warehouse for {@code conf}, then the
 * metastore client used by the test methods.
 */
@Before
public void openWarehouse() throws Exception {
  warehouse = new Warehouse(conf);
  client = createClient();
}
/**
 * Per-test setup: creates the Warehouse and seeds the conf with a mix of
 * values (plain string, URL, empty, numeric string) — presumably consumed
 * by conf-retrieval assertions in this test class; confirm against the tests.
 */
@Before
public void setUp() throws Exception {
  warehouse = new Warehouse(conf);
  // set some values to use for getting conf. vars
  MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
  conf.set("hive.key1", "value1");
  conf.set("hive.key2", "http://www.example.com");
  conf.set("hive.key3", "");
  conf.set("hive.key4", "0");
  conf.set("datanucleus.autoCreateTables", "false");
  conf.set("hive.in.test", "true");
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  // Small batch size so batched-retrieval code paths are exercised.
  MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
  MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST,
      DEFAULT_LIMIT_PARTITION_REQUEST);
  // Deliberately unresolvable class — NOTE(review): looks like it exercises the
  // schema-reader failure path; verify against the tests that read this var.
  MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
}
/**
 * Starts the MetaStoreService. Be aware, as the current MetaStore does not
 * implement clean shutdown, starting MetaStoreService is possible only once
 * per test.
 *
 * @throws Exception if any Exception occurs
 */
public void start() throws Exception {
  warehouse = new Warehouse(configuration);
  warehouseRootFs = warehouse.getFs(warehouse.getWhRoot());
  // Resolve the trash directory through the filesystem's configured policy.
  TrashPolicy policy = TrashPolicy.getInstance(configuration, warehouseRootFs);
  trashDir = policy.getCurrentTrashDir();
}
/**
 * Per-test setup: builds a fresh metastore conf, installs the dummy filter
 * hook, sets metrics/batching knobs, then constructs the Warehouse. The
 * Warehouse is created last so it observes all conf overrides above it.
 */
@Before
public void setUpForTest() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  // Install the dummy hook so filter behavior can be observed by the tests.
  MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class,
      MetaStoreFilterHook.class);
  MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
  // Assorted values (plain, URL, empty, numeric string) — presumably for
  // conf-retrieval assertions; confirm against the tests.
  conf.set("hive.key1", "value1");
  conf.set("hive.key2", "http://www.example.com");
  conf.set("hive.key3", "");
  conf.set("hive.key4", "0");
  conf.set("datanucleus.autoCreateTables", "false");
  conf.set("hive.in.test", "true");
  // Small batch size so batched-retrieval code paths are exercised.
  MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
  MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST,
      DEFAULT_LIMIT_PARTITION_REQUEST);
  // Deliberately unresolvable class — NOTE(review): appears to target the
  // schema-reader failure path; verify against the tests that read this var.
  MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  warehouse = new Warehouse(conf);
}
/**
 * Builds an ObjectStore wired to the statement-verifying Derby driver by
 * rewriting the JDBC URL scheme from "derby" to "sderby", then creates the
 * default catalog in it.
 *
 * @return a configured ObjectStore with the default catalog created
 * @throws MetaException          if catalog creation fails
 * @throws InvalidOperationException if catalog creation is rejected
 */
private ObjectStore createObjectStore() throws MetaException, InvalidOperationException {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER,
      StatementVerifyingDerby.class.getName());
  // Point the connection URL at the verifying driver's "sderby" scheme.
  String url = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY)
      .replace("derby", "sderby");
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, url);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  final ObjectStore store = new ObjectStore();
  store.setConf(conf);
  HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf));
  return store;
}
/**
 * Registers directory-to-operator lineage for a load. With a LoadTableDesc
 * the source path is mapped directly; otherwise, for CTAS queries only, the
 * table's default warehouse location is resolved and mapped.
 *
 * @param ltd    load descriptor, or null when not loading an existing path
 * @param output operator producing the data
 * @throws SemanticException if the default table path cannot be resolved
 */
private void handleLineage(LoadTableDesc ltd, Operator output) throws SemanticException {
  if (ltd != null) {
    queryState.getLineageState().mapDirToOp(ltd.getSourcePath(), output);
    return;
  }
  // Only CTAS queries get lineage when there is no load descriptor.
  if (!queryState.getCommandType()
      .equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
    return;
  }
  String tableName = Utilities.getDbTableName(tableDesc.getTableName())[1];
  Path location;
  try {
    Warehouse wh = new Warehouse(conf);
    location = wh.getDefaultTablePath(
        db.getDatabase(tableDesc.getDatabaseName()), tableName, tableDesc.isExternal());
  } catch (MetaException | HiveException e) {
    throw new SemanticException(e);
  }
  queryState.getLineageState().mapDirToOp(location, output);
}
/**
 * Class-level setup: enables the metastore test timeout, configures a mock
 * partition-expression proxy and a short client socket timeout, then creates
 * the Warehouse and metastore client shared by all tests.
 */
@BeforeClass
public static void setUp() throws Exception {
  HiveMetaStore.TEST_TIMEOUT_ENABLED = true;
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
      MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
  // Short socket timeout so timeout behavior is reachable within a test run.
  MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_SOCKET_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  warehouse = new Warehouse(conf);
  client = new HiveMetaStoreClient(conf);
}
/**
 * Per-test setup: builds an in-test metastore conf, wipes any existing store
 * objects, and recreates the default catalog so each test starts clean.
 */
@Before
public void setUp() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  objectStore = new ObjectStore();
  objectStore.setConf(conf);
  // Start from an empty store, then restore the mandatory default catalog.
  dropAllStoreObjects(objectStore);
  HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
}
/**
 * Per-test setup: configures an ObjectStore and a CachedStore with memory
 * estimation disabled, stops the background cache updater so tests drive
 * updates explicitly, clears the shared caches, and creates the 'hive'
 * default catalog.
 */
@Before
public void setUp() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
  // Disable memory estimation for this test class
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  objectStore = new ObjectStore();
  objectStore.setConf(conf);
  cachedStore = new CachedStore();
  cachedStore.setConfForTest(conf);
  // Stop the CachedStore cache update service. We'll start it explicitly to control the test
  CachedStore.stopCacheUpdateService(1);
  // Fresh shared cache so no state leaks between tests.
  sharedCache = new SharedCache();
  sharedCache.getDatabaseCache().clear();
  sharedCache.getTableCache().clear();
  sharedCache.getSdCache().clear();
  // Create the 'hive' catalog
  HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
}
/**
 * One-time setup: starts an embedded metastore (only once per JVM — the
 * guard on isServerRunning makes repeat calls no-ops), swaps in a security
 * manager that blocks System.exit, mirrors metastore conf values into the
 * HiveConf keys, and creates the client-side Warehouse and metastore client.
 */
@Override
protected void setUp() throws Exception {
  // Metastore cannot be cleanly restarted; skip if already up.
  if (isServerRunning) {
    return;
  }
  hcatConf = new HiveConf(this.getClass());
  MetaStoreTestUtils.startMetaStoreWithRetry(hcatConf);
  isServerRunning = true;
  // Save the current security manager so it can be restored in teardown,
  // then install one that turns System.exit into an exception.
  securityManager = System.getSecurityManager();
  System.setSecurityManager(new NoExitSecurityManager());
  Policy.setPolicy(new DerbyPolicy());
  hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
  hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 60,
      TimeUnit.SECONDS);
  hcatConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  // Copy the values chosen by the started metastore into the HiveConf keys
  // the client reads, so both sides agree on warehouse/JDBC/thrift settings.
  hcatConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
      MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE));
  hcatConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
      MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY));
  hcatConf.set(HiveConf.ConfVars.METASTOREURIS.varname,
      MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS));
  clientWH = new Warehouse(hcatConf);
  msc = new HiveMetaStoreClient(hcatConf);
}
/**
 * Computes the default warehouse location for the object created by a
 * CTAS or CREATE MATERIALIZED VIEW query.
 *
 * @param pCtx parse context holding the create-table / create-view descriptor
 * @return the default warehouse path for the new table or view
 * @throws SemanticException if the target database does not exist, or if the
 *         name/path lookup fails
 */
private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticException {
  try {
    String protoName = null;
    boolean isExternal = false;
    if (pCtx.getQueryProperties().isCTAS()) {
      protoName = pCtx.getCreateTable().getTableName();
      isExternal = pCtx.getCreateTable().isExternal();
    } else if (pCtx.getQueryProperties().isMaterializedView()) {
      // Materialized views are never external.
      protoName = pCtx.getCreateViewDesc().getViewName();
    }
    String[] names = Utilities.getDbTableName(protoName);
    if (!db.databaseExists(names[0])) {
      throw new SemanticException("ERROR: The database " + names[0] + " does not exist.");
    }
    Warehouse wh = new Warehouse(conf);
    return wh.getDefaultTablePath(db.getDatabase(names[0]), names[1], isExternal);
  } catch (HiveException | MetaException e) {
    // Both failure modes were handled identically in separate catch blocks;
    // collapsed into a single multi-catch.
    throw new SemanticException(e);
  }
}
@Before public void createObjectStore() throws MetaException, InvalidOperationException { conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); objectStore.setConf(conf); // Create three catalogs HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); Catalog cat1 = new CatalogBuilder() .setName(CAT1_NAME) .setLocation("/tmp/cat1") .build(); objectStore.createCatalog(cat1); Catalog cat2 = new CatalogBuilder() .setName(CAT2_NAME) .setLocation("/tmp/cat2") .build(); objectStore.createCatalog(cat2); }
/**
 * Per-test setup: configures an ObjectStore with bit-vector stats fetching
 * disabled, wipes existing store objects, recreates the default catalog, and
 * precomputes two serialized HyperLogLog bit vectors used as stats fixtures.
 */
@Before
public void setUp() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  store = new ObjectStore();
  store.setConf(conf);
  dropAllStoreObjects(store);
  HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf));
  // Fixture bit vectors: slot 1 sees one distinct value, slot 0 sees three
  // (the duplicate 3 collapses in the HLL sketch).
  HyperLogLog single = HyperLogLog.builder().build();
  single.addLong(1);
  bitVectors[1] = single.serialize();
  HyperLogLog multi = HyperLogLog.builder().build();
  multi.addLong(2);
  multi.addLong(3);
  multi.addLong(3);
  multi.addLong(4);
  bitVectors[0] = multi.serialize();
}