// Fragment (mid-method, from metastore server startup): builds a strict-read/strict-write
// TBinaryProtocol factory capped at maxMessageSize, then constructs the DB-backed
// HMSHandler and wraps it in a retrying proxy so transient JDO/datastore errors are retried.
// NOTE(review): "new db based metaserver" is the handler's display name — TODO confirm intent.
inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize); HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false); IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
// Fragment (mid-method, handler init tail): seeds the default database/roles/admin users,
// caches the JDO connection URL (used later to detect config changes), registers
// table/partition count gauges, primes metrics, starts the file-metadata manager, and
// decides whether server-side result filtering is on (filterHook stays null when disabled).
// NOTE(review): getIfServerFilterenabled's odd casing matches the upstream method name — do not "fix".
createDefaultDB(); createDefaultRoles(); addAdminUsers(); currentUrl = MetaStoreInit.getConnectionURL(conf); tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES); partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS); updateMetrics(); fileMetadataManager = new FileMetadataManager(this.getMS(), conf); isServerFilterEnabled = getIfServerFilterenabled(); filterHook = isServerFilterEnabled ? loadFilterHooks() : null;
// Fragment: copies the handler's live Configuration into tConfig so downstream code sees
// the handler's current settings. Presumably inside an event/listener callback — TODO confirm.
tConfig.set(context.getHandler().getConf());
// Fragment (catalog-aware drop_database): fires the pre-drop event, then enumerates
// tables/functions under the catalog-prepended DB name, drops functions and materialized
// views, and for managed tables collects paths (only those NOT under the DB directory, so
// the DB-dir delete doesn't already cover them), drops partitions (deleting data only for
// non-external tables), and drops each table.
// NOTE(review): `drop_table(name, materializedView.getTableName(), false)` passes the bare
// DB name while other calls use the cat-prepended form — verify against the full method;
// may rely on default-catalog resolution.
firePreEvent(new PreDropDatabaseEvent(db, this)); String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf); Set<String> uniqueTableNames = new HashSet<>(get_all_tables(catPrependedName)); List<String> allFunctions = get_functions(catPrependedName, "*"); drop_function(catPrependedName, funcName); List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString()); int startIndex = 0; if (!isSubdirectory(databasePath, materializedViewPath)) { tablePaths.add(materializedViewPath); drop_table(name, materializedView.getTableName(), false); if (table.getSd().getLocation() != null && !isExternal(table)) { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); if (!wh.isWritable(tablePath.getParent())) { if (!isSubdirectory(databasePath, tablePath)) { tablePaths.add(tablePath); partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf), table.getTableName(), false);
// Three near-identical fragments of the pre-catalog drop_database path. Common flow:
// load the DB, fire the pre-drop event, list tables/functions; for each managed table with
// a location, resolve the DNS path, check writability of the parent, record paths that are
// NOT under the DB directory (they need explicit deletion), drop partitions (data deleted
// only when deleteData && !external), drop the table; finally, if deleteData, delete the
// collected partition and table data.
db = ms.getDatabase(name); firePreEvent(new PreDropDatabaseEvent(db, this)); List<String> allTables = get_all_tables(db.getName()); List<String> allFunctions = get_functions(db.getName(), "*"); drop_function(name, funcName); if (table.getSd().getLocation() != null && !isExternal(table)) { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); if (!wh.isWritable(tablePath.getParent())) { if (!isSubdirectory(databasePath, tablePath)) { tablePaths.add(tablePath); partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); drop_table(name, table.getTableName(), false); } else if (deleteData) { deletePartitionData(partitionPaths); deleteTableData(tablePath);
// Duplicate of the fragment above — identical statements; likely extracted from an
// adjacent revision of the same method.
db = ms.getDatabase(name); firePreEvent(new PreDropDatabaseEvent(db, this)); List<String> allTables = get_all_tables(db.getName()); List<String> allFunctions = get_functions(db.getName(), "*"); drop_function(name, funcName); if (table.getSd().getLocation() != null && !isExternal(table)) { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); if (!wh.isWritable(tablePath.getParent())) { if (!isSubdirectory(databasePath, tablePath)) { tablePaths.add(tablePath); partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); drop_table(name, table.getTableName(), false); } else if (deleteData) { deletePartitionData(partitionPaths); deleteTableData(tablePath);
// Variant: instead of listing functions, rejects a non-cascade drop of a non-empty DB
// with InvalidOperationException before any table work happens.
db = ms.getDatabase(name); firePreEvent(new PreDropDatabaseEvent(db, this)); List<String> allTables = get_all_tables(db.getName()); if (!cascade && !allTables.isEmpty()) { throw new InvalidOperationException("Database " + db.getName() + " is not empty"); if (table.getSd().getLocation() != null && !isExternal(table)) { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); if (!wh.isWritable(tablePath.getParent())) { if (!isSubdirectory(databasePath, tablePath)) { tablePaths.add(tablePath); partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); drop_table(name, table.getTableName(), false); } else if (deleteData) { deletePartitionData(partitionPaths); deleteTableData(tablePath);
// Fragment: older init tail than L2 — same default-seeding, URL caching, metric gauges,
// and FileMetadataManager startup, but without the server-side filter-hook wiring.
createDefaultDB(); createDefaultRoles(); addAdminUsers(); currentUrl = MetaStoreInit.getConnectionURL(conf); tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES); partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS); updateMetrics(); fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
// Two identical fragments from an older (HiveConf-based) init: seed defaults and cache
// the connection URL. Duplication suggests adjacent revisions of the same method.
createDefaultDB(); createDefaultRoles(); addAdminUsers(); currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
createDefaultDB(); createDefaultRoles(); addAdminUsers(); currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
// Fragment: constructs the Warehouse helper from HiveConf, then seeds the default DB.
wh = new Warehouse(hiveConf); createDefaultDB();
// Fragment (test/tool setup): constructs an HMSHandler directly (no retrying proxy;
// third arg true — presumably "init at construction", TODO confirm) and grabs its RawStore.
HMSHandler baseHandler = new HiveMetaStore.HMSHandler( "new db based metaserver", conf, true); rawStore = baseHandler.getMS();
// Fragment of get_schema: logs function entry, derives the base table name from a possibly
// dotted "db.table" argument, loads the table (translating NoSuchObjectException into the
// thrift-declared UnknownTableException), fetches the column schema, and logs exit.
// FIX: the startFunction message was missing the separator before "tbl=" — it produced
// e.g. ": db=mydbtbl=mytable"; upstream Hive logs ": db=<db> tbl=<tbl>".
startFunction("get_schema", ": db=" + db + " tbl=" + tableName); try { String[] names = tableName.split("\\."); tbl = get_table(db, base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); List<FieldSchema> fieldSchemas = get_fields(db, base_table_name); endFunction("get_schema");
// Fragment: resolves the table owning the old partition via the event's handler.
// Presumably inside an alter-partition event listener — TODO confirm against caller.
event.getHandler().get_table( event.getOldPartition().getDbName(), event.getOldPartition().getTableName()
// Fragment (parameter list + body start of a partition-grant listing method): bumps the
// call counter, loads the table, builds the canonical partition name from the table's
// partition keys + the given values, then queries the RawStore for principal grants.
final String dbName, final String tableName, final List<String> partValues) throws MetaException, TException { incrementCounter("list_security_partition_grant"); RawStore ms = getMS(); Table tbl = get_table(dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); List<MPartitionPrivilege> mParts = ms.listPrincipalPartitionGrants(
// Duplicate fragment of the direct-handler setup: build HMSHandler, fetch its RawStore.
HMSHandler baseHandler = new HiveMetaStore.HMSHandler( "new db based metaserver", conf, true); rawStore = baseHandler.getMS();
// Fragment: looks up the owning table's storage-descriptor location for a partition
// (mapiPart), presumably to validate/derive the partition's URI path — TODO confirm use.
String tableLocation = context .getHandler() .get_table(mapiPart.getDbName(), mapiPart.getTableName()).getSd().getLocation(); String uriPath;
// Fragment: reads the partition's own SD location; only when it is non-empty does it
// fetch the parent table's location (likely to compare/derive relative placement).
String partitionLocation = getSdLocation(partition.getSd()); if (!StringUtils.isEmpty(partitionLocation)) { String tableLocation = context.getHandler().get_table( partition.getDbName(), partition.getTableName()).getSd().getLocation();
// Duplicate fragment of the direct-handler setup: build HMSHandler, fetch its RawStore.
HMSHandler baseHandler = new HiveMetaStore.HMSHandler( "new db based metaserver", conf, true); rawStore = baseHandler.getMS();
// Fragment of MetaStoreInit.updateConnectionURL: snapshots the current JDO URL, runs the
// optional connection-URL hook, and (when a hook is present and a badUrl was given) logs
// that the configured METASTORECONNECTURLKEY is being overridden by the hook's value.
throws MetaException { String connectUrl = null; String currentUrl = getConnectionURL(conf); try { initConnectionUrlHook(); if (urlHook != null) { if (badUrl != null) { LOG.error(addPrefix( String.format("Overriding %s with %s", HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),