/**
 * Returns every database-level privilege granted to the given principal,
 * converted to the thrift {@link HiveObjectPrivilege} representation.
 */
@Override
public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
    String principalName, PrincipalType principalType) {
  QueryWrapper wrapper = new QueryWrapper();
  try {
    return convertDB(listPrincipalAllDBGrant(principalName, principalType, wrapper));
  } finally {
    // Always release the underlying JDO query resources.
    wrapper.close();
  }
}
/**
 * Drops a single partition identified by its value list.
 * Convenience overload that forwards to the {@link PartitionDropOptions} variant.
 */
@Override
public boolean dropPartition(String catName, String db_name, String tbl_name,
    List<String> part_vals, boolean deleteData) throws TException {
  PartitionDropOptions options = PartitionDropOptions.instance().deleteData(deleteData);
  return dropPartition(catName, db_name, tbl_name, part_vals, options);
}
/**
 * Builds the message, failing fast if any required sub-message is missing.
 *
 * @throws UninitializedMessageException if the partially built message is incomplete
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfos build() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfos built = buildPartial();
  if (!built.isInitialized()) {
    throw newUninitializedMessageException(built);
  }
  return built;
}
/**
 * Drops every table in {@code databaseName} and then the database itself.
 *
 * <p>Fixes two defects in the original: the metastore client was only closed on
 * the error path (leaking it on success), and any {@link TException} was silently
 * swallowed, hiding the failure from callers.
 *
 * @param conf configuration used to open the metastore client
 * @param databaseName database to drop
 * @throws HiveException if a metastore call fails (original cause preserved)
 */
public static void dropDB(HiveConf conf, String databaseName) throws HiveException, MetaException {
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  try {
    for (String table : client.listTableNamesByFilter(databaseName, "", (short) -1)) {
      client.dropTable(databaseName, table, true, true);
    }
    client.dropDatabase(databaseName);
  } catch (TException e) {
    // Do not swallow: surface the failure with its cause intact.
    throw new HiveException("Failed to drop database " + databaseName, e);
  } finally {
    // Close unconditionally — the original leaked the client on success.
    client.close();
  }
}
/**
 * Checks whether a partition with the given key values exists, by building the
 * canonical partition name and probing for the matching MPartition.
 */
@Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
    List<FieldSchema> partKeys, List<String> partVals) throws MetaException {
  final String partName = Warehouse.makePartName(partKeys, partVals);
  return getMPartition(catName, dbName, tableName, partName) != null;
}
@Override public List<Partition> dropPartitions(String dbName, String tblName, List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException { // By default, we need the results from dropPartitions(); return dropPartitions(dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists)); }
/**
 * Lists all grants on the given database (optionally scoped to one authorizer),
 * converted to the thrift {@link HiveObjectPrivilege} representation.
 */
private List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName,
    String authorizer) {
  QueryWrapper wrapper = new QueryWrapper();
  try {
    return convertDB(listDatabaseGrants(catName, dbName, authorizer, wrapper));
  } finally {
    // Always release the underlying JDO query resources.
    wrapper.close();
  }
}
/**
 * Reads the protect-mode setting out of the parameter map.
 * Returns a default {@link ProtectMode} when the parameter is absent.
 */
private static ProtectMode getProtectMode(Map<String, String> parameters) {
  if (parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
    return getProtectModeFromString(parameters.get(ProtectMode.PARAMETER_NAME));
  }
  return new ProtectMode();
}
/**
 * Verifies the on-disk database schema version is compatible with this Hive build.
 *
 * @throws HiveMetaException if the versions are incompatible
 */
protected void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVersion)
    throws HiveMetaException {
  if (metaStoreSchemaInfo.isVersionCompatible(hiveSchemaVersion, dbSchemaVersion)) {
    return;
  }
  throw new HiveMetaException("Metastore schema version is not compatible. Hive Version: "
      + hiveSchemaVersion + ", Database Schema Version: " + dbSchemaVersion);
}
/**
 * Test teardown helper: drops the table, its database, and the type,
 * skipping whichever identifiers are null.
 */
private void cleanUp(String dbName, String tableName, String typeName) throws Exception {
  if (dbName != null) {
    if (tableName != null) {
      client.dropTable(dbName, tableName);
    }
    silentDropDatabase(dbName);
  }
  if (typeName != null) {
    client.dropType(typeName);
  }
}
/**
 * Resolves the metastore connection URL via {@link MetaStoreInit}, applying any
 * configured connection hook. Called once during startup.
 *
 * @throws MetaException if the hook or URL update fails
 */
private void init() throws MetaException {
  // Using the hook on startup ensures that the hook always has priority
  // over settings in *.xml. The thread local conf needs to be used because at this point
  // it has already been initialized using conf.
  MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData);
}
/**
 * Returns whether every nested info message is fully initialized, caching the
 * answer in {@code memoizedIsInitialized} so repeated calls are O(1).
 */
public final boolean isInitialized() {
  // Memo states: -1 = not yet computed, 0 = known-uninitialized, 1 = known-initialized.
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;
  for (int i = 0; i < getInfosCount(); i++) {
    if (!getInfos(i).isInitialized()) {
      // Cache the negative result before returning.
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}
/**
 * Checks whether the cached Hive object's metastore clients are usable with the
 * given conf. Fast check compares conf object identity; the full check delegates
 * to {@code isCompatibleWith}. A null client is treated as compatible.
 */
private static boolean isCompatible(Hive db, HiveConf c, boolean isFastCheck) {
  if (isFastCheck) {
    // Identity comparison only — cheap, no deep conf inspection.
    if (db.metaStoreClient != null && !db.metaStoreClient.isSameConfObj(c)) {
      return false;
    }
    return db.syncMetaStoreClient == null || db.syncMetaStoreClient.isSameConfObj(c);
  }
  // Full compatibility check; evaluation order (metaStoreClient first) is preserved.
  if (db.metaStoreClient != null && !db.metaStoreClient.isCompatibleWith(c)) {
    return false;
  }
  return db.syncMetaStoreClient == null || db.syncMetaStoreClient.isCompatibleWith(c);
}
/**
 * Returns true only when every nested info message is fully initialized.
 * Unlike the message variant, the builder does not memoize the result.
 */
public final boolean isInitialized() {
  for (int index = 0; index < getInfosCount(); index++) {
    if (!getInfos(index).isInitialized()) {
      return false;
    }
  }
  return true;
}
/**
 * Builds the message, failing fast if any required sub-message is missing.
 *
 * @throws UninitializedMessageException if the partially built message is incomplete
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo build() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfo built = buildPartial();
  if (!built.isInitialized()) {
    throw newUninitializedMessageException(built);
  }
  return built;
}
/**
 * Direct SQL is usable only when the expression tree can be turned into a SQL
 * filter for pushdown; delegates that decision (and the filter generation) to
 * {@code directSql.generateSqlFilterForPushdown}.
 */
@Override protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException { return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter); }
/** Returns a new {@code Builder} whose fields are merged from {@code prototype}. */
public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) { return newBuilder().mergeFrom(prototype); }
/** Returns a {@code Builder} initialized from this message's current field values. */
public Builder toBuilder() { return newBuilder(this); }
/**
 * Builds the registry of all supported column handlers, keyed by column name.
 */
private Map<String, Column> buildAllColumns() {
  Map<String, Column> columnsByName = new HashMap<>(6);
  Column[] allColumns = {
      new BinaryColumn(), new BooleanColumn(), new DateColumn(),
      new DoubleColumn(), new LongColumn(), new StringColumn()
  };
  for (Column column : allColumns) {
    columnsByName.put(column.colName, column);
  }
  return columnsByName;
}
/**
 * Fetches partitions via direct SQL, projecting only the requested partition
 * fields and applying the parameter-key include/exclude patterns along with the
 * filter spec and generated SQL filter.
 */
@Override protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException { return directSql .getPartitionsUsingProjectionAndFilterSpec(ctx.getTable(), ctx.partitionFields, includeParamKeyPattern, excludeParamKeyPattern, filterSpec, filter); }
/**
 * Drops the partitions matching the given expressions, letting the caller choose
 * whether the dropped partitions should be returned ({@code needResult}).
 */
@Override
public List<Partition> dropPartitions(String dbName, String tblName,
    List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ifExists,
    boolean needResult) throws NoSuchObjectException, MetaException, TException {
  PartitionDropOptions options = PartitionDropOptions.instance()
      .deleteData(deleteData)
      .ifExists(ifExists)
      .returnResults(needResult);
  return dropPartitions(dbName, tblName, partExprs, options);
}