@VisibleForTesting
public void validateTableCols(Table table, List<String> colNames) throws MetaException {
  List<FieldSchema> colList = table.getSd().getCols();
  for (String colName : colNames) {
    boolean foundCol = false;
    for (FieldSchema mCol : colList) {
      if (mCol.getName().equals(colName)) {
        foundCol = true;
        break;
      }
    }
    if (!foundCol) {
      throw new MetaException("Column " + colName + " doesn't exist in table "
          + table.getTableName() + " in database " + table.getDbName());
    }
  }
}
public void alterDatabase(String dbName, Database db) throws HiveException {
  try {
    getMSC().alterDatabase(dbName, db);
  } catch (MetaException e) {
    throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
  } catch (NoSuchObjectException e) {
    throw new HiveException("Database " + dbName + " does not exist.", e);
  } catch (TException e) {
    throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
  }
}
/**
 * Convert an Exception to a MetaException, setting the given exception as the cause.
 * @param errorMessage the error message for this MetaException
 * @param e cause of the exception
 * @return the MetaException with the specified exception as the cause
 */
public static MetaException newMetaException(String errorMessage, Exception e) {
  MetaException metaException = new MetaException(errorMessage);
  if (e != null) {
    metaException.initCause(e);
  }
  return metaException;
}
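A minimal usage sketch for the helper above; the call site and the loadMetadata helper are hypothetical, not from the original source:

try {
  loadMetadata(); // hypothetical operation that may throw
} catch (Exception e) {
  // the helper sets the cause only when e is non-null
  throw newMetaException("Failed to load metadata: " + e.getMessage(), e);
}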
private MetaException metaException(HiveException e) {
  MetaException ex = new MetaException(e.getMessage());
  ex.initCause(e);
  return ex;
}
private void truncateTempTable(org.apache.hadoop.hive.metastore.api.Table table)
    throws MetaException, TException {
  boolean isAutopurge = "true".equalsIgnoreCase(table.getParameters().get("auto.purge"));
  try {
    Path location = new Path(table.getSd().getLocation());
    // ... (setup of fs, statuses and environmentContext elided in this snippet)
    boolean success = Hive.trashFiles(fs, statuses, conf, isAutopurge);
    if (!success) {
      throw new HiveException("Error in deleting the contents of " + location.toString());
    }
    if (needToUpdateStats(table.getParameters(), environmentContext)) {
      alter_table_with_environmentContext(table.getDbName(), table.getTableName(), table,
          environmentContext);
    }
  } catch (HiveException e) { // assumed catch type; elided in the original snippet
    throw new MetaException(e.getMessage());
  }
}
@Override
public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
  // Subscribers can be notified about the addition of a table in HCAT
  // by listening on a topic named "HCAT" with the message selector string
  // "HCAT_EVENT = HCAT_ADD_TABLE"
  if (tableEvent.getStatus()) {
    Table tbl = tableEvent.getTable();
    IHMSHandler handler = tableEvent.getIHMSHandler();
    Configuration conf = handler.getConf();
    Table newTbl;
    try {
      newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())
          .deepCopy();
      newTbl.getParameters().put(
          HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
          getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
              + newTbl.getTableName().toLowerCase());
      handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
    } catch (TException e) {
      MetaException me = new MetaException(e.toString());
      me.initCause(e);
      throw me;
    }
    String topicName = getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase();
    send(messageFactory.buildCreateTableMessage(newTbl), topicName);
  }
}
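For context, a hedged sketch of the subscriber side described in the comment above, using plain javax.jms; the ConnectionFactory, topic prefix "HCAT" and database name "mydb" are illustrative assumptions, not from the original source:

// Hypothetical consumer; assumes javax.jms.* imports and a ConnectionFactory `factory`.
Connection conn = factory.createConnection();
Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
Topic topic = session.createTopic("HCAT.mydb"); // illustrative: prefix + "." + dbName
// The selector filters for add-table events only.
MessageConsumer consumer = session.createConsumer(topic, "HCAT_EVENT = 'HCAT_ADD_TABLE'");
conn.start();
Message msg = consumer.receive(); // blocks until a create-table message arrives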
/**
 * Given a database, a table and a list of partition key values, returns the Path
 * corresponding to those key-value pairs. The table location is used if available;
 * otherwise the path is constructed from the database location.
 *
 * @param db - Parent database of the given table
 * @param table - Table for which the partition key-values are given
 * @param vals - List of values for the partition keys
 * @return Path corresponding to the partition key-value pairs
 * @throws MetaException
 */
public Path getPartitionPath(Database db, Table table, List<String> vals)
    throws MetaException {
  List<FieldSchema> partKeys = table.getPartitionKeys();
  if (partKeys == null || (partKeys.size() != vals.size())) {
    throw new MetaException("Invalid number of partition keys found for "
        + table.getTableName());
  }
  Map<String, String> pm = new LinkedHashMap<>(vals.size());
  int i = 0;
  for (FieldSchema key : partKeys) {
    pm.put(key.getName(), vals.get(i));
    i++;
  }
  if (table.getSd().getLocation() != null) {
    return getPartitionPath(getDnsPath(new Path(table.getSd().getLocation())), pm);
  } else {
    return getDefaultPartitionPath(db, table, pm);
  }
}
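A brief usage sketch; `wh` as the enclosing warehouse instance and the two-level (ds, hr) partition scheme are assumptions for illustration:

// Hypothetical: values must be in partition-key order and match the number of
// partition keys, or the method throws MetaException.
Path partPath = wh.getPartitionPath(db, table, Arrays.asList("2021-01-01", "12"));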
for (String dbName : hiveClient.getAllDatabases()) {
  numDb++;
  HiveObjectRef dbToRefresh = getObjToRefresh(HiveObjectType.DATABASE, dbName, null);
  addGrantPrivilegesToBag(policyProvider, grantDatabaseBag, HiveObjectType.DATABASE,
      dbName, null, null, authorizer);
  hiveClient.refresh_privileges(dbToRefresh, authorizer, grantDatabaseBag);
  LOG.debug("processing " + dbName);
  for (String tblName : hiveClient.getAllTables(dbName)) {
    numTbl++;
    LOG.debug("processing " + dbName + "." + tblName);
    try {
      tbl = hiveClient.getTable(dbName, tblName);
      // ... (table-level refresh and setup of tableOfColumnsToRefresh elided in this snippet)
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        addGrantPrivilegesToBag(policyProvider, grantColumnBag, HiveObjectType.COLUMN,
            dbName, tblName, fs.getName(), authorizer);
      }
      for (FieldSchema fs : tbl.getSd().getCols()) {
        addGrantPrivilegesToBag(policyProvider, grantColumnBag, HiveObjectType.COLUMN,
            dbName, tblName, fs.getName(), authorizer);
      }
      hiveClient.refresh_privileges(tableOfColumnsToRefresh, authorizer, grantColumnBag);
    } catch (MetaException e) {
      LOG.debug("Unable to synchronize " + tblName + ":" + e.getMessage());
    }
    // ... (remainder of the loop elided in this snippet)
  }
}
@Override
public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
  LOG.debug("commit insert into table {} overwrite {}", table.getTableName(), overwrite);
  try {
    // Check if there are segments to load
    final Path segmentDescriptorDir = getSegmentDescriptorDir();
    final List<DataSegment> segmentsToLoad = fetchSegmentsMetadata(segmentDescriptorDir);
    final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
    // No segments to load, but we still need to honor overwrite:
    if (segmentsToLoad.isEmpty() && overwrite) {
      // For an insert overwrite we have to disable the existing Druid DataSource.
      DruidStorageHandlerUtils.disableDataSource(getConnector(),
          getDruidMetadataStorageTablesConfig(), dataSourceName);
    } else if (!segmentsToLoad.isEmpty()) {
      // At this point we have Druid segments from the reducers, but we need to atomically
      // rename and commit to metadata. Moving the Druid segments and committing to Druid
      // metadata happen as one transaction.
      checkLoadStatus(loadAndCommitDruidSegments(table, overwrite, segmentsToLoad));
    }
  } catch (IOException e) {
    throw new MetaException(e.getMessage());
  } catch (CallbackFailedException c) {
    LOG.error("Error while committing transaction to druid metadata storage", c);
    throw new MetaException(c.getCause().getMessage());
  } finally {
    cleanWorkingDir();
  }
}
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException {
  if (callerVerifier != null) {
    CallerArguments args = new CallerArguments(tbl.getDbName());
    args.tblName = tbl.getTableName();
    Boolean success = callerVerifier.apply(args);
    if ((success != null) && !success) {
      throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Table operation on DB: "
          + args.dbName + " table: " + args.tblName);
    }
  }
  super.createTable(tbl);
}
@Override
public boolean drop_role(final String roleName) throws TException {
  incrementCounter("drop_role");
  firePreEvent(new PreAuthorizationCallEvent(this));
  if (ADMIN.equals(roleName) || PUBLIC.equals(roleName)) {
    throw new MetaException(PUBLIC + "," + ADMIN + " roles can't be dropped.");
  }
  Boolean ret;
  try {
    ret = getMS().removeRole(roleName);
  } catch (MetaException e) {
    throw e;
  } catch (NoSuchObjectException e) {
    ret = false;
    MetaStoreUtils.logAndThrowMetaException(e);
  } catch (Exception e) {
    throw new TException(e);
  }
  return ret;
}
@Test
public void doubleAddPrimaryKey() throws TException {
  Table table = testTables[0];
  // Make sure get on a table with no key returns empty list
  PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName());
  rqst.setCatName(table.getCatName());
  List<SQLPrimaryKey> fetched = client.getPrimaryKeys(rqst);
  Assert.assertTrue(fetched.isEmpty());

  // Single column unnamed primary key in default catalog and database
  List<SQLPrimaryKey> pk = new SQLPrimaryKeyBuilder()
      .onTable(table)
      .addColumn("col1")
      .build(metaStore.getConf());
  client.addPrimaryKey(pk);

  try {
    pk = new SQLPrimaryKeyBuilder()
        .onTable(table)
        .addColumn("col2")
        .build(metaStore.getConf());
    client.addPrimaryKey(pk);
    Assert.fail();
  } catch (MetaException e) {
    Assert.assertTrue(e.getMessage().contains("Primary key already exists for"));
  }
}
@Override
public void preCreateTable(Table table) throws MetaException {
  if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
    throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE);
  }
  Arrays.stream(KafkaTableProperties.values())
      .filter(KafkaTableProperties::isMandatory)
      .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()),
          "Set Table property " + key.getName()));
  // Fill in all the defaults at pre-create time.
  Arrays.stream(KafkaTableProperties.values()).forEach((key) -> {
    if (table.getParameters().get(key.getName()) == null) {
      table.putToParameters(key.getName(), key.getDefaultValue());
    }
  });
}
private void initializeTransactionalProperties(Table table) throws MetaException {
  // All new versions of Acid tables created after the introduction of the Acid version/type
  // system can have the TRANSACTIONAL_PROPERTIES property defined. This parameter can be used
  // to change the operational behavior of ACID. However, if this parameter is not defined,
  // new Acid tables will still behave as the old ones did. This preserves the behavior
  // in case of a rolling downgrade.

  // Initialize transactional table properties with the default string value.
  String tableTransactionalProperties = null;

  Map<String, String> parameters = table.getParameters();
  if (parameters != null) {
    Set<String> keys = parameters.keySet();
    for (String key : keys) {
      if (hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) {
        tableTransactionalProperties = parameters.get(key).toLowerCase();
        parameters.remove(key);
        String validationError = validateTransactionalProperties(tableTransactionalProperties);
        if (validationError != null) {
          throw new MetaException("Invalid transactional properties specified for "
              + Warehouse.getQualifiedName(table) + " with the error " + validationError);
        }
        break;
      }
    }
  }
  if (tableTransactionalProperties != null) {
    parameters.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
        tableTransactionalProperties);
  }
}
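An illustrative sketch of the normalization behavior above; the mixed-case key and value are hypothetical inputs:

// Hypothetical input: the key is matched case-insensitively and the value is
// lower-cased before being written back under the canonical constant.
Map<String, String> params = table.getParameters();
params.put("TRANSACTIONAL_PROPERTIES", "INSERT_ONLY");
initializeTransactionalProperties(table);
// Afterwards the canonical lower-case key holds the normalized value:
// params.get("transactional_properties") -> "insert_only"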
@Override
public List<String> listDatabaseNamesByPattern(String pattern) throws HCatException {
  List<String> dbNames = null;
  try {
    dbNames = hmsClient.getDatabases(pattern);
  } catch (MetaException exp) {
    throw new HCatException("MetaException while listing db names. " + exp.getMessage(), exp);
  } catch (TException e) {
    throw new HCatException("Transport Exception while listing db names. " + e.getMessage(), e);
  }
  return dbNames;
}
@Override
Table resolveTable(CompactionInfo ci) throws MetaException {
  try {
    return msc.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
  } catch (TException e) {
    LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage());
    throw new MetaException(e.toString());
  }
}
try {
  // ... (preparation of names, newTParts and environmentContext elided in this snippet)
  getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
} catch (MetaException e) {
  throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
} catch (TException e) {
  throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
}
public static String makePartNameMatcher(Table table, List<String> partVals, String defaultStr)
    throws MetaException {
  List<FieldSchema> partCols = table.getPartitionKeys();
  int numPartKeys = partCols.size();
  if (partVals.size() > numPartKeys) {
    throw new MetaException("Incorrect number of partition values."
        + " numPartKeys=" + numPartKeys + ", part_val=" + partVals);
  }
  partCols = partCols.subList(0, partVals.size());
  // Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
  // where partVal is either the escaped partition value given as input,
  // or a regex of the form ".*".
  // This works because the "=" and "/" separating key names and partition key/values
  // are not escaped.
  String partNameMatcher = Warehouse.makePartName(partCols, partVals, defaultStr);
  // Append ".*" to the regex to match anything after the partial spec.
  if (partVals.size() < numPartKeys) {
    partNameMatcher += defaultStr;
  }
  return partNameMatcher;
}
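For illustration, a hedged example of the matcher this produces; the (ds, hr) partition scheme and values are assumptions, not from the original source:

// Hypothetical: table partitioned by (ds, hr), only ds supplied, defaultStr = ".*".
// makePartName escapes the value and yields "ds=2021-01-01"; the trailing ".*"
// then matches any hr, giving a matcher of roughly "ds=2021-01-01.*".
String matcher = makePartNameMatcher(table, Arrays.asList("2021-01-01"), ".*");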
if (oldPartSpec.keySet().size() != tbl.getPartCols().size()
    || newPartSpec.keySet().size() != tbl.getPartCols().size()) {
  throw new HiveException(
      "Unable to rename partition to the same name: number of partition cols don't match. ");
}
if (!oldPartSpec.keySet().equals(newPartSpec.keySet())) { // assumed guard; elided in the snippet
  throw new HiveException(
      "Unable to rename partition to the same name: old and new partition cols don't match. ");
}
List<String> pvals = new ArrayList<>();
for (FieldSchema field : tbl.getPartCols()) {
  String val = oldPartSpec.get(field.getName());
  if (val == null || val.length() == 0) {
    throw new HiveException("get partition: Value for key " + field.getName()
        + " is null or empty");
  } else {
    pvals.add(val); // assumed body; the else-branch was elided in the snippet
  }
}
try {
  getMSC().renamePartition(tbl.getDbName(), tbl.getTableName(), pvals, newPart.getTPartition());
} catch (InvalidOperationException e) { // assumed catch type; elided in the snippet
  throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
} catch (MetaException e) {
  throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
} catch (TException e) {
  throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
}
@Nullable
private HiveStorageHandler createStorageHandler(org.apache.hadoop.hive.metastore.api.Table tbl)
    throws MetaException {
  try {
    if (tbl == null) {
      return null;
    }
    HiveStorageHandler storageHandler =
        HiveUtils.getStorageHandler(conf, tbl.getParameters().get(META_TABLE_STORAGE));
    return storageHandler;
  } catch (HiveException ex) {
    LOG.error(StringUtils.stringifyException(ex));
    throw new MetaException("Failed to load storage handler: " + ex.getMessage());
  }
}