private void authorizeDropPartition(PreDropPartitionEvent context)
    throws InvalidOperationException, MetaException {
  try {
    for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) {
      if (authorizer instanceof HiveMultiPartitionAuthorizationProviderBase) {
        // Authorize all dropped partitions in one shot.
        authorizeDropMultiPartition((HiveMultiPartitionAuthorizationProviderBase) authorizer, context);
      } else {
        // Authorize each dropped partition individually.
        TableWrapper table = new TableWrapper(context.getTable());
        Iterator<Partition> partitionIterator = context.getPartitionIterator();
        while (partitionIterator.hasNext()) {
          authorizer.authorize(
              new PartitionWrapper(table, partitionIterator.next()),
              HiveOperation.ALTERTABLE_DROPPARTS.getInputRequiredPrivileges(),
              HiveOperation.ALTERTABLE_DROPPARTS.getOutputRequiredPrivileges());
        }
      }
    }
  } catch (AuthorizationException e) {
    throw invalidOperationException(e);
  } catch (HiveException e) {
    throw metaException(e);
  }
}
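// Note: authorizeDropMultiPartition(), called above, is not part of this
// listing. The following is a minimal sketch of what such a helper might look
// like; it assumes HiveMultiPartitionAuthorizationProviderBase exposes an
// authorize() overload taking the table plus an Iterable of wrapped partitions
// (that overload's exact shape is an assumption, not confirmed by this
// listing), and that java.util.List/ArrayList are imported.
private void authorizeDropMultiPartition(HiveMultiPartitionAuthorizationProviderBase authorizer,
    PreDropPartitionEvent context) throws HiveException {
  TableWrapper table = new TableWrapper(context.getTable());
  // Wrap every metastore partition up front, then authorize the whole batch
  // with a single provider call instead of one call per partition.
  List<org.apache.hadoop.hive.ql.metadata.Partition> parts = new ArrayList<>();
  Iterator<Partition> partitionIterator = context.getPartitionIterator();
  while (partitionIterator.hasNext()) {
    parts.add(new PartitionWrapper(table, partitionIterator.next()));
  }
  authorizer.authorize(table, parts,
      HiveOperation.ALTERTABLE_DROPPARTS.getInputRequiredPrivileges(),
      HiveOperation.ALTERTABLE_DROPPARTS.getOutputRequiredPrivileges());
}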
private void authorizeAddPartition(PreAddPartitionEvent context)
    throws InvalidOperationException, MetaException {
  try {
    for (org.apache.hadoop.hive.metastore.api.Partition mapiPart : context.getPartitions()) {
      org.apache.hadoop.hive.ql.metadata.Partition wrappedPartition =
          new PartitionWrapper(mapiPart, context);
      for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) {
        authorizer.authorize(wrappedPartition,
            HiveOperation.ALTERTABLE_ADDPARTS.getInputRequiredPrivileges(),
            HiveOperation.ALTERTABLE_ADDPARTS.getOutputRequiredPrivileges());
      }
    }
  } catch (AuthorizationException | NoSuchObjectException e) {
    throw invalidOperationException(e);
  } catch (HiveException e) {
    throw metaException(e);
  }
}
private void authorizeAlterPartition(PreAlterPartitionEvent context)
    throws InvalidOperationException, MetaException {
  try {
    org.apache.hadoop.hive.metastore.api.Partition mapiPart = context.getNewPartition();
    org.apache.hadoop.hive.ql.metadata.Partition wrappedPartition =
        new PartitionWrapper(mapiPart, context);
    for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) {
      // Altering a partition requires no input privileges, only
      // ALTER_METADATA on the output side.
      authorizer.authorize(wrappedPartition, null, new Privilege[]{Privilege.ALTER_METADATA});
    }
  } catch (AuthorizationException | NoSuchObjectException e) {
    throw invalidOperationException(e);
  } catch (HiveException e) {
    throw metaException(e);
  }
}
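// All three authorize methods above pull their providers from tAuthorizers,
// whose declaration is outside this listing. A plausible sketch, offered as an
// assumption only: a ThreadLocal list of configured metastore authorization
// providers, so each metastore handler thread works with its own instances.
// loadAuthorizationProviders() is a hypothetical helper standing in for the
// real initialization (e.g. reading the configured authorization manager).
private final ThreadLocal<List<HiveMetastoreAuthorizationProvider>> tAuthorizers =
    new ThreadLocal<List<HiveMetastoreAuthorizationProvider>>() {
      @Override
      protected List<HiveMetastoreAuthorizationProvider> initialValue() {
        return loadAuthorizationProviders(); // hypothetical helper
      }
    };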
public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
    PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
  org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = mapiPart.deepCopy();
  String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
      MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
  org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core(
      catName, mapiPart.getDbName(), mapiPart.getTableName());
  if (wrapperApiPart.getSd() == null) {
    // In the case of create-partition events, by the time this event fires,
    // the partition object has not yet come into existence, and thus will not
    // yet have a location or an SD, but these are needed to create a
    // ql.metadata.Partition, so we use a copy of the table's SD. The only
    // place this is used is by the authorization hooks, so we will not affect
    // code flow in the metastore itself.
    wrapperApiPart.setSd(t.getSd().deepCopy());
  }
  initialize(new TableWrapper(t), wrapperApiPart);
}
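// Why setSd(t.getSd().deepCopy()) rather than setSd(t.getSd()): Thrift-generated
// metastore objects are mutable, so handing the partition the table's own
// StorageDescriptor would alias the two, and a later mutation through the
// wrapped partition would silently rewrite the table's SD. A minimal
// illustration of the hazard, reusing the constructor's variable names purely
// for exposition:
org.apache.hadoop.hive.metastore.api.StorageDescriptor tableSd = t.getSd();

wrapperApiPart.setSd(tableSd);                     // aliased: one shared object
wrapperApiPart.getSd().setLocation("/tmp/newloc"); // ...also rewrites tableSd

wrapperApiPart.setSd(tableSd.deepCopy());          // independent copy
wrapperApiPart.getSd().setLocation("/tmp/newloc"); // ...tableSd stays untouched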
public PartitionWrapper(org.apache.hadoop.hive.ql.metadata.Table table,
    org.apache.hadoop.hive.metastore.api.Partition mapiPart) throws HiveException {
  initialize(table, mapiPart);
}
@Override
public org.apache.hadoop.hive.ql.metadata.Partition apply(Partition partition) {
  try {
    return new PartitionWrapper(table, partition);
  } catch (Exception exception) {
    LOG.error("Could not construct partition-object for: " + partition, exception);
    throw new RuntimeException(exception);
  }
}
});
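// The apply() override above is the body of an anonymous
// Function<Partition, org.apache.hadoop.hive.ql.metadata.Partition>; the
// trailing "});" closes that anonymous class and its enclosing call. A hedged
// sketch of how such a function is typically wired up, assuming the enclosing
// call is Guava's Iterators.transform() (the actual call site is not shown in
// this listing), that an effectively-final TableWrapper named table is in
// scope, and that com.google.common.base.Function and
// com.google.common.collect.Iterators are imported:
Iterator<org.apache.hadoop.hive.ql.metadata.Partition> wrappedPartitions =
    Iterators.transform(context.getPartitionIterator(),
        new Function<Partition, org.apache.hadoop.hive.ql.metadata.Partition>() {
          @Override
          public org.apache.hadoop.hive.ql.metadata.Partition apply(Partition partition) {
            try {
              // Wrap each raw metastore partition lazily, as it is consumed.
              return new PartitionWrapper(table, partition);
            } catch (Exception exception) {
              LOG.error("Could not construct partition-object for: " + partition, exception);
              throw new RuntimeException(exception);
            }
          }
        });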