// Fire the pre-event so registered MetaStorePreEventListeners can inspect
// (and veto) the partitions before they are added.
firePreEvent(new PreAddPartitionEvent(tbl, parts, this));
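// A minimal sketch of what a firePreEvent helper typically does, assuming a
// `preListeners` list populated from the metastore configuration (the field
// name is an assumption, not taken from the snippet above):
private void firePreEvent(PreEventContext event) throws MetaException {
  for (MetaStorePreEventListener listener : preListeners) {
    try {
      listener.onEvent(event);
    } catch (NoSuchObjectException | InvalidOperationException e) {
      // surface a listener veto as a MetaException to the caller
      throw new MetaException(e.getMessage());
    }
  }
}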
// Authorize every partition carried by the pre-event against each configured
// metastore authorization provider.
private void authorizeAddPartition(PreAddPartitionEvent context)
    throws InvalidOperationException, MetaException {
  try {
    for (org.apache.hadoop.hive.metastore.api.Partition mapiPart : context.getPartitions()) {
      org.apache.hadoop.hive.ql.metadata.Partition wrappedPartition =
          new PartitionWrapper(mapiPart, context);
      for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) {
        authorizer.authorize(wrappedPartition,
            HiveOperation.ALTERTABLE_ADDPARTS.getInputRequiredPrivileges(),
            HiveOperation.ALTERTABLE_ADDPARTS.getOutputRequiredPrivileges());
      }
    }
  } catch (AuthorizationException | NoSuchObjectException e) {
    throw invalidOperationException(e);
  } catch (HiveException e) {
    throw metaException(e);
  }
}
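// Hedged sketch of how a MetaStorePreEventListener usually routes events to
// the handler above; dispatch-on-event-type is the standard pattern, but the
// exact cases shown here are illustrative:
@Override
public void onEvent(PreEventContext context)
    throws MetaException, NoSuchObjectException, InvalidOperationException {
  switch (context.getEventType()) {
    case ADD_PARTITION:
      authorizeAddPartition((PreAddPartitionEvent) context);
      break;
    default:
      // other event types (CREATE_TABLE, DROP_PARTITION, ...) are handled
      // by their own authorize* methods
      break;
  }
}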
private void authorizeAddPartition(PreAddPartitionEvent context)
    throws InvalidOperationException, MetaException, NoSuchObjectException {
  for (Partition mapiPart : context.getPartitions()) {
    HierarcyBuilder inputBuilder = new HierarcyBuilder();
    inputBuilder.addTableToOutput(getAuthServer(), mapiPart.getDbName(),
        mapiPart.getTableName());
    // The handler lives on the event context, not on the partition; resolve
    // the parent table's location so a non-default partition location can be
    // authorized as a URI.
    String tableLocation = context.getHandler()
        .get_table(mapiPart.getDbName(), mapiPart.getTableName())
        .getSd().getLocation();
    // ...
  }
}
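// Hedged sketch of how the builders above are typically consumed once the
// input/output hierarchies are assembled (method and builder usage follow
// Sentry's metastore binding; treat the exact call as illustrative):
HierarcyBuilder outputBuilder = new HierarcyBuilder();
outputBuilder.addTableToOutput(getAuthServer(), mapiPart.getDbName(),
    mapiPart.getTableName());
authorizeMetastoreAccess(HiveOperation.ALTERTABLE_ADDPARTS,
    inputBuilder.build(), outputBuilder.build());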
firePreEvent(new PreAddPartitionEvent(tbl, partitionSpecProxy, this));

// Track partition values already seen in this request so duplicates can be
// rejected before anything is written.
Set<PartValEqWrapperLite> partsToAdd = new HashSet<>(partitionSpecProxy.size());
List<Partition> partitionsToAdd = new ArrayList<>(partitionSpecProxy.size());
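// Illustrative use of the set/list pair above: each incoming partition is
// wrapped and offered to the set, and a repeat within the same request is
// rejected (the exact error message is an assumption):
PartitionSpecProxy.PartitionIterator it = partitionSpecProxy.getPartitionIterator();
while (it.hasNext()) {
  Partition part = it.next();
  if (!partsToAdd.add(new PartValEqWrapperLite(part))) {
    throw new MetaException("Duplicate partitions in the list: " + part);
  }
  partitionsToAdd.add(part);
}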
// Single-partition variant: add_partition/append_partition call sites fire
// the event with just the one partition being created.
firePreEvent(new PreAddPartitionEvent(tbl, part, this));
validateAddPartition(part, partAdded);
validateTableInAddPartition(tbl, partEvent.getTable());
validateAddPartition(part, prePartEvent.getPartitions().get(0));
validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
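// A minimal sketch of what a validateAddPartition-style test helper might
// assert; the fields compared here are an assumption, not taken from the
// snippet above:
private void validateAddPartition(Partition expected, Partition actual) {
  assertEquals(expected.getDbName(), actual.getDbName());
  assertEquals(expected.getTableName(), actual.getTableName());
  assertEquals(expected.getValues(), actual.getValues());
}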
firePreEvent(new PreAddPartitionEvent(tbl, partitionSpecProxy, this));

// Partitions are created concurrently; collect one Future per partition so
// failures can be propagated after submission.
List<Future<Partition>> partFutures = Lists.newArrayList();
final Table table = tbl;
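// Illustrative continuation, assuming a `threadPool` ExecutorService (the
// field name is an assumption): submit one task per partition, then join the
// futures so any worker failure surfaces to the caller.
PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
while (iterator.hasNext()) {
  final Partition part = iterator.next();
  partFutures.add(threadPool.submit(new Callable<Partition>() {
    @Override
    public Partition call() throws Exception {
      // validate the partition against `table` and create its directory here
      return part;
    }
  }));
}
try {
  for (Future<Partition> partFuture : partFutures) {
    partFuture.get();
  }
} catch (InterruptedException | ExecutionException e) {
  throw new MetaException(e.getMessage());
}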
// Variant where the event is constructed from the partition alone; `tbl` is
// presumably resolved later inside the try block.
Table tbl = null;
try {
  firePreEvent(new PreAddPartitionEvent(part, this));
  // ...