/**
 * Insert the given entity into the objects table using the given {@code ps}.
 *
 * @param ps the prepared statement to use
 * @param auditLogId the audit log ID associated with the Hive query for this audit log entry
 * @param category the category of the object
 * @param entity the entity associated with this query
 *
 * @throws EntityException if there's an error processing this entity
 * @throws SQLException if there's an error inserting into the DB.
 */
private static void addToObjectsTable(
    PreparedStatement ps,
    long auditLogId,
    ObjectCategory category,
    Entity entity) throws SQLException, EntityException {
  // Bind columns positionally: audit log id, category, entity type,
  // identifier string, then the serialized (JSON) form of the entity.
  int psIndex = 1;
  ps.setLong(psIndex++, auditLogId);
  ps.setString(psIndex++, category.toString());
  ps.setString(psIndex++, entity.getType().toString());
  ps.setString(psIndex++, toIdentifierString(entity));
  ps.setString(psIndex, toJson(entity));
  ps.executeUpdate();
}
private static void addOutputs(HiveMetaStoreBridge hiveBridge, HiveOperation op, SortedSet<WriteEntity> sortedOutputs, StringBuilder buffer, final Map<WriteEntity, Referenceable> refs, final boolean ignoreHDFSPathsInQFName) throws HiveException { if (refs != null) { Set<String> dataSetsProcessed = new LinkedHashSet<>(); if (sortedOutputs != null) { for (WriteEntity output : sortedOutputs) { final Entity entity = output; if (!dataSetsProcessed.contains(output.getName().toLowerCase())) { //HiveOperation.QUERY type encompasses INSERT, INSERT_OVERWRITE, UPDATE, DELETE, PATH_WRITE operations if (addQueryType(op, (WriteEntity) entity)) { buffer.append(SEP); buffer.append(((WriteEntity) entity).getWriteType().name()); } if (ignoreHDFSPathsInQFName && (Type.DFS_DIR.equals(output.getType()) || Type.LOCAL_DIR.equals(output.getType()))) { LOG.debug("Skipping dfs dir output addition to process qualified name {} ", output.getName()); } else if (refs.containsKey(output)) { if ( output.getType() == Type.PARTITION || output.getType() == Type.TABLE) { final Date createTime = HiveMetaStoreBridge.getTableCreatedTime(hiveBridge.hiveClient.getTable(output.getTable().getDbName(), output.getTable().getTableName())); addDataset(buffer, refs.get(output), createTime.getTime()); } else { addDataset(buffer, refs.get(output)); } } dataSetsProcessed.add(output.getName().toLowerCase()); } } } } }
/** * Check if the given read entity is a table that has parents of type Table * Hive compiler performs a query rewrite by replacing view with its definition. In the process, tt captures both * the original view and the tables/view that it selects from . * The access authorization is only interested in the top level views and not the underlying tables. * @param readEntity * @return */ private boolean isChildTabForView(ReadEntity readEntity) { // If this is a table added for view, then we need to skip that if (!readEntity.getType().equals(Type.TABLE) && !readEntity.getType().equals(Type.PARTITION)) { return false; } if (readEntity.getParents() != null && readEntity.getParents().size() > 0) { for (ReadEntity parentEntity : readEntity.getParents()) { if (!parentEntity.getType().equals(Type.TABLE)) { return false; } } return true; } else { return false; } }
private static void addInputs(HiveMetaStoreBridge hiveBridge, HiveOperation op, SortedSet<ReadEntity> sortedInputs, StringBuilder buffer, final Map<ReadEntity, Referenceable> refs, final boolean ignoreHDFSPathsInQFName) throws HiveException { if (refs != null) { if (sortedInputs != null) { Set<String> dataSetsProcessed = new LinkedHashSet<>(); for (Entity input : sortedInputs) { if (!dataSetsProcessed.contains(input.getName().toLowerCase())) { //HiveOperation.QUERY type encompasses INSERT, INSERT_OVERWRITE, UPDATE, DELETE, PATH_WRITE operations if (ignoreHDFSPathsInQFName && (Type.DFS_DIR.equals(input.getType()) || Type.LOCAL_DIR.equals(input.getType()))) { LOG.debug("Skipping dfs dir input addition to process qualified name {} ", input.getName()); } else if (refs.containsKey(input)) { if ( input.getType() == Type.PARTITION || input.getType() == Type.TABLE) { final Date createTime = HiveMetaStoreBridge.getTableCreatedTime(hiveBridge.hiveClient.getTable(input.getTable().getDbName(), input.getTable().getTableName())); addDataset(buffer, refs.get(input), createTime.getTime()); } else { addDataset(buffer, refs.get(input)); } } dataSetsProcessed.add(input.getName().toLowerCase()); } } } } }
/**
 * Insert the given entity into the objects table using the given {@code ps}.
 *
 * @param ps the prepared statement to use
 * @param auditLogId the audit log ID associated with the Hive query for this audit log entry
 * @param category the category of the object
 * @param entity the entity associated with this query
 *
 * @throws EntityException if there's an error processing this entity
 * @throws SQLException if there's an error inserting into the DB.
 */
private static void addToObjectsTable(
    PreparedStatement ps,
    long auditLogId,
    ObjectCategory category,
    Entity entity) throws SQLException, EntityException {
  // Bind columns positionally: audit log id, category, entity type,
  // identifier string, then the serialized (JSON) form of the entity.
  int psIndex = 1;
  ps.setLong(psIndex++, auditLogId);
  ps.setString(psIndex++, category.toString());
  ps.setString(psIndex++, entity.getType().toString());
  ps.setString(psIndex++, toIdentifierString(entity));
  ps.setString(psIndex, toJson(entity));
  ps.executeUpdate();
}
private static boolean addQueryType(HiveOperation op, WriteEntity entity) { if (entity.getWriteType() != null && HiveOperation.QUERY.equals(op)) { switch (entity.getWriteType()) { case INSERT: case INSERT_OVERWRITE: case UPDATE: case DELETE: return true; case PATH_WRITE: //Add query type only for DFS paths and ignore local paths since they are not added as outputs if ( !Type.LOCAL_DIR.equals(entity.getType())) { return true; } break; default: } } return false; }
/**
 * Emits delete notifications for a DROP DATABASE event: table outputs are deleted via
 * {@code deleteTable}, and the database output itself is deleted by qualified name.
 *
 * @param dgiBridge bridge providing the cluster name for qualified-name construction
 * @param event the Hive event whose outputs are to be deleted
 */
private void deleteDatabase(HiveMetaStoreBridge dgiBridge, HiveEventContext event) {
  // More than one output means the drop cascaded to the database's tables.
  final boolean cascade = event.getOutputs().size() > 1;
  if (cascade) {
    LOG.info("Starting deletion of tables and databases with cascade {} ",
        event.getQueryStr());
  } else {
    LOG.info("Starting deletion of database {} ", event.getQueryStr());
  }
  for (WriteEntity output : event.getOutputs()) {
    final Type outputType = output.getType();
    if (Type.TABLE.equals(outputType)) {
      deleteTable(dgiBridge, event, output);
    } else if (Type.DATABASE.equals(outputType)) {
      final String dbQualifiedName = HiveMetaStoreBridge.getDBQualifiedName(
          dgiBridge.getClusterName(), output.getDatabase().getName());
      event.addMessage(new HookNotification.EntityDeleteRequest(event.getUser(),
          HiveDataTypes.HIVE_DB.getName(), AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
          dbQualifiedName));
    }
  }
}
/**
 * Emits a delete notification for every table output of the given event by delegating to
 * the three-argument {@code deleteTable} overload.
 *
 * @param dgiBridge bridge passed through to the per-table delete
 * @param event the Hive event whose table outputs are to be deleted
 */
private void deleteTable(HiveMetaStoreBridge dgiBridge, HiveEventContext event) {
  for (WriteEntity tableOutput : event.getOutputs()) {
    if (!Type.TABLE.equals(tableOutput.getType())) {
      continue;  // Only table outputs are relevant here.
    }
    deleteTable(dgiBridge, event, tableOutput);
  }
}
/**
 * Reports whether any of the given entities is a partition, i.e. whether the query
 * operates at partition granularity.
 *
 * @param entities the entities to inspect
 * @return true if at least one entity has type PARTITION
 */
private static boolean isPartitionBasedQuery(Set<? extends Entity> entities) {
  return entities.stream().anyMatch(entity -> Type.PARTITION.equals(entity.getType()));
}