public static ReadEntity toReadEntity(Path location, HiveConf conf) throws SemanticException {
  try {
    Path path = tryQualifyPath(location, conf);
    return new ReadEntity(path, FileUtils.isLocalFile(conf, path.toUri()));
  } catch (Exception e) {
    throw new SemanticException(e);
  }
}
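A minimal usage sketch, assuming the static helper above is in scope and using a hypothetical warehouse location and variable names: qualify the path and record it in a query's input set.

// Hypothetical illustration only; the location string and variable names are made up.
HiveConf conf = new HiveConf();
Set<ReadEntity> inputs = new HashSet<ReadEntity>();
ReadEntity locationInput = toReadEntity(new Path("/user/hive/warehouse/sales"), conf);
inputs.add(locationInput);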
ReadEntity.Type typ = re.getType();
switch (typ) {
case TABLE: {
  String dbName = re.getTable().getDbName();
  String tblName = re.getTable().getTableName();
  Table t = db.getTable(dbName, tblName);
  t.setLastAccessTime(lastAccessTime);
  break;
}
case PARTITION: {
  String dbName = re.getTable().getDbName();
  String tblName = re.getTable().getTableName();
  Partition p = re.getPartition();
  Table t = db.getTable(dbName, tblName);
  p = db.getPartition(t, p.getSpec(), false);
private void putAccessedColumnsToReadEntity(HashSet<ReadEntity> inputs, ColumnAccessInfo columnAccessInfo) {
  Map<String, List<String>> tableToColumnAccessMap = columnAccessInfo.getTableToColumnAccessMap();
  if (tableToColumnAccessMap != null && !tableToColumnAccessMap.isEmpty()) {
    for (ReadEntity entity : inputs) {
      List<String> cols;
      switch (entity.getType()) {
        case TABLE:
          cols = tableToColumnAccessMap.get(entity.getTable().getCompleteName());
          if (cols != null && !cols.isEmpty()) {
            entity.getAccessedColumns().addAll(cols);
          }
          break;
        case PARTITION:
          cols = tableToColumnAccessMap.get(entity.getPartition().getTable().getCompleteName());
          if (cols != null && !cols.isEmpty()) {
            entity.getAccessedColumns().addAll(cols);
          }
          break;
        default:
          // no-op
      }
    }
  }
}
public static ReadEntity addInput(Set<ReadEntity> inputs, ReadEntity newInput, boolean mergeIsDirectFlag) {
  // If the input is already present, make sure the new parent is added to the input.
  if (inputs.contains(newInput)) {
    for (ReadEntity input : inputs) {
      if (input.equals(newInput)) {
        if ((newInput.getParents() != null) && (!newInput.getParents().isEmpty())) {
          input.getParents().addAll(newInput.getParents());
          input.setDirect(input.isDirect() || newInput.isDirect());
        } else if (mergeIsDirectFlag) {
          input.setDirect(input.isDirect() || newInput.isDirect());
        }
        return input;
      }
    }
    assert false;
  } else {
    inputs.add(newInput);
    return newInput;
  }
  // make compile happy
  return null;
}
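A sketch of the merge behaviour when the same table is added twice, once as a direct scan and once through a view (hypothetical table names; assumes the ReadEntity(Table) and ReadEntity(Table, ReadEntity parent) constructors used elsewhere in this section):

Set<ReadEntity> inputs = new HashSet<ReadEntity>();
Table sales = new Table("default", "sales");           // hypothetical base table
Table salesView = new Table("default", "sales_view");  // hypothetical view over it

ReadEntity directRead = new ReadEntity(sales);
ReadEntity viewRead = new ReadEntity(salesView);
ReadEntity readViaView = new ReadEntity(sales, viewRead);  // same table, reached by expanding the view

addInput(inputs, directRead, false);                    // first sighting: simply added
ReadEntity merged = addInput(inputs, readViaView, false);
// 'merged' is the entity already in 'inputs'; it has absorbed the view as a parent,
// and its isDirect flag is the OR of both sightings.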
for (Entity e : sem.getInputs()) {
  if (e.getType() == Entity.Type.PARTITION) {
    additionalInputs.add(new ReadEntity(e.getTable()));
  }
}

if (read.isDummy() || read.isPathType() || read.getType() == Entity.Type.DATABASE) {
  continue;
}
Table tbl = read.getTable();
if ((read.getPartition() != null) || (tbl != null && tbl.isPartitioned())) {
  String tblName = tbl.getTableName();
  if (tableUsePartLevelAuth.get(tblName) == null) {

if (read.isDummy() || read.isPathType() || !read.isDirect()) {
  continue;
}
if (read.getType() == Entity.Type.DATABASE) {
  authorizer.authorize(read.getDatabase(), op.getInputRequiredPrivileges(), null);
  continue;
}
Table tbl = read.getTable();
if (tbl.isView() && sem instanceof SemanticAnalyzer) {
  tab2Cols.put(tbl,
      sem.getColumnAccessInfo().getTableToColumnAccessMap().get(tbl.getCompleteName()));
}
if (read.getPartition() != null) {
  Partition partition = read.getPartition();
  tbl = partition.getTable();
if (!input.needsLock()) {
  continue;
}
LOG.debug("Adding " + input.getName() + " to list of lock inputs");
if (input.getType() == ReadEntity.Type.DATABASE) {
  lockObjects.addAll(getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED));
} else if (input.getType() == ReadEntity.Type.TABLE) {
  lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, HiveLockMode.SHARED));
} else {
  lockObjects.addAll(getLockObjects(plan, null, null, input.getPartition(), HiveLockMode.SHARED));
}
if (!input.needsLock() || input.isUpdateOrDelete() || !AcidUtils.needsLock(input)) {
  continue;
}
switch (input.getType()) {
  case DATABASE:
    compBuilder.setDbName(input.getDatabase().getName());
    break;
  case TABLE:
    t = input.getTable();
    compBuilder.setDbName(t.getDbName());
    compBuilder.setTableName(t.getTableName());
    break;
  case PARTITION:
    compBuilder.setPartitionName(input.getPartition().getName());
    t = input.getPartition().getTable();
    compBuilder.setDbName(t.getDbName());
    compBuilder.setTableName(t.getTableName());
    break;
JSONArray inputPartitionInfo = new JSONArray();
for (ReadEntity input : work.getInputs()) {
  switch (input.getType()) {
    case TABLE:
      Table table = input.getTable();
      JSONObject tableInfo = new JSONObject();
      tableInfo.put("tablename", table.getCompleteName());
      tableInfo.put("tabletype", table.getTableType().toString());
      if ((input.getParents() != null) && (!input.getParents().isEmpty())) {
        tableInfo.put("tableParents", input.getParents().toString());
      }
      break;
    case PARTITION:
      JSONObject partitionInfo = new JSONObject();
      partitionInfo.put("partitionName", input.getPartition().getCompleteName());
      if ((input.getParents() != null) && (!input.getParents().isEmpty())) {
        partitionInfo.put("partitionParents", input.getParents().toString());
      }
      break;
  newInput = new ReadEntity(part, parentViewInfo, isDirectRead);
} else {
  newInput = new ReadEntity(part.getTable(), parentViewInfo, isDirectRead);
}

if ((newInput.getParents() != null) && (!newInput.getParents().isEmpty())) {
  input.getParents().addAll(newInput.getParents());
  input.setDirect(input.isDirect() || newInput.isDirect());
}
if (entity.getType() == Entity.Type.PARTITION) {
  addToObjectsTable(ps, auditLogId, ObjectCategory.REFERENCE_TABLE, new ReadEntity(entity.getT()));
}

if (renameOperation) {
  for (ReadEntity entity : readEntities) {
    if (renamePartition && entity.getType() == Entity.Type.TABLE) {
      continue;
    }
assertEquals("default@" + view1, CheckInputReadEntity.readEntities[0].getName()); assertEquals("default@" + tab1, CheckInputReadEntity.readEntities[1].getName()); assertFalse("Table is not direct input", CheckInputReadEntity.readEntities[1].isDirect()); assertEquals("default@" + view1, CheckInputReadEntity.readEntities[1] .getParents() .iterator().next().getName()); assertEquals("default@" + tab2, CheckInputReadEntity.readEntities[2].getName()); assertFalse("Table is not direct input", CheckInputReadEntity.readEntities[2].isDirect()); assertEquals("default@" + view1, CheckInputReadEntity.readEntities[2] .getParents() .iterator().next().getName());
/**
 * The underlying table of a view should be marked as an indirect input.
 *
 * @throws ParseException
 */
@Test
public void testSelectEntityInDirect() throws ParseException {
  Driver driver = createDriver();
  int ret = driver.compile("select * from v1");
  assertEquals("Checking command success", 0, ret);
  assertEquals(2, CheckInputReadEntityDirect.readEntities.size());
  for (ReadEntity readEntity : CheckInputReadEntityDirect.readEntities) {
    if (readEntity.getName().equals("default@t1")) {
      assertFalse("not direct", readEntity.isDirect());
    } else if (readEntity.getName().equals("default@v1")) {
      assertTrue("direct", readEntity.isDirect());
    } else {
      fail("unexpected entity name " + readEntity.getName());
    }
  }
}
private void analyzeSwitchDatabase(ASTNode ast) throws SemanticException {
  String dbName = unescapeIdentifier(ast.getChild(0).getText());
  Database database = getDatabase(dbName, true);
  ReadEntity dbReadEntity = new ReadEntity(database);
  dbReadEntity.noLockNeeded();
  inputs.add(dbReadEntity);
  SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), switchDatabaseDesc)));
}
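The same pattern in isolation, as a sketch with a hypothetical database name (Database is the metastore API object; ReadEntity(Database) and noLockNeeded() are the calls used above): a USE statement records the database as an input but asks the lock manager not to lock it.

Database analyticsDb = new Database();
analyticsDb.setName("analytics");          // hypothetical database name
ReadEntity dbInput = new ReadEntity(analyticsDb);
dbInput.noLockNeeded();                    // switching databases should not take a shared lock
inputs.add(dbInput);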
/**
 * Check if the given read entity is a table that has parents of type Table.
 * The Hive compiler performs a query rewrite by replacing a view with its definition. In the process, it
 * captures both the original view and the tables/views that it selects from.
 * Access authorization is only interested in the top-level views and not the underlying tables.
 * @param readEntity
 * @return
 */
private boolean isChildTabForView(ReadEntity readEntity) {
  // If this is a table added for a view, then we need to skip it
  if (!readEntity.getType().equals(Type.TABLE) && !readEntity.getType().equals(Type.PARTITION)) {
    return false;
  }
  if (readEntity.getParents() != null && readEntity.getParents().size() > 0) {
    for (ReadEntity parentEntity : readEntity.getParents()) {
      if (!parentEntity.getType().equals(Type.TABLE)) {
        return false;
      }
    }
    return true;
  } else {
    return false;
  }
}
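A small sketch of the check (hypothetical table and view names; assumes the ReadEntity(Table) and ReadEntity(Table, ReadEntity parent) constructors): a base table pulled in by expanding a view reports true, while the view itself, having no parents, reports false.

Table v1 = new Table("default", "v1");   // hypothetical view
Table t1 = new Table("default", "t1");   // hypothetical base table referenced by the view
ReadEntity viewEntity = new ReadEntity(v1);
ReadEntity tableEntity = new ReadEntity(t1, viewEntity);  // added while expanding v1

isChildTabForView(viewEntity);   // false: no parents, so this is a top-level input
isChildTabForView(tableEntity);  // true: every parent is a TABLE-type entity, so authorization skips it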
/**
 * Add column-level hierarchy to inputHierarchy.
 *
 * @param inputHierarchy
 * @param entity
 */
private void addColumnHierarchy(List<List<DBModelAuthorizable>> inputHierarchy, ReadEntity entity) {
  List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
  entityHierarchy.add(hiveAuthzBinding.getAuthServer());
  entityHierarchy.addAll(getAuthzHierarchyFromEntity(entity));
  switch (entity.getType()) {
    case TABLE:
    case PARTITION:
      List<String> cols = entity.getAccessedColumns();
      for (String col : cols) {
        List<DBModelAuthorizable> colHierarchy = new ArrayList<DBModelAuthorizable>(entityHierarchy);
        colHierarchy.add(new Column(col));
        inputHierarchy.add(colHierarchy);
      }
      break;
    default:
      inputHierarchy.add(entityHierarchy);
  }
}
boolean part = read.getPartition() != null;
if (part) {
  part2Cols.put(read.getPartition(), new ArrayList<String>());
} else {
  tab2Cols.put(read.getTable(), new ArrayList<String>());
}

if (read.getPartition() != null) {
  List<String> cols = part2Cols.get(read.getPartition());
  if (cols != null && cols.size() > 0) {
    ss.getAuthorizer().authorize(read.getPartition().getTable(), read.getPartition(), cols,
        op.getInputRequiredPrivileges(), null);
  } else {
    ss.getAuthorizer().authorize(read.getPartition(), op.getInputRequiredPrivileges(), null);
  }
} else if (read.getTable() != null) {
  List<String> cols = tab2Cols.get(read.getTable());
  if (cols != null && cols.size() > 0) {
    ss.getAuthorizer().authorize(read.getTable(), null, cols,
        op.getInputRequiredPrivileges(), null);
  } else {
    ss.getAuthorizer().authorize(read.getTable(), op.getInputRequiredPrivileges(), null);
  }
}
Table oldTable = getHiveContext().getInputs().iterator().next().getTable();
Table newTable = getHiveContext().getOutputs().iterator().next().getTable();
/**
 * If the optimizer has determined that it only has to read some of the partitions of the
 * target table to satisfy the query, then we know that the write side of update/delete
 * (and the update/delete parts of merge) can only write (at most) that set of partitions,
 * since we currently don't allow updating partition (or bucket) columns. So we want to
 * replace the table-level WriteEntity in the outputs with a WriteEntity for each of these
 * partitions.
 * ToDo: see if this should be moved to SemanticAnalyzer itself since it applies to any
 * insert which does a select against the same table. Then SemanticAnalyzer would also
 * be able to not use DP for the Insert...
 *
 * Note that the Insert of Merge may be creating new partitions and writing to partitions
 * which were not read (WHEN NOT MATCHED...). The WriteEntity for that should be created
 * in MoveTask (or some other task after the query is complete).
 */
private List<ReadEntity> getRestrictedPartitionSet(Table targetTable) {
  List<ReadEntity> partitionsRead = new ArrayList<>();
  for (ReadEntity re : inputs) {
    if (re.isFromTopLevelQuery && re.getType() == Entity.Type.PARTITION && isTargetTable(re, targetTable)) {
      partitionsRead.add(re);
    }
  }
  return partitionsRead;
}
/**
 * No views in the query, so it should be a direct entity.
 *
 * @throws ParseException
 */
@Test
public void testSelectEntityDirect() throws ParseException {
  Driver driver = createDriver();
  int ret = driver.compile("select * from t1");
  assertEquals("Checking command success", 0, ret);
  assertEquals(1, CheckInputReadEntityDirect.readEntities.size());
  assertTrue("isDirect", CheckInputReadEntityDirect.readEntities.iterator().next().isDirect());
}