/**
 * Registers the HiveServer2 host as a SERVICE_NAME write entity so the service
 * itself appears among the query outputs. No-op when the host is unknown.
 *
 * @throws SemanticException propagated from getHS2Host()
 */
private void addServiceOutput() throws SemanticException {
  final String serviceHost = getHS2Host();
  if (serviceHost == null) {
    return;
  }
  outputs.add(new WriteEntity(serviceHost, Type.SERVICE_NAME));
}
/**
 * Collects the distinct tables written by the given query plan.
 *
 * TABLE outputs contribute their table directly; PARTITION and DUMMYPARTITION
 * outputs contribute the partition's owning table; all other output types are
 * ignored. Each table appears at most once, keyed by its full db.table name.
 *
 * @param plan the query plan whose outputs are scanned
 * @return pairs of (full table name, Table), in first-seen output order
 */
private List<Pair<String, Table>> getWrittenTableList(QueryPlan plan) {
  List<Pair<String, Table>> result = new ArrayList<>();
  Set<String> seenTables = new HashSet<>();
  for (WriteEntity output : plan.getOutputs()) {
    Table tbl;
    switch (output.getType()) {
      case TABLE:
        tbl = output.getTable();
        break;
      case PARTITION:
      case DUMMYPARTITION:
        tbl = output.getPartition().getTable();
        break;
      default:
        // non-table outputs (directories, databases, ...) carry no written table
        continue;
    }
    String fullTableName = AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName());
    if (seenTables.add(fullTableName)) {
      // FIX: use the diamond operator instead of the raw ImmutablePair type,
      // which generated an unchecked-conversion warning.
      result.add(new ImmutablePair<>(fullTableName, tbl));
    }
  }
  return result;
}
/** Returns toString() augmented with entity type, write type and dynamic-partition flag. */
public String toDetailedString() {
  StringBuilder detail = new StringBuilder(toString());
  detail.append(" Type=").append(getTyp());
  detail.append(" WriteType=").append(getWriteType());
  detail.append(" isDP=").append(isDynamicPartitionWrite());
  return detail.toString();
}
/** Returns a labeled form of toString() with the entity type and write type appended. */
public String toStringDetail() {
  StringBuilder detail = new StringBuilder("WriteEntity(");
  detail.append(toString()).append(") Type=").append(getType());
  detail.append(" WriteType=").append(getWriteType());
  return detail.toString();
}
private HiveLockMode getWriteEntityLockMode (WriteEntity we) { HiveLockMode lockMode = we.isComplete() ? HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED; //but the writeEntity is complete in DDL operations, instead DDL sets the writeType, so //we use it to determine its lockMode, and first we check if the writeType was set WriteEntity.WriteType writeType = we.getWriteType(); if (writeType == null) { return lockMode; } switch (writeType) { case DDL_EXCLUSIVE: return HiveLockMode.EXCLUSIVE; case DDL_SHARED: return HiveLockMode.SHARED; case DDL_NO_LOCK: return null; default: //other writeTypes related to DMLs return lockMode; } }
// NOTE(review): fragment — several closing braces fall outside this view, so the
// nesting below mirrors the original token order, not verified structure.
// presumably `write` iterates over the plan's output entities — TODO confirm against caller
if (e.getType() == Entity.Type.PARTITION) {
  // a partition write also implies a write on its owning table
  additionalOutputs.add(new WriteEntity(e.getTable(), e.getWriteType()));
if (write.isDummy() || write.isPathType()) {
  // dummy/path outputs carry no authorizable object — skip
  continue;
if (write.getType() == Entity.Type.DATABASE) {
  // IMPORT is excluded from database-level authorization here — rationale not
  // visible in this fragment
  if (!op.equals(HiveOperation.IMPORT)) {
    authorizer.authorize(write.getDatabase(), null, op.getOutputRequiredPrivileges());
if (write.getType() == WriteEntity.Type.PARTITION) {
  // resolve the concrete partition (no exception when absent: last arg false)
  Partition part = db.getPartition(write.getTable(), write.getPartition().getSpec(), false);
  if (part != null) {
    // authorize the partition directly and skip the table-level fallback below
    authorizer.authorize(write.getPartition(), null, op.getOutputRequiredPrivileges());
    continue;
if (write.getTable() != null) {
  // table-level authorization fallback
  authorizer.authorize(write.getTable(), null, op.getOutputRequiredPrivileges());
// NOTE(review): fragment — `tbl` is declared twice below, which cannot compile as-is;
// these appear to be two separate snippets (partition-typed entity vs. table-typed
// entity) joined together.
WriteEntity.Type type = writeEntity.getType();
Partition usedp = writeEntity.getPartition();
Table tbl = usedp.getTable();
if (AcidUtils.isTransactionalTable(tbl)) {
  WriteEntity.WriteType writeType = writeEntity.getWriteType();
  // only non-UPDATE/DELETE writes reach whatever follows in the original
  if (writeType != WriteType.UPDATE && writeType != WriteType.DELETE) {
// second snippet: entity resolves to a table directly
Table tbl = writeEntity.getTable();
if (AcidUtils.isTransactionalTable(tbl)) {
  // flag the query as touching a transactional (ACID) table
  transactionalInQuery = true;
// NOTE(review): fragment — the enclosing loop and some closing braces are outside
// this view. The code mixes getType() and getTyp() accessors — TODO confirm the
// two are equivalent on this Entity implementation.
continue;
LOG.debug("Adding " + output.getName() + " to list of lock outputs");
List<HiveLockObj> lockObj = null;
if (output.getType() == WriteEntity.Type.DATABASE) {
  // database-level lock: no table/partition component
  lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode));
} else if (output.getTyp() == WriteEntity.Type.TABLE) {
  lockObj = getLockObjects(plan, null, output.getTable(), null, lockMode);
} else if (output.getTyp() == WriteEntity.Type.PARTITION) {
  lockObj = getLockObjects(plan, null, null, output.getPartition(), lockMode);
// NOTE(review): a '}' is missing before this else-if in the original fragment
else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
  // dummy partitions only ever need a SHARED lock
  lockObj = getLockObjects(plan, null, null, output.getPartition(), HiveLockMode.SHARED);
private boolean isSelectQuery(HiveEventContext event) { if (event.getOperation() == HiveOperation.QUERY) { //Select query has only one output if (event.getOutputs().size() == 1) { WriteEntity output = event.getOutputs().iterator().next(); /* Strangely select queries have DFS_DIR as the type which seems like a bug in hive. Filter out by checking if the path is a temporary URI * Insert into/overwrite queries onto local or dfs paths have DFS_DIR or LOCAL_DIR as the type and WriteType.PATH_WRITE and tempUri = false * Insert into a temporary table has isTempURI = false. So will not skip as expected */ if (output.getType() == Type.DFS_DIR || output.getType() == Type.LOCAL_DIR) { if (output.getWriteType() == WriteEntity.WriteType.PATH_WRITE && output.isTempURI()) { return true; } } } } return false; }
// NOTE(review): fragment — `we` is declared twice below, so these are two separate
// snippets joined together: (1) a filter matching UPDATE/DELETE writes against the
// target table, (2) re-adding partition write entities cloned from removed originals.
WriteEntity.WriteType wt = we.getWriteType();
if (isTargetTable(we, targetTable) &&
    (wt == WriteEntity.WriteType.UPDATE || wt == WriteEntity.WriteType.DELETE)) {
// second snippet:
for (WriteEntity original : toRemove) {
  WriteEntity we = new WriteEntity(re.getPartition(), original.getWriteType());
  // carry over the dynamic-partition flag from the entity being replaced
  we.setDynamicPartitionWrite(original.isDynamicPartitionWrite());
  outputs.add(we);
private static void addOutputs(HiveMetaStoreBridge hiveBridge, HiveOperation op, SortedSet<WriteEntity> sortedOutputs, StringBuilder buffer, final Map<WriteEntity, Referenceable> refs, final boolean ignoreHDFSPathsInQFName) throws HiveException { if (refs != null) { Set<String> dataSetsProcessed = new LinkedHashSet<>(); if (sortedOutputs != null) { for (WriteEntity output : sortedOutputs) { final Entity entity = output; if (!dataSetsProcessed.contains(output.getName().toLowerCase())) { //HiveOperation.QUERY type encompasses INSERT, INSERT_OVERWRITE, UPDATE, DELETE, PATH_WRITE operations if (addQueryType(op, (WriteEntity) entity)) { buffer.append(SEP); buffer.append(((WriteEntity) entity).getWriteType().name()); } if (ignoreHDFSPathsInQFName && (Type.DFS_DIR.equals(output.getType()) || Type.LOCAL_DIR.equals(output.getType()))) { LOG.debug("Skipping dfs dir output addition to process qualified name {} ", output.getName()); } else if (refs.containsKey(output)) { if ( output.getType() == Type.PARTITION || output.getType() == Type.TABLE) { final Date createTime = HiveMetaStoreBridge.getTableCreatedTime(hiveBridge.hiveClient.getTable(output.getTable().getDbName(), output.getTable().getTableName())); addDataset(buffer, refs.get(output), createTime.getTime()); } else { addDataset(buffer, refs.get(output)); } } dataSetsProcessed.add(output.getName().toLowerCase()); } } } } }
public static HivePrivObjectActionType getActionType(Entity privObject) { HivePrivObjectActionType actionType = HivePrivObjectActionType.OTHER; if (privObject instanceof WriteEntity) { WriteType writeType = ((WriteEntity) privObject).getWriteType(); switch (writeType) { case INSERT: return HivePrivObjectActionType.INSERT; case INSERT_OVERWRITE: return HivePrivObjectActionType.INSERT_OVERWRITE; case UPDATE: return HivePrivObjectActionType.UPDATE; case DELETE: return HivePrivObjectActionType.DELETE; default: // Ignore other types for purposes of authorization break; } } return actionType; }
// NOTE(review): fragment — a '}' is missing before the second else-if, and the
// enclosing method plus trailing braces are outside this view.
if (output.getTyp() == WriteEntity.Type.TABLE) {
  // complete table writes take EXCLUSIVE; partial ones only SHARED
  lockObjects.addAll(getLockObjects(output.getTable(), null,
      output.isComplete() ? HiveLockMode.EXCLUSIVE : HiveLockMode.SHARED));
} else if (output.getTyp() == WriteEntity.Type.PARTITION) {
  lockObjects.addAll(getLockObjects(null, output.getPartition(), HiveLockMode.EXCLUSIVE));
else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
  // dummy partitions only need a shared lock
  lockObjects.addAll(getLockObjects(null, output.getPartition(), HiveLockMode.SHARED));
public void run(SessionState sess, Set<ReadEntity> inputs, Set<WriteEntity> outputs, UserGroupInformation ugi, boolean isExplain) throws Exception { // Don't enforce during test driver setup or shutdown. if (sess.getConf().getBoolean("hive.test.init.phase", false) || sess.getConf().getBoolean("hive.test.shutdown.phase", false)) { return; } List<String> readOnlyTables = Arrays.asList(System.getProperty("test.src.tables").split(",")); for (WriteEntity w: outputs) { if ((w.getTyp() == WriteEntity.Type.TABLE) || (w.getTyp() == WriteEntity.Type.PARTITION)) { Table t = w.getTable(); if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(t.getDbName()) && readOnlyTables.contains(t.getTableName()) && !isExplain) { throw new RuntimeException ("Cannot overwrite read-only table: " + t.getTableName()); } } } } }
/** * This modifies the logic wrt what operations are allowed in a transaction. Multi-statement * transaction support is incomplete but it makes some Acid tests cases much easier to write. */ private boolean allowOperationInATransaction(QueryPlan queryPlan) { //Acid and MM tables support Load Data with transactional semantics. This will allow Load Data //in a txn assuming we can determine the target is a suitable table type. if(queryPlan.getOperation() == HiveOperation.LOAD && queryPlan.getOutputs() != null && queryPlan.getOutputs().size() == 1) { WriteEntity writeEntity = queryPlan.getOutputs().iterator().next(); if(AcidUtils.isTransactionalTable(writeEntity.getTable())) { switch (writeEntity.getWriteType()) { case INSERT: //allow operation in a txn return true; case INSERT_OVERWRITE: //see HIVE-18154 return false; default: //not relevant for LOAD return false; } } } //todo: handle Insert Overwrite as well: HIVE-18154 return false; }
/**
 * On CREATE*/ALTER* operations, evicts the affected databases/tables from the
 * known-objects cache so stale metadata is not reused. No-op when the cache is
 * absent, the operation name is missing/unrelated, or there are no outputs.
 */
private void init() {
  if (knownObjects == null) {
    return;
  }
  String operationName = hiveContext.getOperationName();
  // FIX: the original condition was
  //   operationName != null && operationName.startsWith("CREATE") || operationName.startsWith("ALTER")
  // Because && binds tighter than ||, a null operationName still reached the
  // ALTER check and threw a NullPointerException. Parenthesize so the null
  // guard covers both prefix tests.
  if (operationName == null
      || !(operationName.startsWith("CREATE") || operationName.startsWith("ALTER"))) {
    return;
  }
  if (CollectionUtils.isNotEmpty(hiveContext.getOutputs())) {
    for (WriteEntity output : hiveContext.getOutputs()) {
      switch (output.getType()) {
        case DATABASE:
          knownObjects.removeFromKnownDatabase(getQualifiedName(output.getDatabase()));
          break;
        case TABLE:
          knownObjects.removeFromKnownTable(getQualifiedName(output.getTable()));
          break;
        default:
          // other entity types are not cached
          break;
      }
    }
  }
}
// NOTE(review): stray closing brace preserved from the original line — it closes
// the enclosing class, which lies outside this view.
}
private static boolean addQueryType(HiveOperation op, WriteEntity entity) { if (entity.getWriteType() != null && HiveOperation.QUERY.equals(op)) { switch (entity.getWriteType()) { case INSERT: case INSERT_OVERWRITE: case UPDATE: case DELETE: return true; case PATH_WRITE: //Add query type only for DFS paths and ignore local paths since they are not added as outputs if ( !Type.LOCAL_DIR.equals(entity.getType())) { return true; } break; default: } } return false; }
/**
 * Returns the table behind a write entity, failing fast when absent.
 *
 * @param we the write entity to unwrap
 * @return the entity's table, never null
 * @throws IllegalStateException if the entity carries no table info
 */
private static Table getTable(WriteEntity we) {
  final Table table = we.getTable();
  if (table != null) {
    return table;
  }
  throw new IllegalStateException("No table info for " + we);
}
// NOTE(review): fragment — the switch body and closing brace are outside this view.
switch (outIter.next().getType()) {
  // directory-style outputs (HDFS and local filesystem) share the same branch
  case DFS_DIR:
  case LOCAL_DIR: