public Properties getProperties() {
  if (properties == null && tableDesc != null) {
    return tableDesc.getProperties();
  }
  return properties;
}
public void setOrder(String orderStr) {
  keySerializeInfo.getProperties().setProperty(
      org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_SORT_ORDER,
      orderStr);
}
public void setNullOrder(String nullOrderStr) {
  keySerializeInfo.getProperties().setProperty(
      org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_SORT_ORDER,
      nullOrderStr);
}
public static TableDesc getDefaultQueryOutputTableDesc(String cols, String colTypes,
    String fileFormat, Class<? extends Deserializer> serdeClass) {
  TableDesc tblDesc =
      getTableDesc(serdeClass, "" + Utilities.ctrlaCode, cols, colTypes, false, false, fileFormat);
  // enable escaping
  tblDesc.getProperties().setProperty(serdeConstants.ESCAPE_CHAR, "\\");
  tblDesc.getProperties().setProperty(serdeConstants.SERIALIZATION_ESCAPE_CRLF, "true");
  // enable extended nesting levels
  tblDesc.getProperties().setProperty(
      LazySerDeParameters.SERIALIZATION_EXTEND_ADDITIONAL_NESTING_LEVELS, "true");
  return tblDesc;
}
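// A minimal usage sketch (not part of the original source): how a caller might
// build the default query-output descriptor above. The column names/types and
// the LazySimpleSerDe choice are illustrative assumptions only.
private static TableDesc exampleQueryOutputDesc() {
  return getDefaultQueryOutputTableDesc(
      "key,value",            // comma-separated column names (hypothetical)
      "string:int",           // colon-separated column types (hypothetical)
      "TextFile",             // output file format
      LazySimpleSerDe.class); // SerDe used for the query-output rows
}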
/**
 * Returns the null sort order of the key columns.
 *
 * @return null, which means the default for all key columns, or a String of
 *         the same length as the key columns, consisting only of "a" (nulls
 *         first) and "z" (nulls last).
 */
@Explain(displayName = "null sort order", explainLevels = { Level.EXTENDED })
public String getNullOrder() {
  return keySerializeInfo.getProperties().getProperty(
      org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_SORT_ORDER);
}
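// A minimal sketch (not from the original source) of the string conventions
// above, assuming a ReduceSinkDesc with three key columns: the sort order uses
// one '+' (ascending) or '-' (descending) per key column, paired with the
// 'a'/'z' null order string of the same length.
private static void exampleThreeKeySortOrder(ReduceSinkDesc desc) {
  desc.setOrder("+-+");      // key1 ASC, key2 DESC, key3 ASC
  desc.setNullOrder("aza");  // key1 NULLS FIRST, key2 NULLS LAST, key3 NULLS FIRST
}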
@Override
public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
  try {
    LOGGER.debug("Adding properties to input job conf");
    Properties properties = tableDesc.getProperties();
    JdbcStorageConfigManager.copyConfigurationToJob(properties, jobProperties);
  } catch (Exception e) {
    throw new IllegalArgumentException(e);
  }
}
@Override
public void configureInputJobCredentials(TableDesc tableDesc, Map<String, String> jobSecrets) {
  try {
    LOGGER.debug("Adding secrets to input job conf");
    Properties properties = tableDesc.getProperties();
    JdbcStorageConfigManager.copySecretsToJob(properties, jobSecrets);
  } catch (Exception e) {
    throw new IllegalArgumentException(e);
  }
}
@Override
public void configureTableJobProperties(TableDesc tableDesc, Map<String, String> jobProperties) {
  try {
    LOGGER.debug("Adding properties to table job conf");
    Properties properties = tableDesc.getProperties();
    JdbcStorageConfigManager.copyConfigurationToJob(properties, jobProperties);
  } catch (Exception e) {
    throw new IllegalArgumentException(e);
  }
}
public LoadTableDesc(final Path sourcePath,
    final TableDesc table,
    final Map<String, String> partitionSpec,
    final LoadFileType loadFileType,
    final AcidUtils.Operation writeType,
    Long currentWriteId) {
  super(sourcePath, writeType);
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    Utilities.FILE_OP_LOGGER.trace("creating part LTD from " + sourcePath + " to "
        + ((table.getProperties() == null) ? "null" : table.getTableName()));
  }
  init(table, partitionSpec, loadFileType, currentWriteId);
}
@Explain(displayName = "properties", explainLevels = { Level.EXTENDED }) public Map getPropertiesExplain() { return PlanUtils.getPropertiesExplain(getProperties()); }
private VectorPartitionContext(PartitionDesc partDesc) {
  this.partDesc = partDesc;
  TableDesc td = partDesc.getTableDesc();

  // Use the table properties for unpartitioned tables, and the union of table
  // and partition properties, with the partition taking precedence, for
  // partitioned tables.
  Properties overlayedProps =
      SerDeUtils.createOverlayedProperties(td.getProperties(), partDesc.getProperties());

  Map<String, String> partSpec = partDesc.getPartSpec();
  tableName = String.valueOf(overlayedProps.getProperty("name"));
  partName = String.valueOf(partSpec);
}
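// A minimal sketch (not from the original source) of the overlay semantics used
// above, assuming the second argument to SerDeUtils.createOverlayedProperties
// wins on key collisions; the property values here are hypothetical.
private static Properties exampleOverlayedProperties() {
  Properties tableProps = new Properties();
  tableProps.setProperty("name", "default.t");
  tableProps.setProperty("serialization.format", "1");
  Properties partProps = new Properties();
  partProps.setProperty("serialization.format", "9");
  // Result contains name=default.t and serialization.format=9 (partition wins).
  return SerDeUtils.createOverlayedProperties(tableProps, partProps);
}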
public static void configureJobConf(TableDesc tableDesc, JobConf jobConf) throws Exception {
  HBaseSerDeParameters serdeParams =
      new HBaseSerDeParameters(jobConf, tableDesc.getProperties(), HBaseSerDe.class.getName());
  serdeParams.getKeyFactory().configureJobConf(tableDesc, jobConf);
}
public static int getDPColOffset(FileSinkDesc conf) {
  if (conf.getWriteType() == AcidUtils.Operation.DELETE) {
    // For deletes, ROW__ID is the only non-partitioning, non-bucketing column.
    // See UpdateDeleteSemanticAnalyzer::reparseAndSuperAnalyze() for details.
    return 1;
  } else if (conf.getWriteType() == AcidUtils.Operation.UPDATE) {
    // For updates, ROW__ID is an extra column at index 0.
    // See UpdateDeleteSemanticAnalyzer::reparseAndSuperAnalyze() for details.
    return getColumnNames(conf.getTableInfo().getProperties()).size() + 1;
  } else {
    return getColumnNames(conf.getTableInfo().getProperties()).size();
  }
}
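// Worked example of the offsets above, assuming a hypothetical table with three
// regular columns (a, b, c):
//   DELETE -> 1      (ROW__ID is the only non-DP, non-bucketing column)
//   UPDATE -> 3 + 1  (ROW__ID at index 0, then a, b, c)
//   other  -> 3      (just a, b, c)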
protected ValidWriteIdList getMmValidWriteIds(
    JobConf conf, TableDesc table, ValidWriteIdList validWriteIdList) throws IOException {
  if (!AcidUtils.isInsertOnlyTable(table.getProperties())) {
    return null;
  }
  if (validWriteIdList == null) {
    validWriteIdList = AcidUtils.getTableValidWriteIdList(conf, table.getTableName());
    if (validWriteIdList == null) {
      throw new IOException("Insert-Only table: " + table.getTableName()
          + " is missing from the ValidWriteIdList config: "
          + conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
    }
  }
  return validWriteIdList;
}
public boolean isFullAcidTable() {
  if (getTable() != null) {
    return AcidUtils.isFullAcidTable(table);
  } else {
    return AcidUtils.isTablePropertyTransactional(getTableInfo().getProperties())
        && !AcidUtils.isInsertOnlyTable(getTableInfo().getProperties());
  }
}
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  TableDesc tbl = this.getConf().getTbl();
  try {
    Deserializer serde = tbl.getDeserializerClass().newInstance();
    SerDeUtils.initializeSerDe(serde, hconf, tbl.getProperties(), null);
    this.outputObjInspector = serde.getObjectInspector();
  } catch (Exception e) {
    // Propagate instead of swallowing: a null outputObjInspector would only
    // fail later with a less useful error.
    LOG.error("Generating output obj inspector from dummy object error", e);
    throw new HiveException(e);
  }
}
private Object deserializeValue(BytesWritable valueWritable, byte tag) throws HiveException {
  try {
    return inputValueDeserializer.deserialize(valueWritable);
  } catch (SerDeException e) {
    throw new HiveException(
        "Hive Runtime Error: Unable to deserialize reduce input value (tag=" + tag
            + ") from "
            + Utilities.formatBinaryString(valueWritable.getBytes(), 0, valueWritable.getLength())
            + " with properties " + valueTableDesc.getProperties(), e);
  }
}
/**
 * Returns a deserializer object corresponding to this PartitionDesc.
 */
public Deserializer getDeserializer(Configuration conf) throws Exception {
  Properties schema = getProperties();
  String clazzName = getDeserializerClassName();
  Deserializer deserializer = ReflectionUtil.newInstance(
      conf.getClassByName(clazzName).asSubclass(Deserializer.class), conf);
  SerDeUtils.initializeSerDe(deserializer, conf, getTableDesc().getProperties(), schema);
  return deserializer;
}
private boolean needConversion(PartitionDesc partitionDesc) {
  boolean isAcid =
      AcidUtils.isTablePropertyTransactional(partitionDesc.getTableDesc().getProperties());
  if (Utilities.isSchemaEvolutionEnabled(job, isAcid)
      && Utilities.isInputFileFormatSelfDescribing(partitionDesc)) {
    return false;
  }
  return needConversion(partitionDesc.getTableDesc(), Arrays.asList(partitionDesc));
}
public boolean isMmTable() {
  if (getTable() != null) {
    return AcidUtils.isInsertOnlyTable(table.getParameters());
  } else {
    // Dynamic partition insert case.
    return AcidUtils.isInsertOnlyTable(getTableInfo().getProperties());
  }
}
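// A minimal sketch (not from the original source) of the property-driven
// distinction above, using the table-property names conventional in AcidUtils;
// the Properties instance is hypothetical:
//   transactional=true, transactional_properties=insert_only -> isMmTable()       == true
//   transactional=true, no insert_only marker                -> isFullAcidTable() == true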