@Override
protected void closeOp(boolean abort) throws HiveException {
  if (!abort && reducerHash != null) {
    reducerHash.flush();
  }
  runTimeNumRows = numRows;
  super.closeOp(abort);
  out = null;
  random = null;
  reducerHash = null;
  LOG.info("{}: Total records written - {}. abort - {}", this, numRows, abort);
}
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  // attemptId identifies a RecordWriter initialized by LlapOutputFormatService
  this.attemptId = hconf.get(LLAP_OF_ID_KEY);
  try {
    // Initialize column names and types
    List<TypeInfo> typeInfos = new ArrayList<>();
    List<String> fieldNames = new ArrayList<>();
    StructObjectInspector schema = (StructObjectInspector) inputObjInspectors[0];
    for (int i = 0; i < schema.getAllStructFieldRefs().size(); i++) {
      StructField structField = schema.getAllStructFieldRefs().get(i);
      fieldNames.add(structField.getFieldName());
      TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(
          structField.getFieldObjectInspector());
      typeInfos.add(typeInfo);
    }
    // Initialize an Arrow serializer
    converter = new Serializer(hconf, attemptId, typeInfos, fieldNames);
  } catch (Exception e) {
    // Include the cause so the failure is diagnosable from the log alone.
    LOG.error("Unable to initialize VectorFileSinkArrowOperator", e);
    throw new RuntimeException(e);
  }
}
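// A minimal, self-contained sketch of the schema walk above, run against a
// hand-built StructObjectInspector. The two-column struct<id:int, name:string>,
// the class name, and the main method are illustrative and not part of the operator.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class SchemaExtractionSketch {
  public static void main(String[] args) {
    // Stand-in for inputObjInspectors[0]: a struct<id:int, name:string>.
    List<ObjectInspector> ois = Arrays.asList(
        PrimitiveObjectInspectorFactory.javaIntObjectInspector,
        PrimitiveObjectInspectorFactory.javaStringObjectInspector);
    StructObjectInspector schema = ObjectInspectorFactory
        .getStandardStructObjectInspector(Arrays.asList("id", "name"), ois);

    // Same walk as initializeOp: one field name and one TypeInfo per column.
    List<String> fieldNames = new ArrayList<>();
    List<TypeInfo> typeInfos = new ArrayList<>();
    for (StructField f : schema.getAllStructFieldRefs()) {
      fieldNames.add(f.getFieldName());
      typeInfos.add(TypeInfoUtils.getTypeInfoFromObjectInspector(f.getFieldObjectInspector()));
    }
    System.out.println(fieldNames + " -> " + typeInfos); // [id, name] -> [int, string]
  }
}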
@Override
public String getName() {
  return getOperatorName();
}
@Override
public void jobCloseOp(Configuration hconf, boolean success) throws HiveException {
  try {
    if ((conf != null) && isNativeTable) {
      Path specPath = conf.getDirName();
      DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
      if (conf.isLinkedFileSink() && (dpCtx != null)) {
        specPath = conf.getParentDir();
      }
      Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter);
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
  super.jobCloseOp(hconf, success);
}
@Override
public void jobClose(Configuration hconf, boolean success, JobCloseFeedBack feedBack)
    throws HiveException {
  try {
    if ((conf != null) && isNativeTable) {
      String specPath = conf.getDirName();
      DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
      mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx);
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
  super.jobClose(hconf, success, feedBack);
}
@Override
protected void closeOp(boolean abort) throws HiveException {
  if (!abort && reducerHash != null) {
    reducerHash.flush();
  }
  runTimeNumRows = numRows;
  super.closeOp(abort);
  out = null;
  reducerHash = null;
  if (LOG.isInfoEnabled()) {
    LOG.info(toString() + ": records written - " + numRows);
  }
}
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  ObjectInspector[] inputOIs = new ObjectInspector[conf.getTagLength()];
  byte tag = conf.getTag();
  inputOIs[tag] = inputObjInspectors[0];
  conf.setTagOrder(new Byte[] { tag });
  int dfsMaxReplication = hconf.getInt(DFS_REPLICATION_MAX, minReplication);
  // minReplication value should not cross the value of dfs.replication.max
  minReplication = Math.min(minReplication, dfsMaxReplication);
  htsOperator.setConf(conf);
  htsOperator.initialize(hconf, inputOIs);
}
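// A minimal sketch of the replication clamp performed above, assuming
// DFS_REPLICATION_MAX resolves to the standard "dfs.replication.max" key.
// The class name, the requested value of 10, and the cluster cap of 3 are
// made up for illustration.
import org.apache.hadoop.conf.Configuration;

public class ReplicationClampSketch {
  public static void main(String[] args) {
    Configuration hconf = new Configuration(false);
    hconf.setInt("dfs.replication.max", 3); // pretend the cluster caps replication at 3

    int minReplication = 10; // what the operator would otherwise request
    int dfsMaxReplication = hconf.getInt("dfs.replication.max", minReplication);
    // Same clamp as initializeOp: never request more copies than the cluster allows.
    minReplication = Math.min(minReplication, dfsMaxReplication);
    System.out.println(minReplication); // prints 3
  }
}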
@Override
protected void closeOp(boolean abort) throws HiveException {
  super.closeOp(abort);
  out = null;
  if (isLogInfoEnabled) {
    LOG.info(toString() + ": records written - " + numRows);
  }
  recordCounter.set(numRows);
}
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  ObjectInspector[] inputOIs = new ObjectInspector[conf.getTagLength()];
  byte tag = conf.getTag();
  inputOIs[tag] = inputObjInspectors[0];
  conf.setTagOrder(new Byte[] { tag });
  numReplication = (short) hconf.getInt(MAPRED_FILE_REPLICATION, DEFAULT_REPLICATION);
  htsOperator.setConf(conf);
  htsOperator.initialize(hconf, inputOIs);
}
@Override
public void closeOp(boolean abort) throws HiveException {
  try {
    if (mapJoinTables == null) {
      LOG.debug("mapJoinTables is null");
    } else {
      flushToFile();
    }
    super.closeOp(abort);
  } catch (HiveException e) {
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
@Override
public void closeOp(boolean abort) throws HiveException {
  try {
    if (mapJoinTables == null) {
      if (isLogDebugEnabled) {
        LOG.debug("mapJoinTables is null");
      }
    } else {
      flushToFile();
    }
    super.closeOp(abort);
  } catch (HiveException e) {
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
@Override
protected void closeOp(boolean abort) throws HiveException {
  if (!abort && reducerHash != null) {
    reducerHash.flush();
  }
  super.closeOp(abort);
  out = null;
  random = null;
  reducerHash = null;
  if (isLogInfoEnabled) {
    LOG.info(toString() + ": records written - " + numRows);
  }
  recordCounter.set(numRows);
}
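// The older closeOp variants above guard concatenated log messages with
// isLogInfoEnabled / LOG.isInfoEnabled(), while the first closeOp uses SLF4J
// placeholders, which defer formatting until the level check passes. A small
// stand-alone sketch of the two styles; the logger name, message text, and
// values are illustrative.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyleSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleSketch.class);

  public static void main(String[] args) {
    long numRows = 42;
    // Older style: guard the call so the string is not concatenated when INFO is off.
    if (LOG.isInfoEnabled()) {
      LOG.info("sink: records written - " + numRows);
    }
    // Parameterized style: SLF4J only formats the message if INFO is enabled,
    // so no explicit guard is needed.
    LOG.info("{}: records written - {}", "sink", numRows);
  }
}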
@Override
public void closeOp(boolean abort) throws HiveException {
  try {
    MapJoinPersistableTableContainer[] mapJoinTables = htsOperator.mapJoinTables;
    byte tag = conf.getTag();
    // Guard with <= so that tag == mapJoinTables.length cannot index past the end.
    if (mapJoinTables == null || mapJoinTables.length <= tag || mapJoinTables[tag] == null) {
      LOG.debug("mapJoinTable is null");
    } else if (abort) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Aborting, skip dumping side-table for tag: " + tag);
      }
    } else {
      String method = PerfLogger.SPARK_FLUSH_HASHTABLE + getName();
      perfLogger.PerfLogBegin(CLASS_NAME, method);
      try {
        flushToFile(mapJoinTables[tag], tag);
      } finally {
        perfLogger.PerfLogEnd(CLASS_NAME, method);
      }
    }
    super.closeOp(abort);
  } catch (HiveException e) {
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
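// The PerfLogBegin/PerfLogEnd pair around flushToFile is a begin/end-in-finally
// pattern: the end marker fires even when the flush throws. A dependency-free
// sketch with System.nanoTime standing in for Hive's PerfLogger; the class
// name, sleep, and message are illustrative.
public class PerfScopeSketch {
  public static void main(String[] args) throws InterruptedException {
    long start = System.nanoTime();   // stands in for perfLogger.PerfLogBegin(...)
    try {
      Thread.sleep(50);               // stands in for flushToFile(mapJoinTables[tag], tag)
    } finally {
      long elapsedMs = (System.nanoTime() - start) / 1_000_000;
      System.out.println("flush took " + elapsedMs + " ms"); // stands in for PerfLogEnd(...)
    }
  }
}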
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
  super.initializeOp(hconf);
  VectorExpression.doTransientInit(reduceSinkKeyExpressions);
  VectorExpression.doTransientInit(reduceSinkValueExpressions);