/**
 * Creates a mutator over a private copy of the supplied dataset state.
 *
 * @param username            user on whose behalf mutations are performed
 * @param virtualDatasetState state to mutate; defensively copied so the
 *                            caller's instance is never modified
 * @param preview             whether the mutator operates in preview mode
 */
public DatasetStateMutator(String username, VirtualDatasetState virtualDatasetState, boolean preview) {
  this.preview = preview;
  // Deep-copy via Protostuff so later mutations cannot leak back to the caller.
  this.virtualDatasetState = ProtostuffUtil.copy(virtualDatasetState);
}
/**
 * Returns a deep copy of the held source configuration so callers cannot
 * mutate the internal instance.
 *
 * @return an independent clone of the source config
 */
public SourceConfig getConfig() {
  return ProtostuffUtil.copy(this.sourceConfig);
}
/** Returns a defensive copy of the accumulated state so callers cannot mutate internal state. */
public VirtualDatasetState getNewState() { return copy(newState); } public Set<String> getAddedColumns() {
/**
 * Creates an actor that applies transforms against a mutable copy of the
 * given initial state.
 *
 * @param initialState starting dataset state; copied before being handed to the mutator
 * @param preview      whether transforms run in preview mode
 * @param username     user performing the transforms
 * @param executor     executor used to run queries
 */
public TransformActor(
    VirtualDatasetState initialState, boolean preview, String username, QueryExecutor executor) {
  this.preview = preview;
  this.username = username;
  this.executor = executor;
  // NOTE(review): copy(initialState) here plus the mutator's own internal copy
  // looks like a double deep-copy — confirm whether one is redundant.
  this.m = new DatasetStateMutator(username, copy(initialState), preview);
}
/**
 * Merges {@code from} into a copy of {@code to}, leaving both inputs untouched.
 *
 * <p>The source message is serialized to an in-memory buffer and then replayed
 * into a fresh copy of the target, so only the copy is mutated.
 *
 * @param to   message to merge into (treated as immutable; a copy is returned)
 * @param from message whose fields are merged in (treated as immutable)
 * @param <T>  Protostuff message type
 * @return a copy of {@code to} with {@code from} merged into it
 * @throws IOException if serialization or deserialization fails
 */
public static <T extends Message<T>> T merge(T to, T from) throws IOException {
  final Schema<T> schema = from.cachedSchema();
  // Serialize the source message into an in-memory buffer...
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  GraphIOUtil.writeDelimitedTo(new DataOutputStream(buffer), from, schema);
  // ...then replay it into a fresh copy of the target, so `to` itself is untouched.
  final T merged = copy(to);
  GraphIOUtil.mergeDelimitedFrom(new ByteArrayInputStream(buffer.toByteArray()), merged, schema);
  return merged;
}
@Override public SubScan getSpecificScan(List<SplitWork> work) throws ExecutionSetupException { final BatchSchema schema = cachedRelDataType == null ? getDataset().getSchema(): BatchSchema.fromCalciteRowType(cachedRelDataType); // Create an abridged version of the splits to save network bytes. List<DatasetSplit> splits = work.stream().map( workSplit -> ProtostuffUtil.copy(workSplit.getSplit()) .setExtendedProperty(convertToScanXAttr(workSplit.getSplit().getExtendedProperty())) ).collect(Collectors.toList()); return new ParquetSubScan(dataset.getFormatSettings(), splits, getUserName(), schema, getDataset().getName().getPathComponents(), filter == null ? null : filter.getConditions(), dataset.getStoragePluginId(), columns, dataset.getReadDefinition().getPartitionColumnsList(), globalDictionaryEncodedColumns, dataset.getReadDefinition().getExtendedProperty()); }
/**
 * Registers a new query attempt on the tracked job and returns the observer that
 * will receive its events.
 *
 * <p>The first attempt (attempt number 0) is already part of the job, so a new
 * {@code JobAttempt} is appended only for re-attempts; the new attempt reuses the
 * previous attempt's info with a fresh start time and cleared failure/result
 * metadata, and starts in the ENQUEUED state. The attempt id is recorded on the
 * job's current (latest) attempt, which relies on {@code job.addAttempt} running
 * before {@code job.getJobAttempt()} — do not reorder. Internal jobs get a
 * {@code JobResultListener}; external ones a {@code ExternalJobResultListener}
 * that also forwards results to the response handler.
 *
 * @param attemptId id of the attempt being started
 * @param reason    why this (re-)attempt was triggered
 * @return the observer to attach to the new attempt
 */
@Override public AttemptObserver newAttempt(AttemptId attemptId, AttemptReason reason) { // first attempt is already part of the job if (attemptId.getAttemptNum() > 0) { // create a new JobAttempt for the new attempt final JobInfo jobInfo = ProtostuffUtil.copy(job.getJobAttempt().getInfo()) .setStartTime(System.currentTimeMillis()) // use different startTime for every attempt .setFailureInfo(null) .setDetailedFailureInfo(null) .setResultMetadataList(new ArrayList<ArrowFileMetadata>()); final JobAttempt jobAttempt = new JobAttempt() .setInfo(jobInfo) .setReason(reason) .setEndpoint(identity) .setDetails(new JobDetails()) .setState(ENQUEUED); job.addAttempt(jobAttempt); } job.getJobAttempt().setAttemptId(AttemptIdUtils.toString(attemptId)); if (isInternal) { attemptObserver = new JobResultListener(attemptId, job, allocator, statusListener, listeners); } else { attemptObserver = new ExternalJobResultListener(attemptId, responseHandler, job, allocator); } return attemptObserver; }