vertexBuilder.setVertexBinary(ByteString.copyFrom(submitWorkInfo.getVertexBinary())); if (submitWorkInfo.getVertexSignature() != null) { builder.setWorkSpec(vertexBuilder.build()); builder.setFragmentNumber(taskNum); builder.setAttemptNumber(attemptNum);
subBuilder.mergeFrom(workSpec_); workSpec_ = subBuilder.buildPartial();
/**
 * <code>optional .SignableVertexSpec vertex = 1;</code>
 */
// NOTE(review): protoc-generated accessor — do not hand-edit; regenerate from the .proto instead.
// Lazily creates the SingleFieldBuilder for the "vertex" sub-message. On first
// use the current vertex_ value is handed to the new builder and vertex_ is
// nulled, making vertexBuilder_ the single source of truth for the field.
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec,
    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder,
    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder>
    getVertexFieldBuilder() {
  if (vertexBuilder_ == null) {
    vertexBuilder_ = new com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec,
        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder,
        org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder>(
            vertex_,
            getParentForChildren(),
            isClean());
    vertex_ = null; // builder now owns the message; reads go through vertexBuilder_
  }
  return vertexBuilder_;
}
vertexBuilder.setVertexBinary(ByteString.copyFrom(submitWorkInfo.getVertexBinary())); if (submitWorkInfo.getVertexSignature() != null) { builder.setWorkSpec(vertexBuilder.build()); builder.setFragmentNumber(taskNum); builder.setAttemptNumber(attemptNum);
.setFragmentNumber(fragmentNumber) .setWorkSpec( LlapDaemonProtocolProtos.VertexOrBinary.newBuilder().setVertex( LlapDaemonProtocolProtos.SignableVertexSpec .newBuilder() LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder() .setClassName("MockProcessor").build()) .build()).build()) .setAmHost("localhost") .setAmPort(12345)
.setFragmentNumber(fragmentNumber) .setWorkSpec( VertexOrBinary.newBuilder().setVertex( SignableVertexSpec.newBuilder() .setDagName(dagName) LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder() .setClassName("MockProcessor").build()) .build()).build()) .setAmHost("localhost") .setAmPort(12345)
private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerId, TaskSpec taskSpec, FragmentRuntimeInfo fragmentRuntimeInfo, String hiveQueryId) throws IOException { SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder(); builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId()); builder.setAttemptNumber(taskSpec.getTaskAttemptID().getId()); builder.setContainerIdString(containerId.toString()); builder.setAmHost(getAmHostString()); builder.setAmPort(getAddress().getPort()); Preconditions.checkState(currentQueryIdentifierProto.getDagIndex() == taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId()); builder.setCredentialsBinary( getCredentials(getContext().getCurrentDagInfo().getCredentials())); builder.setWorkSpec(VertexOrBinary.newBuilder().setVertex(Converters.constructSignableVertexSpec( taskSpec, currentQueryIdentifierProto, getTokenIdentifier(), user, hiveQueryId)).build()); // Don't call builder.setWorkSpecSignature() - Tez doesn't sign fragments builder.setFragmentRuntimeInfo(fragmentRuntimeInfo); if (scheduler != null) { // May be null in tests // TODO: see javadoc builder.setIsGuaranteed(scheduler.isInitialGuaranteed(taskSpec.getTaskAttemptID())); } return builder.build(); }
/**
 * <code>optional .VertexOrBinary work_spec = 1;</code>
 */
// NOTE(review): protoc-generated merge method — regenerate from the .proto rather than editing.
// Standard singular-message merge: when the field is already set to a
// non-default value, the incoming message is merged into it field-by-field;
// otherwise the incoming message replaces it outright. The has-bit
// (bit 0 of bitField0_) is set on either path.
public Builder mergeWorkSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary value) {
  if (workSpecBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        workSpec_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance()) {
      workSpec_ =
          org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.newBuilder(workSpec_).mergeFrom(value).buildPartial();
    } else {
      workSpec_ = value;
    }
    onChanged();
  } else {
    // Field is managed by the nested builder; delegate the merge to it.
    workSpecBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
/**
 * <code>optional .VertexOrBinary work_spec = 1;</code>
 */
// NOTE(review): protoc-generated setter — regenerate from the .proto rather than editing.
// Builder-argument variant: the sub-builder is built immediately and the
// resulting message stored (or forwarded to the nested field builder).
// Sets the has-bit for work_spec in both paths.
public Builder setWorkSpec(
    org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder builderForValue) {
  if (workSpecBuilder_ == null) {
    workSpec_ = builderForValue.build();
    onChanged();
  } else {
    workSpecBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
// NOTE(review): protoc-generated private factory backing newBuilder(); do not hand-edit.
private static Builder create() {
  return new Builder();
}
// Generated deep copy: a fresh builder seeded by merging this builder's
// partially built message (protobuf's standard Builder.clone pattern).
public Builder clone() {
  return create().mergeFrom(buildPartial());
}
// Generated build(): delegates to buildPartial() and then rejects messages
// missing required fields by throwing UninitializedMessageException.
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary build() {
  org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
// Generated hook: creates a Builder attached to the given parent so nested
// builders propagate change notifications upward.
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
// NOTE(review): protoc-generated message merge — regenerate from the .proto rather than editing.
// Copies each field that is set on `other` into this builder (default
// instance is a no-op), then merges unknown fields, per protobuf merge semantics.
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary other) {
  if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance()) return this;
  if (other.hasVertex()) {
    mergeVertex(other.getVertex());
  }
  if (other.hasVertexBinary()) {
    setVertexBinary(other.getVertexBinary());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}