private void terminateRequest() { if (closed || requestInfo == null) { LOG.warn("No current request to terminate"); return; } TerminateFragmentRequestProto.Builder builder = TerminateFragmentRequestProto.newBuilder(); builder.setQueryIdentifier(requestInfo.queryIdentifierProto); builder.setFragmentIdentifierString(requestInfo.taskAttemptId); final String taskAttemptId = requestInfo.taskAttemptId; communicator.sendTerminateFragment(builder.build(), requestInfo.hostname, requestInfo.port, new LlapProtocolClientProxy.ExecuteRequestCallback<TerminateFragmentResponseProto>() { @Override public void setResponse(TerminateFragmentResponseProto response) { LOG.debug("Received terminate response for " + taskAttemptId); } @Override public void indicateError(Throwable t) { String msg = "Failed to terminate " + taskAttemptId; LOG.error(msg, t); // Don't propagate the error - termination was done as part of closing the client. } }); }
@java.lang.Override
public int hashCode() {
  // Protobuf-generated hashCode: the message is immutable, so the result is
  // computed once and cached in memoizedHashCode (0 means "not yet computed").
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  // Mix in the descriptor so different message types with equal fields differ.
  hash = (19 * hash) + getDescriptorForType().hashCode();
  // Only fields that are present (has-bit set) contribute, keeping hashCode
  // consistent with the generated equals().
  if (hasQueryIdentifier()) {
    hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER;
    hash = (53 * hash) + getQueryIdentifier().hashCode();
  }
  if (hasFragmentIdentifierString()) {
    hash = (37 * hash) + FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER;
    hash = (53 * hash) + getFragmentIdentifierString().hashCode();
  }
  // Unknown (unrecognized-on-parse) fields also participate.
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Generated builder method: materializes the message from the builder's current
// state WITHOUT verifying required fields (unlike build()). Copies each field's
// has-bit from the builder bitfield into the message bitfield.
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto buildPartial() {
  org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  // Bit 0x1: query_identifier present.
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  // Prefer the nested builder's built value if one was created lazily.
  if (queryIdentifierBuilder_ == null) {
    result.queryIdentifier_ = queryIdentifier_;
  } else {
    result.queryIdentifier_ = queryIdentifierBuilder_.build();
  }
  // Bit 0x2: fragment_identifier_string present.
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002;
  }
  result.fragmentIdentifierString_ = fragmentIdentifierString_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
TerminateFragmentRequestProto.newBuilder().setQueryIdentifier( constructQueryIdentifierProto( taskAttemptId.getTaskID().getVertexID().getDAGId().getId()))
private void terminateRequest() { if (closed || requestInfo == null) { LOG.warn("No current request to terminate"); return; } TerminateFragmentRequestProto.Builder builder = TerminateFragmentRequestProto.newBuilder(); builder.setQueryIdentifier(requestInfo.queryIdentifierProto); builder.setFragmentIdentifierString(requestInfo.taskAttemptId); final String taskAttemptId = requestInfo.taskAttemptId; communicator.sendTerminateFragment(builder.build(), requestInfo.hostname, requestInfo.port, new LlapProtocolClientProxy.ExecuteRequestCallback<TerminateFragmentResponseProto>() { @Override public void setResponse(TerminateFragmentResponseProto response) { LOG.debug("Received terminate response for " + taskAttemptId); } @Override public void indicateError(Throwable t) { String msg = "Failed to terminate " + taskAttemptId; LOG.error(msg, t); // Don't propagate the error - termination was done as part of closing the client. } }); }
/**
 * RPC handler: kills the fragment named in the request, provided the query it
 * belongs to can be found and the caller passes the query's permission check.
 * Always returns the (empty) default response; an unknown fragment is a no-op.
 */
@Override
public TerminateFragmentResponseProto terminateFragment(
    TerminateFragmentRequestProto request) throws IOException {
  String fragmentId = request.getFragmentIdentifierString();
  LOG.info("DBG: Received terminateFragment request for {}", fragmentId);
  // TODO: ideally, QueryTracker should have fragment-to-query mapping.
  QueryIdentifier queryId = executorService.findQueryByFragment(fragmentId);
  // checkPermissions returns false if query is not found, throws on failure.
  if (queryId != null && queryTracker.checkPermissionsForQuery(queryId)) {
    executorService.killFragment(fragmentId);
  }
  return TerminateFragmentResponseProto.getDefaultInstance();
}
TerminateFragmentRequestProto.newBuilder().setQueryIdentifier( constructQueryIdentifierProto( taskAttemptId.getTaskID().getVertexID().getDAGId().getId()))
// Generated service reflection helper: maps a method descriptor to the default
// instance of that method's request type. The case indices mirror the method
// declaration order in the service definition of the .proto file.
public final com.google.protobuf.Message getRequestPrototype(
    com.google.protobuf.Descriptors.MethodDescriptor method) {
  // Guard against a descriptor belonging to a different service.
  if (method.getService() != getDescriptor()) {
    throw new java.lang.IllegalArgumentException(
        "Service.getRequestPrototype() given method " +
        "descriptor for wrong service type.");
  }
  switch(method.getIndex()) {
    case 0:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance();
    case 1:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
    case 2:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance();
    case 3:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance();
    case 4:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
    case 5:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.getDefaultInstance();
    default:
      // Unreachable: the guard above ensures the index is in range.
      throw new java.lang.AssertionError("Can't get here.");
  }
}
// Generated service reflection helper: maps a method descriptor to the default
// instance of that method's request type. The case indices mirror the method
// declaration order in the service definition of the .proto file.
public final com.google.protobuf.Message getRequestPrototype(
    com.google.protobuf.Descriptors.MethodDescriptor method) {
  // Guard against a descriptor belonging to a different service.
  if (method.getService() != getDescriptor()) {
    throw new java.lang.IllegalArgumentException(
        "Service.getRequestPrototype() given method " +
        "descriptor for wrong service type.");
  }
  switch(method.getIndex()) {
    case 0:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance();
    case 1:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance();
    case 2:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance();
    case 3:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance();
    case 4:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
    case 5:
      return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.getDefaultInstance();
    default:
      // Unreachable: the guard above ensures the index is in range.
      throw new java.lang.AssertionError("Can't get here.");
  }
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Protobuf-generated equals: two messages are equal iff they have the same
  // set of present fields, equal values for those fields, and equal unknown
  // fields. Consistent with the generated hashCode().
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) obj;
  boolean result = true;
  // Field presence must match before values are compared.
  result = result && (hasQueryIdentifier() == other.hasQueryIdentifier());
  if (hasQueryIdentifier()) {
    result = result && getQueryIdentifier()
        .equals(other.getQueryIdentifier());
  }
  result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString());
  if (hasFragmentIdentifierString()) {
    result = result && getFragmentIdentifierString()
        .equals(other.getFragmentIdentifierString());
  }
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto prototype) {
/** Returns a new builder pre-populated with all fields of this message. */
public Builder toBuilder() {
  final Builder populated = newBuilder(this);
  return populated;
}
/** Returns the shared, immutable default instance of this message type. */
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto getDefaultInstanceForType() {
  final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto defaultInstance =
      org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance();
  return defaultInstance;
}
public int getSerializedSize() {
  // Generated size computation; cached in memoizedSerializedSize (-1 means
  // "not yet computed") because the message is immutable.
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  size = 0;
  // Field 1: query_identifier (message), counted only when present.
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(1, queryIdentifier_);
  }
  // Field 2: fragment_identifier_string (string as bytes), counted only when present.
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBytesSize(2, getFragmentIdentifierStringBytes());
  }
  // Unknown fields retained at parse time are preserved on re-serialization.
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
// Creates a builder initialized with a copy of the given prototype's fields.
public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto prototype) {
  return newBuilder().mergeFrom(prototype);
}
// Returns a new builder pre-populated with all fields of this message.
public Builder toBuilder() { return newBuilder(this); }
/**
 * RPC handler: kills the fragment named in the request, provided the query it
 * belongs to can be found and the caller passes the query's permission check.
 * Always returns the (empty) default response; an unknown fragment is a no-op.
 */
@Override
public TerminateFragmentResponseProto terminateFragment(
    TerminateFragmentRequestProto request) throws IOException {
  String fragmentId = request.getFragmentIdentifierString();
  LOG.info("DBG: Received terminateFragment request for {}", fragmentId);
  // TODO: ideally, QueryTracker should have fragment-to-query mapping.
  QueryIdentifier queryId = executorService.findQueryByFragment(fragmentId);
  // checkPermissions returns false if query is not found, throws on failure.
  if (queryId != null && queryTracker.checkPermissionsForQuery(queryId)) {
    executorService.killFragment(fragmentId);
  }
  return TerminateFragmentResponseProto.getDefaultInstance();
}
// Generated merge: overlays the other message's present fields onto this
// builder. A present singular field in 'other' replaces (or, for the nested
// message field, merges into) the current value; absent fields are untouched.
public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other) {
  // Merging the default instance is a no-op.
  if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance()) return this;
  if (other.hasQueryIdentifier()) {
    // Nested message fields merge recursively rather than replace wholesale.
    mergeQueryIdentifier(other.getQueryIdentifier());
  }
  if (other.hasFragmentIdentifierString()) {
    bitField0_ |= 0x00000002;
    fragmentIdentifierString_ = other.fragmentIdentifierString_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/**
 * <code>optional string fragment_identifier_string = 2;</code>
 *
 * Clears the field: drops its has-bit and resets the stored value to the
 * type-level default.
 */
public Builder clearFragmentIdentifierString() {
  bitField0_ = (bitField0_ & ~0x00000002);
  fragmentIdentifierString_ = getDefaultInstance().getFragmentIdentifierString();
  onChanged();
  return this;
}
/**
public void writeTo(com.google.protobuf.CodedOutputStream output)
                    throws java.io.IOException {
  // Generated serializer. getSerializedSize() is called first for its side
  // effect of memoizing sizes needed while writing nested messages.
  getSerializedSize();
  // Field 1: query_identifier, written only when present.
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeMessage(1, queryIdentifier_);
  }
  // Field 2: fragment_identifier_string, written as length-delimited bytes.
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeBytes(2, getFragmentIdentifierStringBytes());
  }
  // Round-trip any unknown fields captured at parse time.
  getUnknownFields().writeTo(output);
}