/**
 * Value equality for {@code RecognizeRequest}: two requests are equal when they
 * agree on presence and value of {@code config} and {@code audio}, and on any
 * unknown fields carried along.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Reflexive fast path.
  if (obj == this) {
    return true;
  }
  // Objects of any other type are delegated to the superclass comparison.
  if (!(obj instanceof com.google.cloud.speech.v1.RecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.RecognizeRequest that =
      (com.google.cloud.speech.v1.RecognizeRequest) obj;
  // Presence must match for each sub-message; when present, values must match too.
  if (hasConfig() != that.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(that.getConfig())) {
    return false;
  }
  if (hasAudio() != that.hasAudio()) {
    return false;
  }
  if (hasAudio() && !getAudio().equals(that.getAudio())) {
    return false;
  }
  return unknownFields.equals(that.unknownFields);
}
/** Returns the canonical immutable default instance of {@code RecognizeRequest}. */
@java.lang.Override public com.google.cloud.speech.v1.RecognizeRequest getDefaultInstanceForType() { return com.google.cloud.speech.v1.RecognizeRequest.getDefaultInstance(); }
/** Creates a new {@code Builder} for this message type by delegating to {@code newBuilder()}. */
@java.lang.Override public Builder newBuilderForType() { return newBuilder(); }
/**
 *
 *
 * <pre>
 * *Required* The audio data to be recognized.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionAudio audio = 2;</code>
 */
public com.google.cloud.speech.v1.RecognitionAudioOrBuilder getAudioOrBuilder() {
  // Delegates to getAudio(); the message type itself serves as its own OrBuilder view.
  return getAudio();
}
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 */
public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() {
  // Delegates to getConfig(); the message type itself serves as its own OrBuilder view.
  return getConfig();
}
if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { done = true; } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable();
/**
 * Builds a {@code RecognizeRequest} from the builder's current state without
 * enforcing required-field initialization.
 */
@java.lang.Override
public com.google.cloud.speech.v1.RecognizeRequest buildPartial() {
  com.google.cloud.speech.v1.RecognizeRequest message =
      new com.google.cloud.speech.v1.RecognizeRequest(this);
  // For each sub-message, use the nested builder when one has been created;
  // otherwise copy the raw field stored on this builder.
  message.config_ = (configBuilder_ == null) ? config_ : configBuilder_.build();
  message.audio_ = (audioBuilder_ == null) ? audio_ : audioBuilder_.build();
  onBuilt();
  return message;
}
/**
 * Builds the message and verifies it is fully initialized.
 *
 * @throws com.google.protobuf.UninitializedMessageException (via
 *     {@code newUninitializedMessageException}) if required fields are missing
 */
@java.lang.Override
public com.google.cloud.speech.v1.RecognizeRequest build() {
  // Assemble first, then validate initialization on the finished message.
  final com.google.cloud.speech.v1.RecognizeRequest built = buildPartial();
  if (!built.isInitialized()) {
    throw newUninitializedMessageException(built);
  }
  return built;
}
/**
 * Computes (and memoizes) the serialized byte size of this message:
 * tagged sizes of {@code config} (field 1) and {@code audio} (field 2)
 * plus any unknown fields.
 */
@java.lang.Override
public int getSerializedSize() {
  // -1 marks "not yet computed"; any other value is the cached answer.
  int cached = memoizedSize;
  if (cached != -1) {
    return cached;
  }
  int total = 0;
  if (config_ != null) {
    total += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getConfig());
  }
  if (audio_ != null) {
    total += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getAudio());
  }
  total += unknownFields.getSerializedSize();
  memoizedSize = total;
  return total;
}
/**
 * Memoized hash consistent with {@link #equals}: folds in the descriptor,
 * each present sub-message keyed by its field number, and the unknown fields.
 */
@java.lang.Override
public int hashCode() {
  // 0 means "not yet computed"; serve the cached value otherwise.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  if (hasConfig()) {
    h = (37 * h) + CONFIG_FIELD_NUMBER;
    h = (53 * h) + getConfig().hashCode();
  }
  if (hasAudio()) {
    h = (37 * h) + AUDIO_FIELD_NUMBER;
    h = (53 * h) + getAudio().hashCode();
  }
  h = (29 * h) + unknownFields.hashCode();
  memoizedHashCode = h;
  return h;
}
@Test
@SuppressWarnings("all")
public void recognizeTest() {
  // Stub the mock service with an empty response.
  RecognizeResponse expectedResponse = RecognizeResponse.newBuilder().build();
  mockSpeech.addResponse(expectedResponse);

  // Build the config and audio the client call expects.
  RecognitionConfig config =
      RecognitionConfig.newBuilder()
          .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
          .setSampleRateHertz(44100)
          .setLanguageCode("en-US")
          .build();
  RecognitionAudio audio =
      RecognitionAudio.newBuilder().setUri("gs://bucket_name/file_name.flac").build();

  RecognizeResponse actualResponse = client.recognize(config, audio);
  Assert.assertEquals(expectedResponse, actualResponse);

  // Exactly one RPC must have been issued, carrying the config and audio we built.
  List<GeneratedMessageV3> actualRequests = mockSpeech.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  RecognizeRequest actualRequest = (RecognizeRequest) actualRequests.get(0);
  Assert.assertEquals(config, actualRequest.getConfig());
  Assert.assertEquals(audio, actualRequest.getAudio());

  // The standard API-client header must have been sent on the channel.
  Assert.assertTrue(
      channelProvider.isHeaderSent(
          ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
          GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}
.setRequestMarshaller( io.grpc.protobuf.ProtoUtils.marshaller( com.google.cloud.speech.v1.RecognizeRequest.getDefaultInstance())) .setResponseMarshaller( io.grpc.protobuf.ProtoUtils.marshaller(
RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build(); return recognize(request);
/**
 * Merges the fields set on {@code other} into this builder: each present
 * sub-message is merged, and unknown fields are carried over.
 *
 * @return this builder, for chaining
 */
public Builder mergeFrom(com.google.cloud.speech.v1.RecognizeRequest other) {
  // Merging the default instance is a no-op.
  if (other == com.google.cloud.speech.v1.RecognizeRequest.getDefaultInstance()) {
    return this;
  }
  // Merge each sub-message only when the source actually carries it.
  if (other.hasConfig()) {
    mergeConfig(other.getConfig());
  }
  if (other.hasAudio()) {
    mergeAudio(other.getAudio());
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
.setRequestMarshaller( io.grpc.protobuf.ProtoUtils.marshaller( com.google.cloud.speech.v1.RecognizeRequest.getDefaultInstance())) .setResponseMarshaller( io.grpc.protobuf.ProtoUtils.marshaller(