/** Returns a new, empty builder for this message type. */
@java.lang.Override
public Builder newBuilderForType() {
  Builder freshBuilder = newBuilder();
  return freshBuilder;
}
/**
 * Builds the message from the builder's current state without enforcing
 * required-field checks. Copies the scalar fields directly and resolves the
 * {@code config} field from its nested builder when one is present.
 */
@java.lang.Override
public com.google.cloud.speech.v1.StreamingRecognitionConfig buildPartial() {
  com.google.cloud.speech.v1.StreamingRecognitionConfig result =
      new com.google.cloud.speech.v1.StreamingRecognitionConfig(this);
  // A live nested builder takes precedence over the raw field value.
  result.config_ = (configBuilder_ == null) ? config_ : configBuilder_.build();
  result.singleUtterance_ = singleUtterance_;
  result.interimResults_ = interimResults_;
  onBuilt();
  return result;
}
/**
 * Value equality: two configs are equal when their {@code config} presence and
 * value, both boolean flags, and their unknown fields all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.StreamingRecognitionConfig)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.StreamingRecognitionConfig other =
      (com.google.cloud.speech.v1.StreamingRecognitionConfig) obj;
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (getSingleUtterance() != other.getSingleUtterance()) {
    return false;
  }
  if (getInterimResults() != other.getInterimResults()) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
// Fragment of a oneof merge (continues outside this view; `.mergeFrom(value)` is
// completed on a later, unseen line). If the streaming_request oneof already holds a
// StreamingRecognitionConfig (case 1) that is not the shared default instance, the
// incoming value is merged into the existing one rather than replacing it.
// NOTE: the `!=` against getDefaultInstance() is an intentional identity comparison —
// the default instance is a singleton in protobuf-generated code.
if (streamingRequestCase_ == 1 && streamingRequest_ != com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance()) { streamingRequest_ = com.google.cloud.speech.v1.StreamingRecognitionConfig.newBuilder( (com.google.cloud.speech.v1.StreamingRecognitionConfig) streamingRequest_) .mergeFrom(value)
/** Returns the singleton default instance for this message type. */
@java.lang.Override
public com.google.cloud.speech.v1.StreamingRecognitionConfig getDefaultInstanceForType() {
  com.google.cloud.speech.v1.StreamingRecognitionConfig defaultInstance =
      com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();
  return defaultInstance;
}
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 */
public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() {
  // The message itself implements the OrBuilder interface, so the plain getter suffices.
  com.google.cloud.speech.v1.RecognitionConfig config = getConfig();
  return config;
}
/**
 * Value equality for the request: the active oneof case must match, the value of
 * the active case (streaming_config or audio_content) must match, and the unknown
 * fields must match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.StreamingRecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.StreamingRecognizeRequest other =
      (com.google.cloud.speech.v1.StreamingRecognizeRequest) obj;
  if (!getStreamingRequestCase().equals(other.getStreamingRequestCase())) {
    return false;
  }
  switch (streamingRequestCase_) {
    case 1:
      if (!getStreamingConfig().equals(other.getStreamingConfig())) {
        return false;
      }
      break;
    case 2:
      if (!getAudioContent().equals(other.getAudioContent())) {
        return false;
      }
      break;
    default:
      // Case 0 (not set): nothing further to compare.
      break;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request. The first `StreamingRecognizeRequest` message must contain a
 * `streaming_config` message.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.StreamingRecognitionConfig streaming_config = 1;</code>
 */
public com.google.cloud.speech.v1.StreamingRecognitionConfig getStreamingConfig() {
  // Return the oneof payload only when this case is active; otherwise the default.
  return streamingRequestCase_ == 1
      ? (com.google.cloud.speech.v1.StreamingRecognitionConfig) streamingRequest_
      : com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();
}
/**
/**
 * Serializes this message to the given stream, skipping fields that hold their
 * proto3 default values (null message, false booleans).
 */
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
  if (config_ != null) {
    output.writeMessage(1, getConfig());
  }
  if (singleUtterance_) {
    output.writeBool(2, singleUtterance_);
  }
  if (interimResults_) {
    output.writeBool(3, interimResults_);
  }
  unknownFields.writeTo(output);
}
@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
/**
 * Computes and memoizes the hash from the descriptor, the optional config, both
 * boolean flags, and the unknown fields. The multipliers (19/37/53/29) mirror the
 * equals() contract and must not change, or serialized-map lookups would break.
 */
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  if (hasConfig()) {
    h = (37 * h) + CONFIG_FIELD_NUMBER;
    h = (53 * h) + getConfig().hashCode();
  }
  h = (37 * h) + SINGLE_UTTERANCE_FIELD_NUMBER;
  h = (53 * h) + com.google.protobuf.Internal.hashBoolean(getSingleUtterance());
  h = (37 * h) + INTERIM_RESULTS_FIELD_NUMBER;
  h = (53 * h) + com.google.protobuf.Internal.hashBoolean(getInterimResults());
  h = (29 * h) + unknownFields.hashCode();
  memoizedHashCode = h;
  return h;
}
/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request. The first `StreamingRecognizeRequest` message must contain a
 * `streaming_config` message.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.StreamingRecognitionConfig streaming_config = 1;</code>
 */
public com.google.cloud.speech.v1.StreamingRecognitionConfigOrBuilder
    getStreamingConfigOrBuilder() {
  // Active oneof case 1 exposes the stored message; any other case yields the default.
  return streamingRequestCase_ == 1
      ? (com.google.cloud.speech.v1.StreamingRecognitionConfig) streamingRequest_
      : com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();
}
/**
 * Returns the wire size of this message, caching the result in memoizedSize.
 * Fields at their proto3 defaults contribute nothing, matching writeTo().
 */
@java.lang.Override
public int getSerializedSize() {
  int cached = memoizedSize;
  if (cached != -1) {
    return cached;
  }
  int total = 0;
  if (config_ != null) {
    total += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getConfig());
  }
  if (singleUtterance_) {
    total += com.google.protobuf.CodedOutputStream.computeBoolSize(2, singleUtterance_);
  }
  if (interimResults_) {
    total += com.google.protobuf.CodedOutputStream.computeBoolSize(3, interimResults_);
  }
  total += unknownFields.getSerializedSize();
  memoizedSize = total;
  return total;
}
// Fragment: the `.build();` below finishes a RecognitionConfig builder chain that
// starts outside this view; the result is then wrapped in a StreamingRecognitionConfig
// for use with the streaming recognize API.
.build(); StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();
/**
 * Merges another StreamingRecognitionConfig into this builder. Fields at their
 * default values in {@code other} are skipped so they never clobber values
 * already set here.
 */
public Builder mergeFrom(com.google.cloud.speech.v1.StreamingRecognitionConfig other) {
  // Identity check against the singleton default: nothing to merge.
  if (other == com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance()) {
    return this;
  }
  if (other.hasConfig()) {
    mergeConfig(other.getConfig());
  }
  if (other.getSingleUtterance()) {
    setSingleUtterance(other.getSingleUtterance());
  }
  if (other.getInterimResults()) {
    setInterimResults(other.getInterimResults());
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request. The first `StreamingRecognizeRequest` message must contain a
 * `streaming_config` message.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.StreamingRecognitionConfig streaming_config = 1;</code>
 */
public com.google.cloud.speech.v1.StreamingRecognitionConfig getStreamingConfig() {
  // When the oneof holds a different case, always report the default instance.
  if (streamingRequestCase_ != 1) {
    return com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();
  }
  // Case 1 is active: read from the nested builder if present, else the raw field.
  return streamingConfigBuilder_ == null
      ? (com.google.cloud.speech.v1.StreamingRecognitionConfig) streamingRequest_
      : streamingConfigBuilder_.getMessage();
}
/**
// Fragment: the `.build();` below completes a RecognitionConfig builder chain begun
// outside this view; the built config is then wrapped in a StreamingRecognitionConfig.
.build(); StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();
/**
 *
 *
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request. The first `StreamingRecognizeRequest` message must contain a
 * `streaming_config` message.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.StreamingRecognitionConfig streaming_config = 1;</code>
 */
public com.google.cloud.speech.v1.StreamingRecognitionConfigOrBuilder
    getStreamingConfigOrBuilder() {
  // Inactive oneof case: expose the default instance.
  if (streamingRequestCase_ != 1) {
    return com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();
  }
  // Case 1 active: prefer the live builder's view, falling back to the stored message.
  return streamingConfigBuilder_ != null
      ? streamingConfigBuilder_.getMessageOrBuilder()
      : (com.google.cloud.speech.v1.StreamingRecognitionConfig) streamingRequest_;
}
/**
StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();
// Fragment (block continues outside this view): if the streaming_request oneof does
// not currently hold a StreamingRecognitionConfig (case 1), seed the field with the
// default instance — presumably so a subsequent merge has a base message; the
// remainder of the branch is on unseen lines.
if (!(streamingRequestCase_ == 1)) { streamingRequest_ = com.google.cloud.speech.v1.StreamingRecognitionConfig.getDefaultInstance();