/**
 * <pre>
 * Provides information to the recognizer that specifies how to process the
 * request. The first `StreamingRecognizeRequest` message must contain a
 * `streaming_config` message.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.StreamingRecognitionConfig streaming_config = 1;</code>
 */
public Builder setStreamingConfig(
    com.google.cloud.speech.v1.StreamingRecognitionConfig.Builder builderForValue) {
  com.google.cloud.speech.v1.StreamingRecognitionConfig message = builderForValue.build();
  if (streamingConfigBuilder_ != null) {
    // A nested single-field builder is active: route the value through it.
    streamingConfigBuilder_.setMessage(message);
  } else {
    // No sub-builder: store the built message directly and signal the change.
    streamingRequest_ = message;
    onChanged();
  }
  // Mark the oneof as carrying the streaming_config field (field number 1).
  streamingRequestCase_ = 1;
  return this;
}
/**
@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
// NOTE(review): fragment — the leading .build() closes a builder chain (presumably the
// RecognitionConfig assigned to recognitionConfig) that starts above this excerpt; confirm upstream.
// Wraps the finished RecognitionConfig into the StreamingRecognitionConfig sent as the
// first message of a streaming-recognize call.
.build(); StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();
// NOTE(review): fragment of a builder chain whose receiver starts above this excerpt.
// Single-utterance mode is enabled only when the video model is NOT in use and the
// SINGLE_UTTERANCE_ONLY flag is set — TODO confirm both declarations against the full file.
.setSingleUtterance(!useVideoModel && SINGLE_UTTERANCE_ONLY) .build();
/** * Starts recognizing speech audio. * * @param sampleRate The sample rate of the audio. */ public void startRecognizing(int sampleRate) { if (mApi == null) { Log.w(TAG, "API not ready. Ignoring the request."); return; } // Configure the API mRequestObserver = mApi.streamingRecognize(mResponseObserver); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder() .setConfig(RecognitionConfig.newBuilder() .setLanguageCode("en-US") .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) .setSampleRateHertz(sampleRate) .build() ) .setInterimResults(true) .setSingleUtterance(true) .build(); StreamingRecognizeRequest streamingRecognizeRequest = StreamingRecognizeRequest.newBuilder(). setStreamingConfig(streamingConfig).build(); mRequestObserver.onNext(streamingRecognizeRequest); }
// NOTE(review): fragment — the leading .build() closes a builder chain (presumably the
// RecognitionConfig assigned to recognitionConfig) that starts above this excerpt; confirm upstream.
// Builds the StreamingRecognitionConfig used as the first message of a streaming-recognize call.
.build(); StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();
// NOTE(review): fragment — the leading .build() closes a builder chain (presumably the
// RecognitionConfig assigned to recConfig) that starts above this excerpt; confirm upstream.
// Wraps recConfig into the streaming configuration message.
.build(); StreamingRecognitionConfig config = StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();
// NOTE(review): fragment — this expression is presumably the right-hand side of an
// assignment whose left-hand side sits on a preceding line outside this excerpt; verify.
StreamingRecognitionConfig.newBuilder().setConfig(recConfig).build();