@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
// Fragment of a larger send loop (enclosing method not visible here).
// NOTE(review): BlockingQueue.take() blocks until a chunk is available — presumably a
// microphone/reader thread fills sharedQueue; confirm against the producer side.
request = StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(sharedQueue.take())) .build();
// Fragment: tail of a request-builder chain plus the send; the builder start and the
// declaration of clientStream/data are outside this view.
.setAudioContent(ByteString.copyFrom(data)) .build(); clientStream.send(request);
// Fragment: here audioBytes is passed directly to setAudioContent, so it is presumably
// already a ByteString (unlike the byte[] variants nearby) — TODO confirm at the caller.
.setAudioContent(audioBytes) .build());
/** * Recognizes the speech audio. This method should be called every time a chunk of byte buffer * is ready. * * @param data The audio data. * @param size The number of elements that are actually relevant in the {@code data}. */ public void recognize(byte[] data, int size) { if (mRequestObserver == null) { return; } // Call the streaming recognition API mRequestObserver.onNext(StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(data, 0, size)) .build()); }
// Two identical fragments: tail of request-builder chains whose opening (and the
// surrounding onNext(...) call) lies outside this view; ByteString.copyFrom snapshots
// the whole data array.
.setAudioContent(ByteString.copyFrom(data)) .build());
.setAudioContent(ByteString.copyFrom(data)) .build());
// NOTE(review): protoc-generated builder code (standard oneof merge pattern for the
// streaming_request oneof) — regenerate from the .proto rather than hand-editing.
// Semantics visible below: merging the default instance is a no-op; when `other` has the
// config case set it is deep-merged (mergeStreamingConfig), when the audio case is set it
// overwrites (setAudioContent), and an unset oneof leaves this builder untouched. Unknown
// fields from `other` are always merged, and onChanged() notifies parent builders.
public Builder mergeFrom(com.google.cloud.speech.v1.StreamingRecognizeRequest other) { if (other == com.google.cloud.speech.v1.StreamingRecognizeRequest.getDefaultInstance()) return this; switch (other.getStreamingRequestCase()) { case STREAMING_CONFIG: { mergeStreamingConfig(other.getStreamingConfig()); break; } case AUDIO_CONTENT: { setAudioContent(other.getAudioContent()); break; } case STREAMINGREQUEST_NOT_SET: { break; } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; }