return recognize(request);
RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList();
/** Verifies that a blocking recognize call transcribes the sample audio as "hello". */
@Test
public void syncRecognize() {
  RecognizeResponse response = speechClient.recognize(config(), audio());

  // At least one result with at least one alternative must come back.
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  SpeechRecognitionResult firstResult = response.getResults(0);
  Truth.assertThat(firstResult.getAlternativesCount()).isGreaterThan(0);

  String transcript = firstResult.getAlternatives(0).getTranscript();
  Truth.assertThat(transcript).isEqualTo("hello");
}
/**
 * Issues a synchronous recognize RPC against a public FLAC sample without catching
 * anything: any failure (client creation, auth, RPC) propagates to the caller.
 *
 * @throws Exception if the client cannot be created or the recognize call fails
 */
public static void executeNoCatch() throws Exception {
  // try-with-resources guarantees the client's channels are shut down.
  try (SpeechClient client = SpeechClient.create()) {
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setLanguageCode("en-US")
            .setSampleRateHertz(44100)
            .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
            .build();
    RecognitionAudio audio =
        RecognitionAudio.newBuilder().setUri("gs://gapic-toolkit/hello.flac").build();
    // Blocking call; the response is intentionally not inspected here
    // (the original stored it in an unused local, now removed).
    client.recognize(config, audio);
  }
}
@Test @SuppressWarnings("all") public void recognizeExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockSpeech.addException(exception); try { RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.FLAC; int sampleRateHertz = 44100; String languageCode = "en-US"; RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(encoding) .setSampleRateHertz(sampleRateHertz) .setLanguageCode(languageCode) .build(); String uri = "gs://bucket_name/file_name.flac"; RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uri).build(); client.recognize(config, audio); Assert.fail("No exception raised"); } catch (InvalidArgumentException e) { // Expected exception } }
/** Verifies a successful recognize round-trip: response, captured request, and client headers. */
@Test
@SuppressWarnings("all")
public void recognizeTest() {
  RecognizeResponse expectedResponse = RecognizeResponse.newBuilder().build();
  mockSpeech.addResponse(expectedResponse);

  RecognitionConfig config =
      RecognitionConfig.newBuilder()
          .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
          .setSampleRateHertz(44100)
          .setLanguageCode("en-US")
          .build();
  RecognitionAudio audio =
      RecognitionAudio.newBuilder().setUri("gs://bucket_name/file_name.flac").build();

  RecognizeResponse actualResponse = client.recognize(config, audio);
  Assert.assertEquals(expectedResponse, actualResponse);

  // The mock records every inbound request; exactly one recognize call is expected.
  List<GeneratedMessageV3> actualRequests = mockSpeech.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  RecognizeRequest actualRequest = (RecognizeRequest) actualRequests.get(0);
  Assert.assertEquals(config, actualRequest.getConfig());
  Assert.assertEquals(audio, actualRequest.getAudio());

  // The GAPIC layer must attach the standard x-goog-api-client header.
  Assert.assertTrue(
      channelProvider.isHeaderSent(
          ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
          GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build(); RecognizeResponse recognizeResponse = speech.recognize(recConfig, recognitionAudio);
/** * Performs speech recognition on raw PCM audio and prints the transcription. * * @param fileName the path to a PCM audio file to transcribe. */ public static void syncRecognizeFile(String fileName) throws Exception { try (SpeechClient speech = SpeechClient.create()) { Path path = Paths.get(fileName); byte[] data = Files.readAllBytes(path); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Use blocking call to get audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync]
RecognizeResponse recognizeResponse = speechClient.recognize(recConfig, recognitionAudio);
RecognizeResponse response = speechClient.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList();
RecognizeResponse recognizeResponse = speechClient.recognize(config, recognitionAudio);
/** * Performs speech recognition on remote FLAC file and prints the transcription. * * @param gcsUri the path to the remote FLAC audio file to transcribe. */ public static void syncRecognizeGcs(String gcsUri) throws Exception { // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS try (SpeechClient speech = SpeechClient.create()) { // Builds the request for remote FLAC file RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.FLAC) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build(); // Use blocking call for getting audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync_gcs]
RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList();
// Callback that transcribes a raw PCM audio stream and publishes the results.
@Override
public void process(InputStream inputStream) throws IOException {
  // Reads the entire stream into memory — suitable only for short audio clips.
  byte[] data = IOUtils.toByteArray(inputStream);
  ByteString audioBytes = ByteString.copyFrom(data);
  // Configure request with local raw PCM audio
  RecognitionConfig config = RecognitionConfig.newBuilder()
      .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
      .setLanguageCode("en-US")
      .setSampleRateHertz(16000)
      .build();
  RecognitionAudio audio = RecognitionAudio.newBuilder()
      .setContent(audioBytes)
      .build();
  // Use blocking call to get audio transcript
  RecognizeResponse response = speechClient.recognize(config, audio);
  // NOTE(review): speechResults is captured from the enclosing scope and is
  // presumably an AtomicReference/SettableFuture — confirm against the caller.
  speechResults.set(response.getResultsList());
}
});
client.recognize(config, audio);