.setLanguageCode("en-US") .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
.setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();
.setContent(audioBytes) .build();
@Override public void process(InputStream inputStream) throws IOException { byte[] data = IOUtils.toByteArray(inputStream); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder() .setContent(audioBytes) .build(); // Use blocking call to get audio transcript RecognizeResponse response = speechClient.recognize(config, audio); speechResults.set(response.getResultsList()); } });
private RecognitionAudio createRecognitionAudio(InputStream voiceStream) throws IOException {
  // Read the stream fully into an immutable byte string, then wrap it as inline audio content.
  ByteString audioContent = ByteString.readFrom(voiceStream);
  return RecognitionAudio.newBuilder().setContent(audioContent).build();
}
}
.setEnableWordTimeOffsets(true) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();
.build(); RecognitionAudio audio = RecognitionAudio.newBuilder() .setContent(audioBytes) .build();
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();
/** * Performs speech recognition on raw PCM audio and prints the transcription. * * @param fileName the path to a PCM audio file to transcribe. */ public static void syncRecognizeFile(String fileName) throws Exception { try (SpeechClient speech = SpeechClient.create()) { Path path = Paths.get(fileName); byte[] data = Files.readAllBytes(path); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Use blocking call to get audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync]
/**
 * Merges the audio source (content or URI) and unknown fields from {@code other} into this
 * builder. Generated protobuf code — do not edit logic by hand.
 */
public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionAudio other) {
  // Merging the default instance is a no-op.
  if (other == com.google.cloud.speech.v1.RecognitionAudio.getDefaultInstance()) return this;
  // Copy whichever member of the audio_source oneof is set on `other`.
  switch (other.getAudioSourceCase()) {
    case CONTENT:
      {
        setContent(other.getContent());
        break;
      }
    case URI:
      {
        // Assigns the raw oneof slot directly (case tag 2 corresponds to URI) rather than
        // going through a setter — standard generated-code pattern for string oneof fields.
        audioSourceCase_ = 2;
        audioSource_ = other.audioSource_;
        onChanged();
        break;
      }
    case AUDIOSOURCE_NOT_SET:
      {
        // Nothing set on `other`; leave this builder's oneof untouched.
        break;
      }
  }
  // Preserve any fields `other` carried that this proto version does not know about.
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}