// NOTE(review): truncated fragment — the for-loop body and its closing braces
// continue beyond this chunk; do not treat this as a complete statement list.
// Prints the transcript of every recognition alternative in the result.
List<SpeechRecognitionAlternative> alternatives = result.getAlternativesList();
for (SpeechRecognitionAlternative alternative : alternatives) {
  System.out.printf("Transcription: %s%n", alternative.getTranscript());
// Computes and memoizes the hash code for this generated protobuf message.
// NOTE: the seed (41) and the multiplier/statement sequence follow protoc's
// generated-code convention; the order must not be changed, or hashes will
// disagree with equals() across generated classes.
@java.lang.Override
public int hashCode() {
  // Fast path: reuse the cached value once computed (0 means "not yet computed").
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  hash = (37 * hash) + TRANSCRIPT_FIELD_NUMBER;
  hash = (53 * hash) + getTranscript().hashCode();
  hash = (37 * hash) + CONFIDENCE_FIELD_NUMBER;
  // Float fields hash their raw IEEE-754 bit pattern so equal floats hash equally.
  hash = (53 * hash) + java.lang.Float.floatToIntBits(getConfidence());
  // Repeated fields contribute only when non-empty.
  if (getWordsCount() > 0) {
    hash = (37 * hash) + WORDS_FIELD_NUMBER;
    hash = (53 * hash) + getWordsList().hashCode();
  }
  hash = (29 * hash) + unknownFields.hashCode();
  memoizedHashCode = hash;
  return hash;
}
/**
 * Builds a {@code SpeechRecognitionAlternative} from the current builder state
 * without requiring all fields to be set (protobuf "partial" build).
 *
 * <p>Fix: removed the unused local {@code from_bitField0_}, which was assigned
 * from {@code bitField0_} but never read.
 *
 * @return the assembled message; never {@code null}
 */
@java.lang.Override
public com.google.cloud.speech.v1.SpeechRecognitionAlternative buildPartial() {
  com.google.cloud.speech.v1.SpeechRecognitionAlternative result =
      new com.google.cloud.speech.v1.SpeechRecognitionAlternative(this);
  // No singular optional fields track presence here, so the outgoing bit field
  // stays 0; it is still written below to match the generated-code pattern.
  int to_bitField0_ = 0;
  result.transcript_ = transcript_;
  result.confidence_ = confidence_;
  if (wordsBuilder_ == null) {
    // Bit 0x00000004 marks a locally mutable words_ list; freeze it exactly once
    // so the built message exposes an unmodifiable view.
    if (((bitField0_ & 0x00000004) == 0x00000004)) {
      words_ = java.util.Collections.unmodifiableList(words_);
      bitField0_ = (bitField0_ & ~0x00000004);
    }
    result.words_ = words_;
  } else {
    result.words_ = wordsBuilder_.build();
  }
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
/**
 * Value equality for this generated message: transcript, confidence (compared
 * via raw float bits, so NaN == NaN and +0.0 != -0.0), repeated words, and
 * unknown fields must all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.SpeechRecognitionAlternative)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.SpeechRecognitionAlternative other =
      (com.google.cloud.speech.v1.SpeechRecognitionAlternative) obj;
  if (!getTranscript().equals(other.getTranscript())) {
    return false;
  }
  // Bit-pattern comparison keeps equals() consistent with hashCode(), which
  // also uses floatToIntBits.
  if (java.lang.Float.floatToIntBits(getConfidence())
      != java.lang.Float.floatToIntBits(other.getConfidence())) {
    return false;
  }
  if (!getWordsList().equals(other.getWordsList())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 * <pre>
 * Output only. Transcript text representing the words that the user spoke.
 * </pre>
 *
 * <code>string transcript = 1;</code>
 *
 * @return this builder, for chaining
 */
public Builder clearTranscript() {
  // Reset to the proto3 default — the default instance's (empty) transcript.
  transcript_ = getDefaultInstance().getTranscript();
  onChanged(); // Notify parent builders that a field changed.
  return this;
}
// NOTE(review): the trailing "/**" below opens the next member's javadoc,
// which is truncated in this chunk.
/**
// NOTE(review): truncated fragment — braces are unbalanced; the remainder of
// mergeFrom (closing braces, merging of the repeated words field, unknown
// fields, etc.) continues beyond this chunk. As cut, the confidence merge
// appears nested INSIDE the transcript check — in protoc-generated code these
// two checks are siblings; verify against the full generated source.
public Builder mergeFrom(com.google.cloud.speech.v1.SpeechRecognitionAlternative other) {
  // Merging the default instance is a no-op.
  if (other == com.google.cloud.speech.v1.SpeechRecognitionAlternative.getDefaultInstance())
    return this;
  // Proto3 scalar merge: only non-default values from `other` overwrite.
  if (!other.getTranscript().isEmpty()) {
    transcript_ = other.transcript_;
    onChanged();
    if (other.getConfidence() != 0F) {
      setConfidence(other.getConfidence());
// NOTE(review): truncated fragment — the final printf's argument list and the
// enclosing loop close beyond this chunk.
// Prints the transcript, then each recognized word (with per-word detail).
System.out.printf("Transcription: %s%n", alternative.getTranscript());
for (WordInfo wordInfo : alternative.getWordsList()) {
  System.out.println(wordInfo.getWord());
  System.out.printf(
/**
 * Handles a single {@link StreamingRecognitionResult} by creating a
 * {@link TranscriptionResult} from it and notifying all registered
 * {@link TranscriptionListener}s.
 *
 * @param result the result to handle
 */
private void handleResult(StreamingRecognitionResult result) {
  final List<SpeechRecognitionAlternative> candidates = result.getAlternativesList();
  if (candidates.isEmpty()) {
    return;
  }
  // Interim (non-final) results are flagged by negating isFinal.
  final TranscriptionResult outcome =
      new TranscriptionResult(
          null,
          this.messageID,
          !result.getIsFinal(),
          this.languageTag,
          result.getStability());
  for (final SpeechRecognitionAlternative candidate : candidates) {
    outcome.addAlternative(
        new TranscriptionAlternative(candidate.getTranscript(), candidate.getConfidence()));
  }
  sent(outcome);
}
// Fragment of a larger method — the enclosing definition is not visible in
// this chunk. Copies per-alternative recognition metadata onto `ff` as string
// attributes so downstream consumers can inspect them without re-parsing.
// NOTE(review): `session`/`ff` presumably are a NiFi ProcessSession/FlowFile —
// verify against the enclosing processor.
session.putAttribute(ff, "google.speech.confidence", String.valueOf(alternative.getConfidence()));
session.putAttribute(ff, "google.speech.serialized.size", String.valueOf(alternative.getSerializedSize()));
session.putAttribute(ff, "google.speech.words.count", String.valueOf(alternative.getWordsCount()));
// NOTE(review): truncated fragment — the final printf's argument list and the
// enclosing loop close beyond this chunk.
// Prints the transcript, then each recognized word (with per-word detail).
System.out.printf("Transcription: %s\n", alternative.getTranscript());
for (WordInfo wordInfo : alternative.getWordsList()) {
  System.out.println(wordInfo.getWord());
  System.out.printf(
/** Verifies that a blocking recognize() call transcribes the test clip as "hello". */
@Test
public void syncRecognize() {
  final RecognizeResponse response = speechClient.recognize(config(), audio());
  // At least one result with at least one alternative must come back.
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternativesCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternatives(0).getTranscript())
      .isEqualTo("hello");
}
/** Verifies that the long-running (async) recognize path transcribes the clip as "hello". */
@Test
public void longrunningRecognize() throws Exception {
  final LongRunningRecognizeResponse response =
      speechClient.longRunningRecognizeAsync(config(), audio()).get();
  // At least one result with at least one alternative must come back.
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternativesCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternatives(0).getTranscript())
      .isEqualTo("hello");
}
@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
public void onResponse(StreamingRecognizeResponse response) { responses.add(response); StreamingRecognitionResult result = response.getResultsList().get(0); // There can be several alternative transcripts for a given chunk of speech. Just // use the first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcript : %s\n", alternative.getTranscript()); }
/** On stream completion, prints the top transcript of every collected response. */
public void onComplete() {
  for (final StreamingRecognizeResponse streamedResponse : responses) {
    final SpeechRecognitionAlternative best =
        streamedResponse.getResultsList().get(0).getAlternativesList().get(0);
    System.out.printf("Transcript : %s\n", best.getTranscript());
  }
}
// Fragment: prints the selected alternative's transcript (enclosing method
// truncated in this chunk).
System.out.printf("Transcript : %s\n", alternative.getTranscript());
// Fragment: prints the selected alternative's transcript (enclosing method
// truncated in this chunk).
System.out.printf("Transcript : %s\n", alternative.getTranscript());
// Fragment: prints the selected alternative's transcript (enclosing method
// truncated in this chunk).
System.out.printf("Transcription: %s%n", alternative.getTranscript());
/** * Performs speech recognition on raw PCM audio and prints the transcription. * * @param fileName the path to a PCM audio file to transcribe. */ public static void syncRecognizeFile(String fileName) throws Exception { try (SpeechClient speech = SpeechClient.create()) { Path path = Paths.get(fileName); byte[] data = Files.readAllBytes(path); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Use blocking call to get audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync]
// Fragment: prints the selected alternative's transcript (enclosing method
// truncated in this chunk).
System.out.printf("Transcription: %s%n", alternative.getTranscript());