@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.SpeechRecognitionAlternative)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.SpeechRecognitionAlternative other =
      (com.google.cloud.speech.v1.SpeechRecognitionAlternative) obj;

  boolean result = true;
  result = result && getTranscript().equals(other.getTranscript());
  result =
      result
          && (java.lang.Float.floatToIntBits(getConfidence())
              == java.lang.Float.floatToIntBits(other.getConfidence()));
  result = result && getWordsList().equals(other.getWordsList());
  result = result && unknownFields.equals(other.unknownFields);
  return result;
}
/**
 *
 *
 * <pre>
 * Output only. Transcript text representing the words that the user spoke.
 * </pre>
 *
 * <code>string transcript = 1;</code>
 */
public Builder clearTranscript() {
  transcript_ = getDefaultInstance().getTranscript();
  onChanged();
  return this;
}
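// A minimal usage sketch for this generated builder method (setTranscript is the standard
// generated setter for the same field; the literal value is illustrative only):
SpeechRecognitionAlternative.Builder builder =
    SpeechRecognitionAlternative.newBuilder().setTranscript("hello world");
builder.clearTranscript();
// After clearing, the field reverts to the proto3 default for strings: "".
SpeechRecognitionAlternative cleared = builder.build();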
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  hash = (37 * hash) + TRANSCRIPT_FIELD_NUMBER;
  hash = (53 * hash) + getTranscript().hashCode();
  hash = (37 * hash) + CONFIDENCE_FIELD_NUMBER;
  hash = (53 * hash) + java.lang.Float.floatToIntBits(getConfidence());
  if (getWordsCount() > 0) {
    hash = (37 * hash) + WORDS_FIELD_NUMBER;
    hash = (53 * hash) + getWordsList().hashCode();
  }
  hash = (29 * hash) + unknownFields.hashCode();
  memoizedHashCode = hash;
  return hash;
}
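// For reference, a small sketch of the value semantics the generated equals()/hashCode()
// provide; the field values below are illustrative, and setConfidence is assumed to be the
// standard generated setter for the float field:
SpeechRecognitionAlternative a =
    SpeechRecognitionAlternative.newBuilder().setTranscript("hello").setConfidence(0.92f).build();
SpeechRecognitionAlternative b =
    SpeechRecognitionAlternative.newBuilder().setTranscript("hello").setConfidence(0.92f).build();
// Messages with identical field values compare equal and hash identically, so they can be
// used safely as keys in hash-based collections.
assert a.equals(b) && a.hashCode() == b.hashCode();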
public Builder mergeFrom(com.google.cloud.speech.v1.SpeechRecognitionAlternative other) {
  if (other == com.google.cloud.speech.v1.SpeechRecognitionAlternative.getDefaultInstance())
    return this;
  if (!other.getTranscript().isEmpty()) {
    transcript_ = other.transcript_;
    onChanged();
List<SpeechRecognitionAlternative> alternatives = result.getAlternativesList();
for (SpeechRecognitionAlternative alternative : alternatives) {
  System.out.printf("Transcription: %s%n", alternative.getTranscript());
}
@Test
public void syncRecognize() {
  RecognizeResponse response = speechClient.recognize(config(), audio());
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternativesCount()).isGreaterThan(0);

  String text = response.getResults(0).getAlternatives(0).getTranscript();
  Truth.assertThat(text).isEqualTo("hello");
}
@Test
public void longrunningRecognize() throws Exception {
  LongRunningRecognizeResponse response =
      speechClient.longRunningRecognizeAsync(config(), audio()).get();
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  Truth.assertThat(response.getResults(0).getAlternativesCount()).isGreaterThan(0);

  String text = response.getResults(0).getAlternatives(0).getTranscript();
  Truth.assertThat(text).isEqualTo("hello");
}
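// The two tests above call config() and audio() helpers that are not part of this excerpt.
// A hypothetical sketch of such fixtures, assuming the same hello.flac sample that the
// streaming test below fetches over HTTPS (the encoding, sample rate, and gs:// URI here are
// assumptions, not the actual test fixtures):
private static RecognitionConfig config() {
  return RecognitionConfig.newBuilder()
      .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
      .setLanguageCode("en-US")
      .setSampleRateHertz(16000)
      .build();
}

private static RecognitionAudio audio() {
  return RecognitionAudio.newBuilder().setUri("gs://gapic-toolkit/hello.flac").build();
}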
@Test
public void streamingRecognize() throws Exception {
  byte[] audioBytes =
      Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac"));

  StreamingRecognitionConfig streamingConfig =
      StreamingRecognitionConfig.newBuilder().setConfig(config()).build();

  ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver =
      new ResponseApiStreamingObserver<>();
  ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
      speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver);

  // The first request must **only** contain the audio configuration:
  requestObserver.onNext(
      StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build());

  // Subsequent requests must **only** contain the audio data.
  requestObserver.onNext(
      StreamingRecognizeRequest.newBuilder()
          .setAudioContent(ByteString.copyFrom(audioBytes))
          .build());

  // Mark transmission as completed after sending the data.
  requestObserver.onCompleted();

  List<StreamingRecognizeResponse> responses = responseObserver.future().get();

  Truth.assertThat(responses.size()).isGreaterThan(0);
  Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0);
  Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0);
  String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript();
  Truth.assertThat(text).isEqualTo("hello");
}
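// ResponseApiStreamingObserver is a test helper rather than a library class. A hypothetical
// sketch of such an observer, assuming it simply buffers responses and completes a future when
// the server closes the stream (uses ApiStreamObserver from GAX and SettableApiFuture from
// com.google.api.core):
static class ResponseApiStreamingObserver<T> implements ApiStreamObserver<T> {
  private final SettableApiFuture<List<T>> future = SettableApiFuture.create();
  private final List<T> messages = new ArrayList<>();

  @Override
  public void onNext(T message) {
    messages.add(message);
  }

  @Override
  public void onError(Throwable t) {
    future.setException(t);
  }

  @Override
  public void onCompleted() {
    future.set(messages);
  }

  // Resolves to every streamed response once the call completes.
  SettableApiFuture<List<T>> future() {
    return future;
  }
}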
public void onResponse(StreamingRecognizeResponse response) {
  responses.add(response);
  StreamingRecognitionResult result = response.getResultsList().get(0);
  // There can be several alternative transcripts for a given chunk of speech. Just
  // use the first (most likely) one here.
  SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
  System.out.printf("Transcript : %s\n", alternative.getTranscript());
}
public void onComplete() {
  for (StreamingRecognizeResponse response : responses) {
    StreamingRecognitionResult result = response.getResultsList().get(0);
    SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
    System.out.printf("Transcript : %s\n", alternative.getTranscript());
  }
}
System.out.printf("Transcript : %s\n", alternative.getTranscript());
System.out.printf("Transcript : %s\n", alternative.getTranscript());
System.out.printf("Transcription: %s%n", alternative.getTranscript());
/**
 * Performs speech recognition on raw PCM audio and prints the transcription.
 *
 * @param fileName the path to a PCM audio file to transcribe.
 */
public static void syncRecognizeFile(String fileName) throws Exception {
  try (SpeechClient speech = SpeechClient.create()) {
    Path path = Paths.get(fileName);
    byte[] data = Files.readAllBytes(path);
    ByteString audioBytes = ByteString.copyFrom(data);

    // Configure request with local raw PCM audio
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();

    // Use blocking call to get audio transcript
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();

    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s%n", alternative.getTranscript());
    }
  }
}
// [END speech_transcribe_sync]
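// A possible entry point for the sample above; the file path is a placeholder for a local
// 16 kHz, 16-bit linear PCM recording:
public static void main(String... args) throws Exception {
  syncRecognizeFile("./resources/audio.raw");
}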
System.out.printf("Transcription: %s%n", alternative.getTranscript());
System.out.printf("Transcript : %s\n", alternative.getTranscript());
System.out.printf("Transcript : %s\n", alternative.getTranscript());
System.out.printf("Transcription: %s\n", alternative.getTranscript());
System.out.format("Transcript: %s\n\n", alternative.getTranscript());
/**
 * Performs speech recognition on a remote FLAC file and prints the transcription.
 *
 * @param gcsUri the path to the remote FLAC audio file to transcribe.
 */
public static void syncRecognizeGcs(String gcsUri) throws Exception {
  // Instantiates a client with GOOGLE_APPLICATION_CREDENTIALS
  try (SpeechClient speech = SpeechClient.create()) {
    // Builds the request for the remote FLAC file
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(gcsUri).build();

    // Use blocking call to get audio transcript
    RecognizeResponse response = speech.recognize(config, audio);
    List<SpeechRecognitionResult> results = response.getResultsList();

    for (SpeechRecognitionResult result : results) {
      // There can be several alternative transcripts for a given chunk of speech. Just use the
      // first (most likely) one here.
      SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
      System.out.printf("Transcription: %s%n", alternative.getTranscript());
    }
  }
}
// [END speech_transcribe_sync_gcs]
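// And a matching invocation for the GCS variant; the bucket and object name are placeholders
// for a real Cloud Storage URI pointing at a 16 kHz FLAC file:
public static void main(String... args) throws Exception {
  syncRecognizeGcs("gs://your-bucket/audio.flac");
}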