@java.lang.Override
public int hashCode() {
  // Serve the memoized value when a previous call already computed it.
  int cached = memoizedHashCode;
  if (cached != 0) {
    return cached;
  }
  // Fold the descriptor and each set field into the hash, mirroring the
  // field order used by equals().
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  if (getAlternativesCount() > 0) {
    h = (37 * h) + ALTERNATIVES_FIELD_NUMBER;
    h = (53 * h) + getAlternativesList().hashCode();
  }
  h = (37 * h) + CHANNEL_TAG_FIELD_NUMBER;
  h = (53 * h) + getChannelTag();
  h = (29 * h) + unknownFields.hashCode();
  memoizedHashCode = h;
  return h;
}
@java.lang.Override
public com.google.cloud.speech.v1.SpeechRecognitionResult buildPartial() {
  // Builds a message from this builder's current state without validating
  // required fields (none exist for this message).
  com.google.cloud.speech.v1.SpeechRecognitionResult result =
      new com.google.cloud.speech.v1.SpeechRecognitionResult(this);
  int from_bitField0_ = bitField0_;
  // No singular field presence is tracked for the built message, so this stays 0.
  int to_bitField0_ = 0;
  if (alternativesBuilder_ == null) {
    // Plain-list mode: freeze the list so the built message is immutable and
    // clear the bit that marks the builder's list as mutable.
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      alternatives_ = java.util.Collections.unmodifiableList(alternatives_);
      bitField0_ = (bitField0_ & ~0x00000001);
    }
    result.alternatives_ = alternatives_;
  } else {
    // Nested-builder mode: delegate list construction to the field builder.
    result.alternatives_ = alternativesBuilder_.build();
  }
  result.channelTag_ = channelTag_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
// Merges the non-default fields of `other` into this builder.
public Builder mergeFrom(com.google.cloud.speech.v1.SpeechRecognitionResult other) {
  // Merging from the default instance is a no-op.
  if (other == com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance()) return this;
  if (alternativesBuilder_ == null) {
    // NOTE(review): this fragment is truncated here, and the channelTag merge
    // appears nested inside the alternativesBuilder_ == null branch, which
    // differs from the usual generated layout — confirm against the full file.
    if (other.getChannelTag() != 0) {
      setChannelTag(other.getChannelTag());
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Identity check first: an object always equals itself.
  if (obj == this) {
    return true;
  }
  // Defer to the superclass for objects of any other type.
  if (!(obj instanceof com.google.cloud.speech.v1.SpeechRecognitionResult)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.SpeechRecognitionResult that =
      (com.google.cloud.speech.v1.SpeechRecognitionResult) obj;
  // Field-by-field comparison, including unknown fields, matching hashCode().
  return getAlternativesList().equals(that.getAlternativesList())
      && getChannelTag() == that.getChannelTag()
      && unknownFields.equals(that.unknownFields);
}
@Test
public void syncRecognize() {
  // Issue a blocking recognize call and verify a non-empty transcription
  // matching the expected utterance.
  RecognizeResponse response = speechClient.recognize(config(), audio());
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  int alternativeCount = response.getResults(0).getAlternativesCount();
  Truth.assertThat(alternativeCount).isGreaterThan(0);
  String transcript = response.getResults(0).getAlternatives(0).getTranscript();
  Truth.assertThat(transcript).isEqualTo("hello");
}
// Print the transcript of every alternative for this result.
// (Fragment is truncated; the loop body continues past this view.)
List<SpeechRecognitionAlternative> alternatives = result.getAlternativesList();
for (SpeechRecognitionAlternative alternative : alternatives) {
  System.out.printf("Transcription: %s%n", alternative.getTranscript());
// Use only the first alternative of this result and print its transcript.
SpeechRecognitionAlternative alternative = result.getAlternatives(0);
System.out.format("Transcript: %s\n\n", alternative.getTranscript());
/** Returns the default (empty) instance shared by all messages of this type. */
@java.lang.Override
public com.google.cloud.speech.v1.SpeechRecognitionResult getDefaultInstanceForType() {
  return com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance();
}
@Test
public void longrunningRecognize() throws Exception {
  // Kick off the long-running operation, block on its completion, and verify
  // the transcription matches the expected utterance.
  LongRunningRecognizeResponse response =
      speechClient.longRunningRecognizeAsync(config(), audio()).get();
  Truth.assertThat(response.getResultsCount()).isGreaterThan(0);
  int alternativeCount = response.getResults(0).getAlternativesCount();
  Truth.assertThat(alternativeCount).isGreaterThan(0);
  String transcript = response.getResults(0).getAlternatives(0).getTranscript();
  Truth.assertThat(transcript).isEqualTo("hello");
}
// Take only the first alternative of this result and print its transcript.
SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
System.out.printf("Transcript : %s\n", alternative.getTranscript());
// Take the first alternative of this result, then write its transcript into a
// newly created FlowFile. (Fragment is truncated; the callback body continues
// past this view.)
final SpeechRecognitionAlternative alternative = result.getAlternatives(0);
FlowFile ff = session.write(session.create(), new OutputStreamCallback() {
  @Override
/**
 * Appends a new {@code SpeechRecognitionResult} builder, initialized from the
 * default instance, to the end of the results list and returns it for in-place
 * editing.
 *
 * <pre>
 * Output only. Sequential list of transcription results corresponding to
 * sequential portions of audio.
 * </pre>
 *
 * <code>repeated .google.cloud.speech.v1.SpeechRecognitionResult results = 2;</code>
 */
public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuilder() {
  return getResultsFieldBuilder()
      .addBuilder(com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance());
}
/**
/**
 * Transcribes {@code voiceStream} with Google Cloud Speech and forwards the
 * resulting transcript as a text request.
 *
 * @see AIDataService#voiceRequest(InputStream, RequestExtras, AIServiceContext)
 * @throws AIServiceException if recognition fails or yields no transcript
 */
@Override
public AIResponse voiceRequest(InputStream voiceStream, RequestExtras requestExtras,
    AIServiceContext serviceContext) throws AIServiceException {
  RecognizeResponse response;
  // FIX: the client was created but never closed, leaking its background
  // resources on every call. GAPIC clients are AutoCloseable, so scope it
  // with try-with-resources.
  try (SpeechClient speechClient = SpeechClient.create()) {
    RecognitionAudio recognitionAudio = createRecognitionAudio(voiceStream);
    response = speechClient.recognize(config.getRecognitionConfig(), recognitionAudio);
  } catch (IOException | StatusRuntimeException e) {
    // Preserve the underlying cause for diagnosis.
    throw new AIServiceException("Failed to recognize speech", e);
  }
  // Reject responses that carry no usable transcription.
  if ((response.getResultsCount() == 0) || (response.getResults(0).getAlternativesCount() == 0)) {
    throw new AIServiceException("No speech");
  }
  String transcript = response.getResults(0).getAlternatives(0).getTranscript();
  AIRequest request = new AIRequest(transcript);
  return request(request, requestExtras, serviceContext);
}
// Take only the first alternative of this result and print its transcript.
SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
System.out.printf("Transcription: %s%n", alternative.getTranscript());
/**
 * Appends a new {@code SpeechRecognitionResult} builder, initialized from the
 * default instance, to the end of the results list and returns it for in-place
 * editing.
 *
 * <pre>
 * Output only. Sequential list of transcription results corresponding to
 * sequential portions of audio.
 * </pre>
 *
 * <code>repeated .google.cloud.speech.v1.SpeechRecognitionResult results = 2;</code>
 */
public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuilder() {
  return getResultsFieldBuilder()
      .addBuilder(com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance());
}
/**
// Take only the first alternative of this result.
SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
/**
 * Inserts a new {@code SpeechRecognitionResult} builder, initialized from the
 * default instance, at {@code index} in the results list and returns it for
 * in-place editing.
 *
 * <pre>
 * Output only. Sequential list of transcription results corresponding to
 * sequential portions of audio.
 * </pre>
 *
 * <code>repeated .google.cloud.speech.v1.SpeechRecognitionResult results = 2;</code>
 */
public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuilder(int index) {
  return getResultsFieldBuilder()
      .addBuilder(
          index, com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance());
}
/**
/** * Performs speech recognition on raw PCM audio and prints the transcription. * * @param fileName the path to a PCM audio file to transcribe. */ public static void syncRecognizeFile(String fileName) throws Exception { try (SpeechClient speech = SpeechClient.create()) { Path path = Paths.get(fileName); byte[] data = Files.readAllBytes(path); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Use blocking call to get audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync]
/**
 * Inserts a new {@code SpeechRecognitionResult} builder, initialized from the
 * default instance, at {@code index} in the results list and returns it for
 * in-place editing.
 *
 * <pre>
 * Output only. Sequential list of transcription results corresponding to
 * sequential portions of audio.
 * </pre>
 *
 * <code>repeated .google.cloud.speech.v1.SpeechRecognitionResult results = 2;</code>
 */
public com.google.cloud.speech.v1.SpeechRecognitionResult.Builder addResultsBuilder(int index) {
  return getResultsFieldBuilder()
      .addBuilder(
          index, com.google.cloud.speech.v1.SpeechRecognitionResult.getDefaultInstance());
}
/**
// Take only the first alternative of this result and print its transcript.
SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
System.out.printf("Transcription: %s%n", alternative.getTranscript());