@java.lang.Override
public Builder newBuilderForType() {
  // Delegates to the static factory so every entry point shares one builder path.
  return newBuilder();
}
@java.lang.Override
public com.google.cloud.speech.v1.RecognitionAudio buildPartial() {
  // Materialize a message from the builder's current state without validation.
  com.google.cloud.speech.v1.RecognitionAudio result =
      new com.google.cloud.speech.v1.RecognitionAudio(this);
  // Both oneof cases (1 = content, 2 = uri) store their value in the same slot,
  // so a single combined check copies it over.
  if (audioSourceCase_ == 1 || audioSourceCase_ == 2) {
    result.audioSource_ = audioSource_;
  }
  result.audioSourceCase_ = audioSourceCase_;
  onBuilt();
  return result;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Structural equality over the oneof payload plus unknown fields.
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.RecognitionAudio)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.RecognitionAudio other =
      (com.google.cloud.speech.v1.RecognitionAudio) obj;
  // Different oneof cases can never be equal.
  if (!getAudioSourceCase().equals(other.getAudioSourceCase())) {
    return false;
  }
  switch (audioSourceCase_) {
    case 1:
      if (!getContent().equals(other.getContent())) {
        return false;
      }
      break;
    case 2:
      if (!getUri().equals(other.getUri())) {
        return false;
      }
      break;
    case 0:
    default:
      // Nothing set: only unknown fields remain to compare.
  }
  return unknownFields.equals(other.unknownFields);
}
@java.lang.Override
public int hashCode() {
  // Cache the hash; 0 is the sentinel for "not yet computed".
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  // Mix in only the oneof member that is actually set, keeping the exact
  // multiplier sequence so hashes stay stable across this rewrite.
  if (audioSourceCase_ == 1) {
    hash = (37 * hash) + CONTENT_FIELD_NUMBER;
    hash = (53 * hash) + getContent().hashCode();
  } else if (audioSourceCase_ == 2) {
    hash = (37 * hash) + URI_FIELD_NUMBER;
    hash = (53 * hash) + getUri().hashCode();
  }
  hash = (29 * hash) + unknownFields.hashCode();
  memoizedHashCode = hash;
  return hash;
}
public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionAudio other) {
  // Merging the default instance is a no-op.
  if (other == com.google.cloud.speech.v1.RecognitionAudio.getDefaultInstance()) {
    return this;
  }
  com.google.cloud.speech.v1.RecognitionAudio.AudioSourceCase otherCase =
      other.getAudioSourceCase();
  if (otherCase == com.google.cloud.speech.v1.RecognitionAudio.AudioSourceCase.CONTENT) {
    setContent(other.getContent());
  } else if (otherCase == com.google.cloud.speech.v1.RecognitionAudio.AudioSourceCase.URI) {
    // Copy the raw oneof slot directly, mirroring the generated setter's effect.
    audioSourceCase_ = 2;
    audioSource_ = other.audioSource_;
    onChanged();
  }
  // AUDIOSOURCE_NOT_SET: nothing to merge.
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Field-by-field equality for RecognizeRequest, with early exits.
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.RecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.RecognizeRequest other =
      (com.google.cloud.speech.v1.RecognizeRequest) obj;
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (hasAudio() != other.hasAudio()) {
    return false;
  }
  if (hasAudio() && !getAudio().equals(other.getAudio())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Field-by-field equality for LongRunningRecognizeRequest, with early exits.
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.LongRunningRecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.LongRunningRecognizeRequest other =
      (com.google.cloud.speech.v1.LongRunningRecognizeRequest) obj;
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (hasAudio() != other.hasAudio()) {
    return false;
  }
  if (hasAudio() && !getAudio().equals(other.getAudio())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * *Required* The audio data to be recognized.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionAudio audio = 2;</code>
 */
public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) {
  // When a nested builder exists it owns the field; delegate and exit early.
  if (audioBuilder_ != null) {
    audioBuilder_.mergeFrom(value);
    return this;
  }
  // Otherwise merge into (or adopt) the plain message field.
  audio_ =
      (audio_ == null)
          ? value
          : com.google.cloud.speech.v1.RecognitionAudio.newBuilder(audio_)
              .mergeFrom(value)
              .buildPartial();
  onChanged();
  return this;
}
/**
/**
 *
 *
 * <pre>
 * *Required* The audio data to be recognized.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionAudio audio = 2;</code>
 */
public Builder mergeAudio(com.google.cloud.speech.v1.RecognitionAudio value) {
  if (audioBuilder_ == null) {
    // No nested builder: combine with any existing message, or take the value as-is.
    if (audio_ == null) {
      audio_ = value;
    } else {
      audio_ =
          com.google.cloud.speech.v1.RecognitionAudio.newBuilder(audio_)
              .mergeFrom(value)
              .buildPartial();
    }
    onChanged();
  } else {
    // The nested builder owns the field; merge into it directly.
    audioBuilder_.mergeFrom(value);
  }
  return this;
}
/**
public RecognitionAudio audio() {
  // Sample audio hosted in a public GCS bucket used by these samples.
  String sampleUri = "gs://gapic-toolkit/hello.flac";
  return RecognitionAudio.newBuilder().setUri(sampleUri).build();
}
}
.setLanguageCode("en-US") .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
/** Issues a synchronous recognize request against a sample GCS file, propagating any failure. */
public static void executeNoCatch() throws Exception {
  try (SpeechClient client = SpeechClient.create()) {
    RecognitionConfig recognitionConfig =
        RecognitionConfig.newBuilder()
            .setLanguageCode("en-US")
            .setSampleRateHertz(44100)
            .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
            .build();
    RecognitionAudio recognitionAudio =
        RecognitionAudio.newBuilder().setUri("gs://gapic-toolkit/hello.flac").build();
    // The response is intentionally unused; this helper only exercises the call path.
    RecognizeResponse response = client.recognize(recognitionConfig, recognitionAudio);
  }
}
@Test
@SuppressWarnings("all")
public void longRunningRecognizeExceptionTest() throws Exception {
  // Prime the mock server to fail the next RPC with INVALID_ARGUMENT.
  StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
  mockSpeech.addException(exception);

  try {
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
            .setSampleRateHertz(44100)
            .setLanguageCode("en-US")
            .build();
    RecognitionAudio audio =
        RecognitionAudio.newBuilder().setUri("gs://bucket_name/file_name.flac").build();
    client.longRunningRecognizeAsync(config, audio).get();
    Assert.fail("No exception raised");
  } catch (ExecutionException e) {
    // The async surface wraps the gRPC failure in an ExecutionException; verify its cause.
    Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
    InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
    Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
  }
}
@Test @SuppressWarnings("all") public void recognizeExceptionTest() throws Exception { StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); mockSpeech.addException(exception); try { RecognitionConfig.AudioEncoding encoding = RecognitionConfig.AudioEncoding.FLAC; int sampleRateHertz = 44100; String languageCode = "en-US"; RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(encoding) .setSampleRateHertz(sampleRateHertz) .setLanguageCode(languageCode) .build(); String uri = "gs://bucket_name/file_name.flac"; RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uri).build(); client.recognize(config, audio); Assert.fail("No exception raised"); } catch (InvalidArgumentException e) { // Expected exception } }
@Test
@SuppressWarnings("all")
public void recognizeTest() {
  // Stub a canned (empty) response on the mock server.
  RecognizeResponse expectedResponse = RecognizeResponse.newBuilder().build();
  mockSpeech.addResponse(expectedResponse);

  RecognitionConfig config =
      RecognitionConfig.newBuilder()
          .setEncoding(RecognitionConfig.AudioEncoding.FLAC)
          .setSampleRateHertz(44100)
          .setLanguageCode("en-US")
          .build();
  RecognitionAudio audio =
      RecognitionAudio.newBuilder().setUri("gs://bucket_name/file_name.flac").build();

  RecognizeResponse actualResponse = client.recognize(config, audio);
  Assert.assertEquals(expectedResponse, actualResponse);

  // Verify exactly one request reached the server and that it carried our inputs.
  List<GeneratedMessageV3> actualRequests = mockSpeech.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  RecognizeRequest actualRequest = (RecognizeRequest) actualRequests.get(0);
  Assert.assertEquals(config, actualRequest.getConfig());
  Assert.assertEquals(audio, actualRequest.getAudio());

  // The client library must attach its standard API-client header.
  Assert.assertTrue(
      channelProvider.isHeaderSent(
          ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
          GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}
.build(); String uri = "gs://bucket_name/file_name.flac"; RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uri).build();
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();
.setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build();
/** * Performs speech recognition on raw PCM audio and prints the transcription. * * @param fileName the path to a PCM audio file to transcribe. */ public static void syncRecognizeFile(String fileName) throws Exception { try (SpeechClient speech = SpeechClient.create()) { Path path = Paths.get(fileName); byte[] data = Files.readAllBytes(path); ByteString audioBytes = ByteString.copyFrom(data); // Configure request with local raw PCM audio RecognitionConfig config = RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setLanguageCode("en-US") .setSampleRateHertz(16000) .build(); RecognitionAudio audio = RecognitionAudio.newBuilder().setContent(audioBytes).build(); // Use blocking call to get audio transcript RecognizeResponse response = speech.recognize(config, audio); List<SpeechRecognitionResult> results = response.getResultsList(); for (SpeechRecognitionResult result : results) { // There can be several alternative transcripts for a given chunk of speech. Just use the // first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcription: %s%n", alternative.getTranscript()); } } } // [END speech_transcribe_sync]
RecognitionAudio.newBuilder().setContent(ByteString.copyFrom(content)).build();