// Computes and memoizes the hash code for this StreamingRecognizeResponse.
// The prime multipliers and field-number mix-ins follow the protobuf-generated
// hashing scheme; the sequence must not be reordered, since equal messages
// must produce equal hashes (equals/hashCode contract).
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    // Cached from a previous call; the message is immutable, so it stays valid.
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  // Optional message field: only mixed in when present, mirroring equals().
  if (hasError()) {
    hash = (37 * hash) + ERROR_FIELD_NUMBER;
    hash = (53 * hash) + getError().hashCode();
  }
  // Repeated field: only mixed in when non-empty.
  if (getResultsCount() > 0) {
    hash = (37 * hash) + RESULTS_FIELD_NUMBER;
    hash = (53 * hash) + getResultsList().hashCode();
  }
  // Enum field is always mixed in, including its default (0) value.
  hash = (37 * hash) + SPEECH_EVENT_TYPE_FIELD_NUMBER;
  hash = (53 * hash) + speechEventType_;
  hash = (29 * hash) + unknownFields.hashCode();
  memoizedHashCode = hash;
  return hash;
}
/**
 * Returns the shared default (all-fields-unset) instance of
 * {@code StreamingRecognizeResponse}.
 */
@java.lang.Override
public com.google.cloud.speech.v1.StreamingRecognizeResponse getDefaultInstanceForType() {
  final com.google.cloud.speech.v1.StreamingRecognizeResponse defaultInstance =
      com.google.cloud.speech.v1.StreamingRecognizeResponse.getDefaultInstance();
  return defaultInstance;
}
/** Creates a fresh {@code Builder} for constructing messages of this type. */
@java.lang.Override
public Builder newBuilderForType() {
  // Delegates to the static factory so all builder creation goes one route.
  final Builder builder = newBuilder();
  return builder;
}
/**
 * Field-by-field equality for {@code StreamingRecognizeResponse}: compares the
 * optional error message, the repeated results list, the speech event type
 * enum value, and any unknown fields.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.StreamingRecognizeResponse)) {
    // Defer to the superclass for objects of unrelated types.
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.StreamingRecognizeResponse that =
      (com.google.cloud.speech.v1.StreamingRecognizeResponse) obj;
  // Presence of the optional error field must match before its value is compared.
  if (hasError() != that.hasError()) {
    return false;
  }
  if (hasError() && !getError().equals(that.getError())) {
    return false;
  }
  if (!getResultsList().equals(that.getResultsList())) {
    return false;
  }
  if (speechEventType_ != that.speechEventType_) {
    return false;
  }
  return unknownFields.equals(that.unknownFields);
}
@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
// Merges the set fields of {@code other} into this builder (protobuf-generated
// merge semantics: default-instance input is a no-op).
// NOTE(review): this fragment appears truncated — the closing braces for the
// method and the hasError() branch are missing. In standard generated code,
// setSpeechEventTypeValue(...) runs unconditionally rather than inside the
// hasError() branch; confirm against the full generated file.
public Builder mergeFrom(com.google.cloud.speech.v1.StreamingRecognizeResponse other) {
  if (other == com.google.cloud.speech.v1.StreamingRecognizeResponse.getDefaultInstance())
    return this;
  if (other.hasError()) {
    mergeError(other.getError());
    setSpeechEventTypeValue(other.getSpeechEventTypeValue());
public void onResponse(StreamingRecognizeResponse response) { responses.add(response); StreamingRecognitionResult result = response.getResultsList().get(0); // There can be several alternative transcripts for a given chunk of speech. Just // use the first (most likely) one here. SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); System.out.printf("Transcript : %s\n", alternative.getTranscript()); }
/**
 *
 *
 * <pre>
 * Output only. If set, returns a [google.rpc.Status][google.rpc.Status] message that
 * specifies the error for the operation.
 * </pre>
 *
 * <code>.google.rpc.Status error = 1;</code>
 *
 * @return a read-only view of the error field
 */
public com.google.rpc.StatusOrBuilder getErrorOrBuilder() {
  // The concrete Status message implements StatusOrBuilder, so the accessor
  // result can be handed back directly as the read-only interface.
  final com.google.rpc.StatusOrBuilder view = getError();
  return view;
}
/**
 * Handles one streaming response: extracts the first (most likely) transcript,
 * if any, and fans it out to all registered listeners along with the
 * finality flag of the first result.
 */
@Override
public void onNext(StreamingRecognizeResponse response) {
  String transcript = null;
  boolean finalResult = false;
  if (response.getResultsCount() > 0) {
    final StreamingRecognitionResult topResult = response.getResults(0);
    finalResult = topResult.getIsFinal();
    if (topResult.getAlternativesCount() > 0) {
      transcript = topResult.getAlternatives(0).getTranscript();
    }
  }
  // Nothing to report if the response carried no transcribed alternative.
  if (transcript == null) {
    return;
  }
  for (Listener listener : mListeners) {
    listener.onSpeechRecognized(transcript, finalResult);
  }
}
/**
 * Prints the top transcript of every buffered response once the stream ends.
 *
 * <p>Fix: the original called {@code getResultsList().get(0)} without checking
 * emptiness, which throws {@code IndexOutOfBoundsException} on responses with
 * no results (e.g. error-only or speech-event-only responses). Those responses
 * are now skipped instead.
 */
public void onComplete() {
  for (StreamingRecognizeResponse response : responses) {
    // Skip result-less responses rather than crashing on get(0).
    if (response.getResultsCount() == 0) {
      continue;
    }
    StreamingRecognitionResult result = response.getResults(0);
    // There may be several alternatives; use the first (most likely) one.
    if (result.getAlternativesCount() == 0) {
      continue;
    }
    SpeechRecognitionAlternative alternative = result.getAlternatives(0);
    System.out.printf("Transcript : %s\n", alternative.getTranscript());
  }
}
// Serializes this message to the protobuf wire format. Field write order and
// tag numbers (1 = error, 2 = results, 4 = speech_event_type) are part of the
// generated serialization contract and must match getSerializedSize().
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
  // Optional message field: only written when set.
  if (error_ != null) {
    output.writeMessage(1, getError());
  }
  // Repeated field: each element gets its own tagged message entry.
  for (int i = 0; i < results_.size(); i++) {
    output.writeMessage(2, results_.get(i));
  }
  // Enum field: proto3 omits the default (SPEECH_EVENT_UNSPECIFIED) value.
  if (speechEventType_
      != com.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType
          .SPEECH_EVENT_UNSPECIFIED.getNumber()) {
    output.writeEnum(4, speechEventType_);
  }
  // Preserve any fields this binary does not know about.
  unknownFields.writeTo(output);
}
// NOTE(review): the original fragment was syntactically broken (missing braces
// around the hasError() branch, and an orphaned "message.getResultsCount() == 0)"
// condition). The control flow below is the most plausible reconstruction —
// terminate the session on error, bail out on empty results — and should be
// confirmed against version control.
if (message.hasError()) {
  logger.debug(
      "Received error from StreamingRecognizeResponse: " + message.getError().getMessage());
  requestManager.terminateCurrentSession();
  return;
}
// Nothing to process when the response carries no results.
if (message.getResultsCount() == 0) {
  return;
}
List<StreamingRecognitionResult> results = message.getResultsList();
// Take the first recognition result from the streaming response.
// NOTE(review): assumes the response contains at least one result —
// getResultsList().get(0) throws IndexOutOfBoundsException otherwise; confirm
// that the surrounding code checks getResultsCount() before reaching here.
StreamingRecognitionResult result = response.getResultsList().get(0);
// Fragment of a gRPC MethodDescriptor builder chain: installs the protobuf
// marshaller for StreamingRecognizeResponse and a schema descriptor for the
// "StreamingRecognize" method. NOTE(review): the opening of the chain (method
// name, call type, request marshaller) lies outside this view.
io.grpc.protobuf.ProtoUtils.marshaller(
        com.google.cloud.speech.v1.StreamingRecognizeResponse
            .getDefaultInstance()))
    .setSchemaDescriptor(new SpeechMethodDescriptorSupplier("StreamingRecognize"))
    .build();
// Computes and memoizes the wire-format byte size of this message. The field
// tags and inclusion rules must mirror writeTo() exactly (1 = error when set,
// 2 = each results element, 4 = speech_event_type when non-default).
@java.lang.Override
public int getSerializedSize() {
  int size = memoizedSize;
  // -1 marks "not yet computed"; any other value is a valid cached size.
  if (size != -1) return size;

  size = 0;
  if (error_ != null) {
    size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getError());
  }
  for (int i = 0; i < results_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, results_.get(i));
  }
  // proto3 enum default (SPEECH_EVENT_UNSPECIFIED) takes no wire space.
  if (speechEventType_
      != com.google.cloud.speech.v1.StreamingRecognizeResponse.SpeechEventType
          .SPEECH_EVENT_UNSPECIFIED.getNumber()) {
    size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, speechEventType_);
  }
  size += unknownFields.getSerializedSize();
  memoizedSize = size;
  return size;
}
/**
 * Unit test for the streaming-recognize callable against a mock Speech service:
 * sends one empty request and verifies the single canned response is echoed back.
 */
@Test
@SuppressWarnings("all")
public void streamingRecognizeTest() throws Exception {
  // Queue one canned (empty) response on the mock service.
  StreamingRecognizeResponse cannedResponse = StreamingRecognizeResponse.newBuilder().build();
  mockSpeech.addResponse(cannedResponse);

  StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder().build();

  MockStreamObserver<StreamingRecognizeResponse> observer = new MockStreamObserver<>();
  BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> streamingCallable =
      client.streamingRecognizeCallable();
  ApiStreamObserver<StreamingRecognizeRequest> requestStream =
      streamingCallable.bidiStreamingCall(observer);

  // Send the single request, then close the client side of the stream.
  requestStream.onNext(request);
  requestStream.onCompleted();

  List<StreamingRecognizeResponse> received = observer.future().get();
  Assert.assertEquals(1, received.size());
  Assert.assertEquals(cannedResponse, received.get(0));
}
// Take the first recognition result from the streaming response.
// NOTE(review): assumes the response contains at least one result —
// getResultsList().get(0) throws IndexOutOfBoundsException otherwise; confirm
// that the surrounding code checks getResultsCount() before reaching here.
StreamingRecognitionResult result = response.getResultsList().get(0);
// Fragment of a gRPC MethodDescriptor builder chain: installs the protobuf
// marshaller for StreamingRecognizeResponse and a schema descriptor for the
// "StreamingRecognize" method. NOTE(review): the opening of the chain (method
// name, call type, request marshaller) lies outside this view.
io.grpc.protobuf.ProtoUtils.marshaller(
        com.google.cloud.speech.v1.StreamingRecognizeResponse
            .getDefaultInstance()))
    .setSchemaDescriptor(new SpeechMethodDescriptorSupplier("StreamingRecognize"))
    .build();