/** Returns a fresh, empty {@link Builder} for this message type. */
@java.lang.Override
public Builder newBuilderForType() {
  return newBuilder();
}
/**
 * Value equality for {@code RecognitionConfig}: two instances are equal when
 * every field — including the repeated speech_contexts list and the unknown
 * field set — compares equal.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.RecognitionConfig)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.RecognitionConfig other =
      (com.google.cloud.speech.v1.RecognitionConfig) obj;
  // Field-by-field comparison, bailing out on the first mismatch.
  if (encoding_ != other.encoding_) {
    return false;
  }
  if (getSampleRateHertz() != other.getSampleRateHertz()) {
    return false;
  }
  if (getEnableSeparateRecognitionPerChannel()
      != other.getEnableSeparateRecognitionPerChannel()) {
    return false;
  }
  if (!getLanguageCode().equals(other.getLanguageCode())) {
    return false;
  }
  if (getMaxAlternatives() != other.getMaxAlternatives()) {
    return false;
  }
  if (getProfanityFilter() != other.getProfanityFilter()) {
    return false;
  }
  if (!getSpeechContextsList().equals(other.getSpeechContextsList())) {
    return false;
  }
  if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) {
    return false;
  }
  if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) {
    return false;
  }
  if (!getModel().equals(other.getModel())) {
    return false;
  }
  if (getUseEnhanced() != other.getUseEnhanced()) {
    return false;
  }
  // Unknown fields must match as well.
  return unknownFields.equals(other.unknownFields);
}
/**
 * Snapshots this builder's state into a new (possibly partially-initialized)
 * {@code RecognitionConfig}. Standard protoc-generated pattern: scalar fields
 * are copied directly; the repeated speech_contexts field is either frozen in
 * place (no nested builder) or delegated to its repeated-field builder.
 */
@java.lang.Override
public com.google.cloud.speech.v1.RecognitionConfig buildPartial() {
  com.google.cloud.speech.v1.RecognitionConfig result =
      new com.google.cloud.speech.v1.RecognitionConfig(this);
  int from_bitField0_ = bitField0_; // NOTE(review): read but never used — typical protoc output.
  int to_bitField0_ = 0; // No optional-field presence bits exist for this proto3 message.
  result.encoding_ = encoding_;
  result.sampleRateHertz_ = sampleRateHertz_;
  result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_;
  result.languageCode_ = languageCode_;
  result.maxAlternatives_ = maxAlternatives_;
  result.profanityFilter_ = profanityFilter_;
  if (speechContextsBuilder_ == null) {
    // Bit 0x00000040 marks the speechContexts_ list as mutable. Freeze it once
    // so the built message shares an unmodifiable view, then clear the bit so
    // a later add re-copies the list instead of mutating the built message.
    if (((bitField0_ & 0x00000040) == 0x00000040)) {
      speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
      bitField0_ = (bitField0_ & ~0x00000040);
    }
    result.speechContexts_ = speechContexts_;
  } else {
    result.speechContexts_ = speechContextsBuilder_.build();
  }
  result.enableWordTimeOffsets_ = enableWordTimeOffsets_;
  result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
  result.model_ = model_;
  result.useEnhanced_ = useEnhanced_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
/**
 *
 *
 * <pre>
 * *Required* The language of the supplied audio as a
 * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
 * Example: "en-US".
 * See [Language Support](/speech-to-text/docs/languages)
 * for a list of the currently supported language codes.
 * </pre>
 *
 * <code>string language_code = 3;</code>
 *
 * @return this builder, for call chaining
 */
public Builder clearLanguageCode() {
  // Reset to the proto3 default (empty string) taken from the default instance.
  languageCode_ = getDefaultInstance().getLanguageCode();
  onChanged();
  return this;
}
/**
// NOTE(review): fragment only — this appears to be the body of the generated
// clearModel() builder method (resets model_ to the proto3 default from the
// default instance, then notifies listeners). The enclosing method signature
// is outside this chunk — confirm against the full file.
model_ = getDefaultInstance().getModel(); onChanged(); return this;
public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) { if (other == com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance()) return this; if (other.encoding_ != 0) { setEncodingValue(other.getEncodingValue()); if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); if (other.getEnableSeparateRecognitionPerChannel() != false) { setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel()); if (!other.getLanguageCode().isEmpty()) { languageCode_ = other.languageCode_; onChanged(); if (other.getMaxAlternatives() != 0) { setMaxAlternatives(other.getMaxAlternatives()); if (other.getProfanityFilter() != false) { setProfanityFilter(other.getProfanityFilter()); if (other.getEnableWordTimeOffsets() != false) { setEnableWordTimeOffsets(other.getEnableWordTimeOffsets()); if (other.getEnableAutomaticPunctuation() != false) { setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation()); if (!other.getModel().isEmpty()) { model_ = other.model_; onChanged();
/** Returns the shared all-defaults singleton for this message type. */
@java.lang.Override
public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() {
  return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
}
// NOTE(review): fragment only — constructs the observer that receives
// streaming-recognize responses, handing it the enclosing object and the
// request config's language code. Presumably part of a client/sample class;
// the surrounding statement (assignment or argument position) lies outside
// this chunk — confirm against the full file.
new ResponseApiStreamingObserver<StreamingRecognizeResponse>( this, config.getLanguageCode());
/**
 * Value equality for {@code StreamingRecognitionConfig}: config presence and
 * value, the two boolean flags, and the unknown field set must all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.StreamingRecognitionConfig)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.StreamingRecognitionConfig other =
      (com.google.cloud.speech.v1.StreamingRecognitionConfig) obj;
  // config is a message field: presence must agree, and when both sides have
  // it the values must be equal.
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (getSingleUtterance() != other.getSingleUtterance()) {
    return false;
  }
  if (getInterimResults() != other.getInterimResults()) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @return the configured config, or the default instance when unset
 */
public com.google.cloud.speech.v1.RecognitionConfig getConfig() {
  if (config_ != null) {
    return config_;
  }
  return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
}
/**
/**
 * Value equality for {@code RecognizeRequest}: config and audio presence and
 * values, plus the unknown field set, must all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.RecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.RecognizeRequest other =
      (com.google.cloud.speech.v1.RecognizeRequest) obj;
  // Message fields: presence must agree, and present values must be equal.
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (hasAudio() != other.hasAudio()) {
    return false;
  }
  if (hasAudio() && !getAudio().equals(other.getAudio())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @param value the config to merge into any previously-set value
 * @return this builder, for call chaining
 */
public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) {
  // With a nested builder in play, delegate the merge to it.
  if (configBuilder_ != null) {
    configBuilder_.mergeFrom(value);
    return this;
  }
  if (config_ == null) {
    // Nothing set yet: adopt the incoming message directly.
    config_ = value;
  } else {
    // Merge into the existing message via a throwaway builder.
    config_ =
        com.google.cloud.speech.v1.RecognitionConfig.newBuilder(config_)
            .mergeFrom(value)
            .buildPartial();
  }
  onChanged();
  return this;
}
/**
// Interior of the generated hashCode(): each set field is mixed into the
// running hash with the protoc pattern
//   hash = 37*hash + FIELD_NUMBER; hash = 53*hash + valueHash;
//
// Fix: the original chunk lost the closing brace of the
// "if (getSpeechContextsCount() > 0)" guard and the
// ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER mixing line, so the word-time-offsets
// hash was folded inside the guard (and without its field-number prefix).
// Restored the canonical sequence.
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + ENCODING_FIELD_NUMBER;
hash = (53 * hash) + encoding_;
hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
hash = (53 * hash) + getSampleRateHertz();
hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel());
hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
hash = (53 * hash) + getLanguageCode().hashCode();
hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
hash = (53 * hash) + getMaxAlternatives();
hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
// Repeated field: only mixed in when non-empty.
if (getSpeechContextsCount() > 0) {
  hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
  hash = (53 * hash) + getSpeechContextsList().hashCode();
}
hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets());
hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
hash = (37 * hash) + MODEL_FIELD_NUMBER;
hash = (53 * hash) + getModel().hashCode();
hash = (37 * hash) + USE_ENHANCED_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseEnhanced());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @return the configured config, or the default instance when unset
 */
public com.google.cloud.speech.v1.RecognitionConfig getConfig() {
  if (config_ != null) {
    return config_;
  }
  return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
}
/**
/**
 * Value equality for {@code LongRunningRecognizeRequest}: config and audio
 * presence and values, plus the unknown field set, must all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.LongRunningRecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.LongRunningRecognizeRequest other =
      (com.google.cloud.speech.v1.LongRunningRecognizeRequest) obj;
  // Message fields: presence must agree, and present values must be equal.
  if (hasConfig() != other.hasConfig()) {
    return false;
  }
  if (hasConfig() && !getConfig().equals(other.getConfig())) {
    return false;
  }
  if (hasAudio() != other.hasAudio()) {
    return false;
  }
  if (hasAudio() && !getAudio().equals(other.getAudio())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @param value the config to merge into any previously-set value
 * @return this builder, for call chaining
 */
public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) {
  // With a nested builder in play, delegate the merge to it.
  if (configBuilder_ != null) {
    configBuilder_.mergeFrom(value);
    return this;
  }
  if (config_ == null) {
    // Nothing set yet: adopt the incoming message directly.
    config_ = value;
  } else {
    // Merge into the existing message via a throwaway builder.
    config_ =
        com.google.cloud.speech.v1.RecognitionConfig.newBuilder(config_)
            .mergeFrom(value)
            .buildPartial();
  }
  onChanged();
  return this;
}
/**
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @return the configured config, or the default instance when unset
 */
public com.google.cloud.speech.v1.RecognitionConfig getConfig() {
  if (config_ != null) {
    return config_;
  }
  return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
}
/**
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @param value the config to merge into any previously-set value
 * @return this builder, for call chaining
 */
public Builder mergeConfig(com.google.cloud.speech.v1.RecognitionConfig value) {
  // With a nested builder in play, delegate the merge to it.
  if (configBuilder_ != null) {
    configBuilder_.mergeFrom(value);
    return this;
  }
  if (config_ == null) {
    // Nothing set yet: adopt the incoming message directly.
    config_ = value;
  } else {
    // Merge into the existing message via a throwaway builder.
    config_ =
        com.google.cloud.speech.v1.RecognitionConfig.newBuilder(config_)
            .mergeFrom(value)
            .buildPartial();
  }
  onChanged();
  return this;
}
/**
/**
 *
 *
 * <pre>
 * *Required* Provides information to the recognizer that specifies how to
 * process the request.
 * </pre>
 *
 * <code>.google.cloud.speech.v1.RecognitionConfig config = 1;</code>
 *
 * @return a read view of the config: the live nested builder when one exists,
 *     otherwise the stored message or the default instance
 */
public com.google.cloud.speech.v1.RecognitionConfigOrBuilder getConfigOrBuilder() {
  if (configBuilder_ != null) {
    return configBuilder_.getMessageOrBuilder();
  }
  if (config_ != null) {
    return config_;
  }
  return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
}
/**
// NOTE(review): fragment of a builder chain — configures a RecognitionConfig
// for LINEAR16 (uncompressed PCM) audio sampled at 16000 Hz. The start of the
// enclosing statement and the rest of the chain (e.g. language code, .build())
// lie outside this chunk.
RecognitionConfig.newBuilder() .setEncoding(AudioEncoding.LINEAR16) .setSampleRateHertz(16000)