// Fragment of the protobuf-generated hashCode() for AnnotateImageResponse.
// NOTE(review): this excerpt is truncated — the method header, the remaining
// fields, and the closing brace of every `if` lie outside this view.
// Code left byte-identical; only comments added.
hash = (19 * hash) + getDescriptor().hashCode();
// Each populated field folds in its tag (37 * hash + FIELD_NUMBER) followed by
// the field value's hash (53 * hash + valueHash); empty fields are skipped so
// unset fields do not perturb the hash.
if (getFaceAnnotationsCount() > 0) {
hash = (37 * hash) + FACE_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getFaceAnnotationsList().hashCode();
if (getLandmarkAnnotationsCount() > 0) {
hash = (37 * hash) + LANDMARK_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLandmarkAnnotationsList().hashCode();
if (getLogoAnnotationsCount() > 0) {
hash = (37 * hash) + LOGO_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLogoAnnotationsList().hashCode();
if (getLabelAnnotationsCount() > 0) {
hash = (37 * hash) + LABEL_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLabelAnnotationsList().hashCode();
if (getLocalizedObjectAnnotationsCount() > 0) {
hash = (37 * hash) + LOCALIZED_OBJECT_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getLocalizedObjectAnnotationsList().hashCode();
if (getTextAnnotationsCount() > 0) {
hash = (37 * hash) + TEXT_ANNOTATIONS_FIELD_NUMBER;
hash = (53 * hash) + getTextAnnotationsList().hashCode();
// Singular message field: guarded by hasFullTextAnnotation() rather than a count.
if (hasFullTextAnnotation()) {
hash = (37 * hash) + FULL_TEXT_ANNOTATION_FIELD_NUMBER;
hash = (53 * hash) + getFullTextAnnotation().hashCode();
// Fragment of the protobuf-generated equals() for AnnotateImageResponse.
// NOTE(review): truncated — the method header, the `other` cast, the closing
// braces of the `if`s, and the final `return result;` are outside this view.
// Repeated fields: compare the whole list in one call.
result = result && getFaceAnnotationsList().equals(other.getFaceAnnotationsList());
result = result && getLandmarkAnnotationsList().equals(other.getLandmarkAnnotationsList());
result = result && getLogoAnnotationsList().equals(other.getLogoAnnotationsList());
result = result && getLabelAnnotationsList().equals(other.getLabelAnnotationsList());
result = result && getLocalizedObjectAnnotationsList()
    .equals(other.getLocalizedObjectAnnotationsList());
result = result && getTextAnnotationsList().equals(other.getTextAnnotationsList());
// Singular message fields: presence flags must match, and the values are only
// compared when the field is set on this instance.
result = result && (hasFullTextAnnotation() == other.hasFullTextAnnotation());
if (hasFullTextAnnotation()) {
result = result && getFullTextAnnotation().equals(other.getFullTextAnnotation());
result = result && (hasSafeSearchAnnotation() == other.hasSafeSearchAnnotation());
if (hasSafeSearchAnnotation()) {
result = result && getSafeSearchAnnotation().equals(other.getSafeSearchAnnotation());
result = result && (hasImagePropertiesAnnotation() == other.hasImagePropertiesAnnotation());
if (hasImagePropertiesAnnotation()) {
result = result && getImagePropertiesAnnotation().equals(other.getImagePropertiesAnnotation());
result = result && (hasCropHintsAnnotation() == other.hasCropHintsAnnotation());
if (hasCropHintsAnnotation()) {
result = result && getCropHintsAnnotation().equals(other.getCropHintsAnnotation());
result = result && (hasWebDetection() == other.hasWebDetection());
if (hasWebDetection()) {
result = result && getWebDetection().equals(other.getWebDetection());
// Fragment of the protobuf-generated serialization (presumably writeTo(CodedOutputStream)).
// NOTE(review): truncated — the method header and the `if (hasX())` guards that
// normally wrap each writeMessage call in generated code are not visible here;
// confirm against the full generated file. The integer arguments are the proto
// field numbers from AnnotateImageResponse.
output.writeMessage(6, getSafeSearchAnnotation());
output.writeMessage(8, getImagePropertiesAnnotation());
output.writeMessage(9, getError());
output.writeMessage(11, getCropHintsAnnotation());
output.writeMessage(12, getFullTextAnnotation());
output.writeMessage(13, getWebDetection());
output.writeMessage(14, getProductSearchResults());
output.writeMessage(21, getContext());
/**
 * Sample entry point: runs LABEL_DETECTION on an image and prints every field of
 * each returned label annotation.
 *
 * NOTE(review): this excerpt is truncated — `img`, `requests`, and `res` are
 * declared outside this view, and the closing braces are missing. Also, `vision`
 * is created without try-with-resources and is never closed in the visible code;
 * the other samples in this file use try-with-resources — confirm and align.
 */
public static void main(String... args) throws Exception {
ImageAnnotatorClient vision = ImageAnnotatorClient.create();
// Request label detection on the (externally built) image `img`.
Feature feat = Feature.newBuilder().setType(Type.LABEL_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
BatchAnnotateImagesResponse response = vision.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
// Per-response error check: print and bail out of the sample on failure.
if (res.hasError()) {
System.out.printf("Error: %s\n", res.getError().getMessage());
return;
// Dump every populated proto field of each label annotation.
for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
for (Map.Entry<FieldDescriptor, Object> entry : annotation.getAllFields().entrySet()) {
System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
// Sample fragment: LANDMARK_DETECTION — prints each landmark's description and
// the lat/lng of its first location.
// NOTE(review): truncated — `img`, `requests`, `res`, and closing braces are
// outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.LANDMARK_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
// try-with-resources closes the client automatically.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
for (EntityAnnotation annotation : res.getLandmarkAnnotationsList()) {
// First location only; assumes the locations list is non-empty — next() would
// throw NoSuchElementException otherwise. TODO confirm against the API contract.
LocationInfo info = annotation.getLocationsList().listIterator().next();
out.printf("Landmark: %s\n %s\n", annotation.getDescription(), info.getLatLng());
// Sample fragment: DOCUMENT_TEXT_DETECTION (dense OCR) — walks the structured
// TextAnnotation page hierarchy.
// NOTE(review): truncated — `img`, `requests`, `res`, and closing braces are
// outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
// NOTE(review): explicit close() inside try-with-resources is redundant — the
// client is closed again when the try block exits. Consider removing this call.
client.close();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
// Structured OCR result: pages -> blocks -> paragraphs -> words (rest truncated).
TextAnnotation annotation = res.getFullTextAnnotation();
for (Page page: annotation.getPagesList()) {
String pageText = "";
// Sample fragment: LOGO_DETECTION — prints each detected logo's description.
// NOTE(review): truncated — `img`, `requests`, `res`, and closing braces are
// outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.LOGO_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
for (EntityAnnotation annotation : res.getLogoAnnotationsList()) {
out.println(annotation.getDescription());
// Sample fragment: TEXT_DETECTION (sparse OCR) — prints each text annotation
// and its bounding polygon.
// NOTE(review): truncated — `img`, `requests`, `res`, and closing braces are
// outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.TEXT_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
for (EntityAnnotation annotation : res.getTextAnnotationsList()) {
out.printf("Text: %s\n", annotation.getDescription());
out.printf("Position : %s\n", annotation.getBoundingPoly());
// Sample fragment: CROP_HINTS — prints the bounding polygon of each crop hint.
// NOTE(review): truncated — `img`, `requests`, `res`, and closing braces are
// outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.CROP_HINTS).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
CropHintsAnnotation annotation = res.getCropHintsAnnotation();
for (CropHint hint : annotation.getCropHintsList()) {
out.println(hint.getBoundingPoly());
// Sample fragment: IMAGE_PROPERTIES — iterates the dominant colors of the image.
// NOTE(review): truncated — `img`, `requests`, `res`, the printf arguments, and
// closing braces are outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.IMAGE_PROPERTIES).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
DominantColorsAnnotation colors = res.getImagePropertiesAnnotation().getDominantColors();
for (ColorInfo color : colors.getColorsList()) {
out.printf(
// Sample fragment: FACE_DETECTION — prints emotion likelihoods and position for
// each detected face.
// NOTE(review): truncated — `img`, `requests`, `res`, the printf arguments, and
// closing braces are outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.FACE_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
for (FaceAnnotation annotation : res.getFaceAnnotationsList()) {
out.printf(
"anger: %s\njoy: %s\nsurprise: %s\nposition: %s",
// Sample fragment: SAFE_SEARCH_DETECTION — prints the likelihood of each
// safe-search category.
// NOTE(review): truncated — `img`, `requests`, `res`, the printf arguments, and
// closing braces are outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
out.printf(
"adult: %s\nmedical: %s\nspoofed: %s\nviolence: %s\nracy: %s\n",
// Sample fragment: WEB_DETECTION — prints web entities found for the image.
// NOTE(review): truncated — `img`, `requests`, `res`, the entity loop, and
// closing braces are outside this view; code left byte-identical.
Feature feat = Feature.newBuilder().setType(Type.WEB_DETECTION).build();
AnnotateImageRequest request =
    AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
if (res.hasError()) {
out.printf("Error: %s\n", res.getError().getMessage());
return;
WebDetection annotation = res.getWebDetection();
out.println("Entity:Id:Score");
out.println("===============");
/**
 * Runs OCR (text detection) on the given image and returns the recognized text.
 *
 * @param imageResource the image one wishes to analyze
 * @return the text extracted from the image aggregated to a String
 * @throws CloudVisionException if the image could not be read or if text extraction failed
 */
public String extractTextFromImage(Resource imageResource) {
  AnnotateImageResponse annotateResponse = analyzeImage(imageResource, Type.TEXT_DETECTION);
  String extractedText = annotateResponse.getFullTextAnnotation().getText();

  // An empty result is only an error when the response also carries a non-OK status;
  // an image that genuinely contains no text returns an empty string.
  boolean extractionFailed =
      extractedText.isEmpty()
          && annotateResponse.getError().getCode() != Code.OK.getNumber();
  if (extractionFailed) {
    throw new CloudVisionException(annotateResponse.getError().getMessage());
  }

  return extractedText;
}
/**
 * Fragment of the protobuf-generated Builder.mergeFrom(AnnotateImageResponse).
 *
 * NOTE(review): truncated — the repeated-field merge logic under the
 * `faceAnnotationsBuilder_ == null` branch, its else branch, the closing braces,
 * the onChanged()/mergeUnknownFields calls, and `return this;` are outside this
 * view. Code left byte-identical.
 */
public Builder mergeFrom(com.google.cloud.vision.v1.AnnotateImageResponse other) {
// Merging the default instance is a no-op.
if (other == com.google.cloud.vision.v1.AnnotateImageResponse.getDefaultInstance()) return this;
if (faceAnnotationsBuilder_ == null) {
// Singular message fields merge only when set on `other`.
if (other.hasFullTextAnnotation()) {
mergeFullTextAnnotation(other.getFullTextAnnotation());
if (other.hasSafeSearchAnnotation()) {
mergeSafeSearchAnnotation(other.getSafeSearchAnnotation());
if (other.hasImagePropertiesAnnotation()) {
mergeImagePropertiesAnnotation(other.getImagePropertiesAnnotation());
if (other.hasCropHintsAnnotation()) {
mergeCropHintsAnnotation(other.getCropHintsAnnotation());
if (other.hasWebDetection()) {
mergeWebDetection(other.getWebDetection());
if (other.hasProductSearchResults()) {
mergeProductSearchResults(other.getProductSearchResults());
if (other.hasError()) {
mergeError(other.getError());
if (other.hasContext()) {
mergeContext(other.getContext());
// Sample fragment: OBJECT_LOCALIZATION — prints name and confidence of each
// localized object.
// NOTE(review): truncated — the `AnnotateImageRequest request =` assignment
// target, `img`, `requests`, `res`, and closing braces are outside this view.
// Unlike the other samples here, no res.hasError() check is visible — confirm
// it exists in the full source. Code left byte-identical.
AnnotateImageRequest.newBuilder()
    .addFeatures(Feature.newBuilder().setType(Type.OBJECT_LOCALIZATION))
    .setImage(img)
    .build();
requests.add(request);
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (LocalizedObjectAnnotation entity : res.getLocalizedObjectAnnotationsList()) {
out.format("Object name: %s\n", entity.getName());
out.format("Confidence: %s\n", entity.getScore());
// Sample fragment: asynchronous batch file annotation with DOCUMENT_TEXT_DETECTION.
// NOTE(review): truncated — `requests` is never populated in the visible code,
// `annotateImageResponse` and the `if` matching the `} else {` are declared
// outside this view, and the OperationFuture returned by
// asyncBatchAnnotateFilesAsync() is discarded here (the result is presumably
// awaited in the elided code — confirm). Code left byte-identical.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
List<AsyncAnnotateFileRequest> requests = new ArrayList<>();
Feature feature = Feature.newBuilder().setType(Feature.Type.DOCUMENT_TEXT_DETECTION).build();
client.asyncBatchAnnotateFilesAsync(requests);
System.out.format("\nText: %s\n", annotateImageResponse.getFullTextAnnotation().getText());
} else {
System.out.println("No MATCH");
/**
 *
 *
 * <pre>
 * If set, represents the error message for the operation.
 * Note that filled-in image annotations are guaranteed to be
 * correct, even when `error` is set.
 * </pre>
 *
 * <code>.google.rpc.Status error = 9;</code>
 */
public com.google.rpc.StatusOrBuilder getErrorOrBuilder() {
  // Status implements StatusOrBuilder, so the message itself serves as the view.
  com.google.rpc.Status currentError = getError();
  return currentError;
}
/**
 * This method downloads an image from a URL and sends its contents to the Vision API for label
 * detection.
 *
 * @param imageUrl the URL of the image
 * @param map the model map to use
 * @return a string with the list of labels and percentage of certainty
 * @throws CloudVisionException if the Vision API call produces an error
 */
@GetMapping("/extractLabels")
public ModelAndView extractLabels(String imageUrl, ModelMap map) {
  AnnotateImageResponse response = this.cloudVisionTemplate.analyzeImage(
      this.resourceLoader.getResource(imageUrl), Type.LABEL_DETECTION);

  // Collectors.toMap without a merge function throws IllegalStateException on
  // duplicate keys; the API may return several annotations sharing a description,
  // so keep the highest score for each label.
  Map<String, Float> imageLabels =
      response.getLabelAnnotationsList()
          .stream()
          .collect(Collectors.toMap(
              EntityAnnotation::getDescription,
              EntityAnnotation::getScore,
              Float::max));

  map.addAttribute("annotations", imageLabels);
  map.addAttribute("imageUrl", imageUrl);
  return new ModelAndView("result", map);
}
/**
 *
 *
 * <pre>
 * If present, text (OCR) detection or document (OCR) text detection has
 * completed successfully.
 * This annotation provides the structural hierarchy for the OCR detected
 * text.
 * </pre>
 *
 * <code>.google.cloud.vision.v1.TextAnnotation full_text_annotation = 12;</code>
 */
public com.google.cloud.vision.v1.TextAnnotationOrBuilder getFullTextAnnotationOrBuilder() {
  // TextAnnotation implements TextAnnotationOrBuilder, so the message doubles as the view.
  com.google.cloud.vision.v1.TextAnnotation currentAnnotation = getFullTextAnnotation();
  return currentAnnotation;
}