// Find all contours in the binary image, then simplify each one in place
// with polygon approximation (Douglas-Peucker).
Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
for (MatOfPoint contour : contours) {
    // approxPolyDP operates on 32-bit float points, so convert first
    contour.convertTo(mMOP2f1, CvType.CV_32FC2);
    Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, approxDistance, true);
    // write the simplified polygon back into the same list entry as integer points
    mMOP2f2.convertTo(contour, CvType.CV_32S);
}
// Cut the region of interest out of the source image, then scale that
// crop down to a fixed 100x100 result.
Mat croppedimage = cropImage(image, rect);
Size sz = new Size(100, 100);
Mat resizeimage = new Mat();
Imgproc.resize(croppedimage, resizeimage, sz);
String inputFileName="simm_01"; String inputExtension = "jpg"; String inputDir = getCacheDir().getAbsolutePath(); // use the cache directory for i/o String outputDir = getCacheDir().getAbsolutePath(); String outputExtension = "png"; String inputFilePath = inputDir + File.separator + inputFileName + "." + inputExtension; Log.d (this.getClass().getSimpleName(), "loading " + inputFilePath + "..."); Mat image = Imgcodecs.imread(inputFilePath); Log.d (this.getClass().getSimpleName(), "width of " + inputFileName + ": " + image.width()); // if width is 0 then it did not read your image. // for the canny edge detection algorithm, play with these to see different results int threshold1 = 70; int threshold2 = 100; Mat im_canny = new Mat(); // you have to initialize output image before giving it to the Canny method Imgproc.Canny(image, im_canny, threshold1, threshold2); String cannyFilename = outputDir + File.separator + inputFileName + "_canny-" + threshold1 + "-" + threshold2 + "." + outputExtension; Log.d (this.getClass().getSimpleName(), "Writing " + cannyFilename); Imgcodecs.imwrite(cannyFilename, im_canny);
// Convert an NV21 camera frame to RGB, detect line segments with
// Canny + probabilistic Hough, draw them, and return the frame as a Bitmap.
// FIX: mYuv was previously `new Mat()`; calling put(0, 0, data) on a
// dimensionless Mat stores nothing, so the color conversion ran on an
// empty image. Allocate it with the YUV420sp geometry (height * 3/2 rows).
Mat mYuv = new Mat(getFrameHeight() + getFrameHeight() / 2, getFrameWidth(), CvType.CV_8UC1);
Mat mRgba = new Mat();
Mat thresholdImage = new Mat(getFrameHeight() + getFrameHeight() / 2, getFrameWidth(), CvType.CV_8UC1);
mYuv.put(0, 0, data);
Imgproc.cvtColor(mYuv, mRgba, Imgproc.COLOR_YUV420sp2RGB, 4);
Imgproc.cvtColor(mRgba, thresholdImage, Imgproc.COLOR_RGB2GRAY, 4);
Imgproc.Canny(thresholdImage, thresholdImage, 80, 100, 3);
Mat lines = new Mat();
int threshold = 50;   // minimum Hough votes for a line
int minLineSize = 20; // shortest segment kept
int lineGap = 20;     // largest gap bridged within one segment
Imgproc.HoughLinesP(thresholdImage, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);
// NOTE(review): this reads the result as a 1 x N matrix (one line per
// column), which matches the OpenCV 2.4 Java bindings; newer releases
// return N x 1 — confirm against the OpenCV version in use.
for (int x = 0; x < lines.cols(); x++) {
    double[] vec = lines.get(0, x);
    double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
    Point start = new Point(x1, y1);
    Point end = new Point(x2, y2);
    Core.line(mRgba, start, end, new Scalar(255, 0, 0), 3);
}
Bitmap bmp = Bitmap.createBitmap(getFrameWidth(), getFrameHeight(), Bitmap.Config.ARGB_8888);
if (Utils.matToBitmap(mRgba, bmp))
    return bmp;
Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
Mat featuredImg = new Mat(); Scalar kpColor = new Scalar(255,159,10);//this will be color of keypoints //featuredImg will be the output of first image Features2d.drawKeypoints(img1, keypoints1, featuredImg , kpColor, 0); //featuredImg will be the output of first image Features2d.drawKeypoints(img1, keypoints1, featuredImg , kpColor, 0);
/**
 * Returns a fresh, empty Mat.
 * <p>
 * Loads the native OpenCV library first so that constructing the Mat is
 * safe even if nothing else has touched OpenCV yet.
 *
 * @return a newly constructed, empty {@code Mat}
 */
public static Mat getNewMat() {
    SX.loadNative(SX.NATIVES.OPENCV);
    return new Mat();
}
/**
 * Creates a Mat of the given size with 1, 3 or 4 unsigned 8-bit channels,
 * optionally pre-filled with a constant value.
 *
 * @param size dimensions of the new Mat
 * @param type channel count: 1, 3 or 4 (anything else yields an empty Mat)
 * @param fill constant fill value; a negative value means "leave uninitialized"
 * @return the new Mat, or an empty Mat for an unsupported channel count
 */
public static Mat getNewMat(Size size, int type, int fill) {
    SX.loadNative(SX.NATIVES.OPENCV);
    int cvType;
    switch (type) {
        case 1:
            cvType = CvType.CV_8UC1;
            break;
        case 3:
            cvType = CvType.CV_8UC3;
            break;
        case 4:
            cvType = CvType.CV_8UC4;
            break;
        default:
            // unsupported channel count: signal with an empty Mat
            return new Mat();
    }
    if (fill < 0) {
        return new Mat(size, cvType);
    }
    return new Mat(size, cvType, new Scalar(fill));
}
public LinkedList<CvSurfFeature> computeSurfKeypoints(BufferedImage img) { MatOfKeyPoint keypoints = new MatOfKeyPoint(); List<KeyPoint> myKeys; // Mat img_object = Highgui.imread(image, 0); //0 = CV_LOAD_IMAGE_GRAYSCALE // detector.detect(img_object, keypoints); byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3); matRGB.put(0, 0, data); Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1); Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR? byte[] dataGray = new byte[matGray.rows()*matGray.cols()*(int)(matGray.elemSize())]; matGray.get(0, 0, dataGray); detector.detect(matGray, keypoints); myKeys = keypoints.toList(); LinkedList<CvSurfFeature> myKeypoints = new LinkedList<CvSurfFeature>(); KeyPoint key; CvSurfFeature feat; for (Iterator<KeyPoint> iterator = myKeys.iterator(); iterator.hasNext(); ) { key = iterator.next(); feat = new CvSurfFeature(key.pt.x, key.pt.y, key.size, null); myKeypoints.add(feat); } return myKeypoints; }
public LinkedList<CvSiftFeature> computeSiftKeypoints(BufferedImage img) { MatOfKeyPoint keypoints = new MatOfKeyPoint(); List<KeyPoint> myKeys; // Mat img_object = Highgui.imread(image, 0); //0 = CV_LOAD_IMAGE_GRAYSCALE // detector.detect(img_object, keypoints); byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3); matRGB.put(0, 0, data); Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1); Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR? byte[] dataGray = new byte[matGray.rows()*matGray.cols()*(int)(matGray.elemSize())]; matGray.get(0, 0, dataGray); detector.detect(matGray, keypoints); myKeys = keypoints.toList(); LinkedList<CvSiftFeature> myKeypoints = new LinkedList<CvSiftFeature>(); KeyPoint key; CvSiftFeature feat; for (Iterator<KeyPoint> iterator = myKeys.iterator(); iterator.hasNext(); ) { key = iterator.next(); feat = new CvSiftFeature(key.pt.x, key.pt.y, key.size, null); myKeypoints.add(feat); } return myKeypoints; }
public LinkedList<CvSurfFeature> computeSurfKeypoints(BufferedImage img) { MatOfKeyPoint keypoints = new MatOfKeyPoint(); List<KeyPoint> myKeys; // Mat img_object = Highgui.imread(image, 0); //0 = CV_LOAD_IMAGE_GRAYSCALE // detector.detect(img_object, keypoints); byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3); matRGB.put(0, 0, data); Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1); Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR? byte[] dataGray = new byte[matGray.rows()*matGray.cols()*(int)(matGray.elemSize())]; matGray.get(0, 0, dataGray); detector.detect(matGray, keypoints); myKeys = keypoints.toList(); LinkedList<CvSurfFeature> myKeypoints = new LinkedList<CvSurfFeature>(); KeyPoint key; CvSurfFeature feat; for (Iterator<KeyPoint> iterator = myKeys.iterator(); iterator.hasNext(); ) { key = iterator.next(); feat = new CvSurfFeature(key.pt.x, key.pt.y, key.size, null); myKeypoints.add(feat); } return myKeypoints; }
public LinkedList<CvSiftFeature> computeSiftKeypoints(BufferedImage img) { MatOfKeyPoint keypoints = new MatOfKeyPoint(); List<KeyPoint> myKeys; // Mat img_object = Highgui.imread(image, 0); //0 = CV_LOAD_IMAGE_GRAYSCALE // detector.detect(img_object, keypoints); byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3); matRGB.put(0, 0, data); Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1); Imgproc.cvtColor(matRGB, matGray, Imgproc.COLOR_BGR2GRAY); //TODO: RGB or BGR? byte[] dataGray = new byte[matGray.rows()*matGray.cols()*(int)(matGray.elemSize())]; matGray.get(0, 0, dataGray); detector.detect(matGray, keypoints); myKeys = keypoints.toList(); LinkedList<CvSiftFeature> myKeypoints = new LinkedList<CvSiftFeature>(); KeyPoint key; CvSiftFeature feat; for (Iterator<KeyPoint> iterator = myKeys.iterator(); iterator.hasNext(); ) { key = iterator.next(); feat = new CvSiftFeature(key.pt.x, key.pt.y, key.size, null); myKeypoints.add(feat); } return myKeypoints; }
// Converts the BufferedImage into an OpenCV grayscale Mat as setup for
// feature extraction.
// NOTE(review): this snippet is truncated — the body ends after allocating
// matGray with no closing brace, and 'data' is never declared here
// (presumably the image's raster bytes, as in the compute*Keypoints
// methods — confirm against the full source).
@Override public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    List<KeyPoint> myKeys;
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1);
// Converts the BufferedImage into an OpenCV grayscale Mat as setup for
// feature extraction.
// NOTE(review): this snippet is truncated — the body ends after allocating
// matGray with no closing brace, and 'data' is never declared here
// (presumably the image's raster bytes, as in the compute*Keypoints
// methods — confirm against the full source).
@Override public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    List<KeyPoint> myKeys;
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1);
// Converts the BufferedImage into an OpenCV grayscale Mat as setup for
// feature extraction.
// NOTE(review): this snippet is truncated — the body ends after allocating
// matGray with no closing brace, and 'data' is never declared here
// (presumably the image's raster bytes, as in the compute*Keypoints
// methods — confirm against the full source).
@Override public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    List<KeyPoint> myKeys;
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1);
// Converts the BufferedImage into an OpenCV grayscale Mat as setup for
// feature extraction.
// NOTE(review): this snippet is truncated — the body ends after allocating
// matGray with no closing brace, and 'data' is never declared here
// (presumably the image's raster bytes, as in the compute*Keypoints
// methods — confirm against the full source).
@Override public void extract(BufferedImage img) {
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    Mat descriptors = new Mat();
    List<KeyPoint> myKeys;
    Mat matRGB = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    matRGB.put(0, 0, data);
    Mat matGray = new Mat(img.getHeight(),img.getWidth(),CvType.CV_8UC1);
/**
 * Normalizes a Mat into the list of planes used downstream: gray (8UC1)
 * and color (8UC3) images pass through unchanged; a BGRA (8UC4) image is
 * rebuilt as a BGR image plus — only when the alpha plane actually varies —
 * the alpha plane itself. Any other type yields an empty list.
 */
private List<Mat> checkMat(Mat mat) {
    List<Mat> mats = new ArrayList<>();
    int matType = mat.type();
    if (matType == CvType.CV_8UC1 || matType == CvType.CV_8UC3) {
        mats.add(mat);
        return mats;
    }
    if (matType == CvType.CV_8UC4) {
        // separate the four planes, peel off alpha, and rebuild a 3-channel image
        List<Mat> planes = new ArrayList<>();
        Core.split(mat, planes);
        Mat alpha = planes.remove(3);
        Mat bgr = new Mat(mat.size(), CvType.CV_8UC3);
        Core.merge(planes, bgr);
        mats.add(bgr);
        // a constant alpha plane carries no mask information — keep it
        // only when its standard deviation is non-zero
        MatOfDouble stdDev = new MatOfDouble();
        Core.meanStdDev(alpha, new MatOfDouble(), stdDev);
        if (stdDev.toArray()[0] > 0) {
            mats.add(alpha);
        }
    }
    return mats;
}
FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB); DescriptorExtractor descriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);; DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING); //first image Mat img1 = Highgui.imread("<image1 path>"); Mat descriptors1 = new Mat(); MatOfKeyPoint keypoints1 = new MatOfKeyPoint(); detector.detect(img1, keypoints1); descriptor.compute(img1, keypoints1, descriptors1); //second image Mat img2 = Highgui.imread("<image2 path>"); Mat descriptors2 = new Mat(); MatOfKeyPoint keypoints2 = new MatOfKeyPoint(); detector.detect(img2, keypoints2); descriptor.compute(img2, keypoints2, descriptors2); //matcher should include 2 different image's descriptors MatOfDMatch matches = new MatOfDMatch(); matcher.match(descriptors1,descriptors2,matches); //feature and connection colors Scalar RED = new Scalar(255,0,0); Scalar GREEN = new Scalar(0,255,0); //output image Mat outputImg = new Mat(); MatOfByte drawnMatches = new MatOfByte(); //this will draw all matches, works fine Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, outputImg, GREEN, RED, drawnMatches, Features2d.NOT_DRAW_SINGLE_POINTS);
/**
 * Renders all given contours in red on top of a desaturated copy of the
 * base image.
 *
 * @param contours contours to draw (all of them: index -1)
 * @param mBase    image the contours were found in; not modified
 * @return a new color image containing the grayscale base with red contours
 */
public static Mat drawContoursInImage(List<MatOfPoint> contours, Mat mBase) {
    // getNewMat() also ensures the native OpenCV library is loaded
    Mat canvas = Element.getNewMat();
    // gray the base out, then bring it back to a color type so the red
    // overlay is representable
    Mat grayed = new Mat();
    Imgproc.cvtColor(mBase, grayed, toGray);
    Imgproc.cvtColor(grayed, canvas, toColor);
    Imgproc.drawContours(canvas, contours, -1, new Scalar(0, 0, 255)); // BGR red
    return canvas;
}
// Builds a masked copy of the current shot when masking mode is active.
// NOTE(review): this snippet is truncated — the closing braces of the
// conditionals and the method are not visible here; document only what is
// shown.
private void actionMasking() {
    if (masking) {
        if (SX.isNotNull(mask)) {
            // single-channel mask matching the shot's geometry
            mMask = new Mat(shot.getContent().size(), CvType.CV_8UC1);
            if (!maskingToggle) {
                mMask.setTo(new Scalar(0)); // start from an all-zero mask
                Core.split(shot.getContent(), mBGRA);
                // NOTE(review): appends the zero mask as an extra plane before
                // merging into a CV_8UC4 result — presumably it becomes the
                // alpha channel; confirm mBGRA holds exactly 3 planes here.
                mBGRA.add(mMask);
                Mat mResult = new Mat(mMask.size(), CvType.CV_8UC4);
                Core.merge(mBGRA, mResult);
                shotMasked = new Picture(mResult);