/**
 * Detector configuration shared by the corner based detectors:
 * 3 pixel non-max radius, intensity threshold of 20, at most 150 features.
 */
private static ConfigGeneralDetector confCorner() {
    ConfigGeneralDetector detectorConfig = new ConfigGeneralDetector();
    detectorConfig.maxFeatures = 150;
    detectorConfig.radius = 3;
    detectorConfig.threshold = 20;
    return detectorConfig;
}
@Override
public void createNewProcessor() {
    // Detection threshold shrinks as more features are requested, but is
    // clamped so it never drops below 20.
    ConfigGeneralDetector detectorConfig = new ConfigGeneralDetector();
    detectorConfig.radius = 3;
    detectorConfig.maxFeatures = maxFeatures;
    detectorConfig.threshold = Math.max(20, 45 - (maxFeatures / 20));

    // Three level image pyramid with a 3 pixel template radius
    PointTracker<GrayU8> kltTracker = FactoryPointTracker.klt(
            new int[]{1, 2, 4}, detectorConfig, 3, GrayU8.class, GrayS16.class);

    Log.i("KLT", "maxFeatures = " + maxFeatures);
    setProcessing(new PointProcessing(kltTracker));
}
/**
 * Detects Kitchen and Rosenfeld corners.
 *
 * @param configDetector Configuration for feature detector. Can be null for defaults.
 * @param derivType Type of derivative image.
 * @see boofcv.alg.feature.detect.intensity.KitRosCornerIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createKitRos(ConfigGeneralDetector configDetector, Class<D> derivType) {
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    GeneralFeatureIntensity<T, D> featureIntensity = new WrapperKitRosCornerIntensity<>(derivType);
    return createGeneral(featureIntensity, configDetector);
}
/**
 * Creates a Hessian based blob detector.
 *
 * @param type The type of Hessian based blob detector to use. DETERMINANT often works well.
 * @param configDetector Configuration for feature detector. Can be null for defaults.
 * @param derivType Type of derivative image.
 * @see HessianBlobIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createHessian(HessianBlobIntensity.Type type,
                                           ConfigGeneralDetector configDetector, Class<D> derivType) {
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    GeneralFeatureIntensity<T, D> featureIntensity = FactoryIntensityPoint.hessian(type, derivType);
    return createGeneral(featureIntensity, configDetector);
}
/**
 * Detects Harris corners.
 *
 * @param configDetector Configuration for feature detector. Can be null for defaults.
 * @param weighted Is a Gaussian weight applied to the sample region? False is much faster.
 * @param derivType Type of derivative image.
 * @see boofcv.alg.feature.detect.intensity.HarrisCornerIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createHarris(ConfigGeneralDetector configDetector,
                                          boolean weighted, Class<D> derivType) {
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    // 0.04 is the commonly used Harris corner sensitivity parameter
    final float kappa = 0.04f;
    GradientCornerIntensity<D> harris =
            FactoryIntensityPointAlg.harris(configDetector.radius, kappa, weighted, derivType);
    return createGeneral(harris, configDetector);
}
/**
 * Detects Shi-Tomasi corners.
 *
 * @param configDetector Configuration for feature detector. Can be null for defaults.
 * @param weighted Is a Gaussian weight applied to the sample region? False is much faster.
 * @param derivType Type of derivative image.
 * @see boofcv.alg.feature.detect.intensity.ShiTomasiCornerIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createShiTomasi(ConfigGeneralDetector configDetector,
                                             boolean weighted, Class<D> derivType) {
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    GradientCornerIntensity<D> shiTomasi =
            FactoryIntensityPointAlg.shiTomasi(configDetector.radius, weighted, derivType);
    return createGeneral(shiTomasi, configDetector);
}
/**
 * Creates a median filter corner detector.
 *
 * @param configDetector Configuration for feature detector. Can be null for defaults.
 * @param imageType Type of input image.
 * @see boofcv.alg.feature.detect.intensity.MedianCornerIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createMedian(ConfigGeneralDetector configDetector, Class<T> imageType) {
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    // Intensity is the difference between the image and its median filtered version
    BlurStorageFilter<T> median = FactoryBlurFilter.median(imageType, configDetector.radius);
    GeneralFeatureIntensity<T, D> featureIntensity =
            new WrapperMedianCornerIntensity<>(median, imageType);
    return createGeneral(featureIntensity, configDetector);
}
/**
 * Creates the selected monocular-plane visual odometry algorithm.
 *
 * @param whichAlg 0 = plane-infinity, 1 = overhead view. Anything else throws.
 */
private MonocularPlaneVisualOdometry<I> createVisualOdometry( int whichAlg ) {
    Class derivType = GImageDerivativeOps.getDerivativeType(imageClass);

    switch (whichAlg) {
        case 0: {
            PointTracker<I> tracker = createKltTracker(derivType);
            return FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, imageType);
        }
        case 1: {
            PointTracker<I> tracker = createKltTracker(derivType);
            double cellSize = 0.06;
            double inlierGroundTol = 1.5;
            return FactoryVisualOdometry.monoPlaneOverhead(cellSize, 25, 0.7,
                    inlierGroundTol, 300, 2, 100, 0.5, 0.6, tracker, imageType);
        }
        default:
            throw new RuntimeException("Unknown selection");
    }
}

/** Builds the pyramidal KLT tracker that both algorithms share. */
private PointTracker<I> createKltTracker(Class derivType) {
    PkltConfig config = new PkltConfig();
    config.pyramidScaling = new int[]{1, 2, 4, 8};
    config.templateRadius = 3;
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
    return FactoryPointTracker.klt(config, configDetector, imageClass, derivType);
}
/** Estimates 2D car motion from an overhead transform of a monocular camera. */
public static EstimateCarMotion2D monoOverhead() {
    // Pyramidal KLT feature tracker
    PkltConfig kltConfig = new PkltConfig();
    kltConfig.pyramidScaling = new int[]{1, 2, 4, 8};
    kltConfig.templateRadius = 3;

    ConfigGeneralDetector detectorConfig = new ConfigGeneralDetector(600, 3, 1);
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(
            kltConfig, detectorConfig, GrayF32.class, GrayF32.class);

    // Robust 2D motion estimation parameters
    int ransacIterations = 300;
    double inlierGroundTol = 0.2;
    int thresholdRetire = 2;
    int absoluteMinimumTracks = 30;
    double respawnTrackFraction = 0.4;
    double respawnCoverageFraction = 0.6;

    ImageMotion2D<GrayF32, Se2_F64> motion2D = FactoryMotion2D.createMotion2D(
            ransacIterations, inlierGroundTol * inlierGroundTol, thresholdRetire,
            absoluteMinimumTracks, respawnTrackFraction, respawnCoverageFraction,
            false, tracker, new Se2_F64());

    return new MonoOverhead_to_CarMotion2D(motion2D);
}
/**
 * Configures all the point trackers which can be selected in the GUI.
 *
 * @param imageType Type of input image.
 * @param derivType Type of derivative image.
 */
public VideoTrackerPointFeaturesApp(Class<I> imageType, Class<D> derivType) {
    super(1, imageType);

    PkltConfig config = new PkltConfig();
    config.templateRadius = 3;
    config.pyramidScaling = new int[]{1, 2, 4, 8};

    ConfigFastHessian configFH = new ConfigFastHessian();
    configFH.maxFeaturesPerScale = 200;
    configFH.extractRadius = 4;
    configFH.detectThreshold = 15f;

    // BUG FIX: was new ConfigGeneralDetector(maxFeatures, 1, 3), which swapped the
    // radius and threshold arguments. Every other KLT setup in this project uses
    // radius = 3, threshold = 1 (constructor order is maxFeatures, radius, threshold).
    addAlgorithm(0, "KLT", FactoryPointTracker.klt(config,
            new ConfigGeneralDetector(maxFeatures, 3, 1), imageType, derivType));
    addAlgorithm(0, "ST-BRIEF", FactoryPointTracker.
            dda_ST_BRIEF(200, new ConfigGeneralDetector(maxFeatures, 3, 1), imageType, derivType));
    addAlgorithm(0, "ST-NCC", FactoryPointTracker.
            dda_ST_NCC(new ConfigGeneralDetector(maxFeatures, 3, 2), 5, imageType, derivType));
    addAlgorithm(0, "FH-SURF", FactoryPointTracker.
            dda_FH_SURF_Fast(configFH, null, null, imageType));
    addAlgorithm(0, "ST-SURF-KLT", FactoryPointTracker.
            combined_ST_SURF_KLT(new ConfigGeneralDetector(maxFeatures, 3, 1),
                    config, 50, null, null, imageType, derivType));
    addAlgorithm(0, "FH-SURF-KLT", FactoryPointTracker.combined_FH_SURF_KLT(
            config, 50, configFH, null, null, imageType));

    gui.addMouseListener(this);
    gui.requestFocus();
    setMainGUI(gui);
}
public static EstimateCarMotion2D monoPlaneInfinity(double scale) { // specify how the image features are going to be tracked PkltConfig configKlt = new PkltConfig(); configKlt.pyramidScaling = new int[]{1,2,4,8}; configKlt.templateRadius = 5; PointTrackerTwoPass<GrayF32> tracker = FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(-1, 3, 150), GrayF32.class, GrayF32.class); // declares the algorithm MonocularPlaneVisualOdometry<GrayF32> vo = FactoryVisualOdometry.monoPlaneInfinity(100, 2, 1.5, 200, tracker, ImageType.single(GrayF32.class)); if( scale != 1.0 ) { vo = new MonocularPlaneVisualOdometryScaleInput<GrayF32>(vo,scale); } return new Mono_to_CarMotion2D(vo); }
/**
 * Creates a Fast corner detector.
 *
 * @param configFast Configuration for FAST feature detector. Can be null for defaults.
 * @param configDetector Configuration for feature extractor. Can be null for defaults.
 * @param imageType Type of input image.
 * @see FastCornerIntensity
 */
public static <T extends ImageGray, D extends ImageGray>
GeneralFeatureDetector<T, D> createFast(ConfigFast configFast,
                                        ConfigGeneralDetector configDetector, Class<T> imageType) {
    if (configFast == null)
        configFast = new ConfigFast();
    configFast.checkValidity();

    // BUG FIX: a null configDetector previously caused a NullPointerException below.
    // Default it, matching every other create*() factory in this class.
    if (configDetector == null)
        configDetector = new ConfigGeneralDetector();

    FastCornerIntensity<T> alg = FactoryIntensityPointAlg.fast(
            configFast.pixelTol, configFast.minContinuous, imageType);
    GeneralFeatureIntensity<T, D> intensity = new WrapperFastCornerIntensity<>(alg);

    // Copy the user's settings but force the extractor flags FAST requires
    ConfigGeneralDetector configExtract = new ConfigGeneralDetector(
            configDetector.maxFeatures, configDetector.radius, configDetector.threshold,
            0, true, false, true);

    return createGeneral(intensity, configExtract);
}
/**
 * Creates a video stabilization algorithm.
 *
 * @param which 0 = KLT point tracker, anything else = SURF based tracker.
 */
static StitchingFromMotion2D<GrayU8, Affine2D_F64> createStabilization( int which ) {
    PointTracker<GrayU8> tracker;

    switch (which) {
        case 0: {
            ConfigGeneralDetector detectorConfig = new ConfigGeneralDetector();
            detectorConfig.maxFeatures = 200;
            detectorConfig.threshold = 40;
            detectorConfig.radius = 3;
            tracker = FactoryPointTracker.klt(new int[]{1, 2, 4}, detectorConfig, 3,
                    GrayU8.class, GrayS16.class);
            break;
        }
        default:
            tracker = FactoryPointTracker.dda_FH_SURF_Fast(null, null, null, GrayU8.class);
    }

    // Robust affine motion model fit to the tracked points
    ImageMotion2D<GrayU8, Affine2D_F64> motion = FactoryMotion2D.createMotion2D(
            100, 1.5, 2, 40, 0.5, 0.6, false, tracker, new Affine2D_F64());

    return FactoryMotion2D.createVideoStitch(0.2, motion, ImageType.single(GrayU8.class));
}
/**
 * Configures the trackers and motion models selectable in the mosaic GUI.
 *
 * @param imageType Type of input image.
 * @param derivType Type of derivative image.
 */
public VideoMosaicSequentialPointApp(Class<I> imageType, Class<D> derivType) {
    super(2, imageType, true, new Mosaic2DPanel());

    PkltConfig kltConfig = new PkltConfig();
    kltConfig.templateRadius = 3;
    kltConfig.pyramidScaling = new int[]{1, 2, 4, 8};

    ConfigFastHessian fastHessianConfig = new ConfigFastHessian();
    fastHessianConfig.initialSampleSize = 2;
    fastHessianConfig.maxFeaturesPerScale = 200;

    addAlgorithm(0, "KLT", FactoryPointTracker.klt(kltConfig,
            new ConfigGeneralDetector(maxFeatures, 3, 1), imageType, derivType));
    addAlgorithm(0, "ST-BRIEF", FactoryPointTracker.
            dda_ST_BRIEF(150, new ConfigGeneralDetector(400, 1, 10), imageType, null));
    // size of the description region has been increased to improve quality.
    addAlgorithm(0, "ST-NCC", FactoryPointTracker.
            dda_ST_NCC(new ConfigGeneralDetector(500, 3, 9), 10, imageType, derivType));
    addAlgorithm(0, "FH-SURF", FactoryPointTracker.
            dda_FH_SURF_Fast(fastHessianConfig, null, null, imageType));
    addAlgorithm(0, "ST-SURF-KLT", FactoryPointTracker.
            combined_ST_SURF_KLT(new ConfigGeneralDetector(400, 3, 1),
                    kltConfig, 75, null, null, imageType, derivType));
    addAlgorithm(0, "FH-SURF-KLT", FactoryPointTracker.combined_FH_SURF_KLT(
            kltConfig, 75, fastHessianConfig, null, null, imageType));

    addAlgorithm(1, "Affine", new Affine2D_F64());
    addAlgorithm(1, "Homography", new Homography2D_F64());

    // Mosaic quality / respawn tuning
    absoluteMinimumTracks = 40;
    respawnTrackFraction = 0.3;
    respawnCoverageFraction = 0.8;
    maxJumpFraction = 0.3;
    inlierThreshold = 4;
}
/**
 * Configures the trackers and motion models selectable in the stabilization GUI.
 *
 * @param imageType Type of input image.
 * @param derivType Type of derivative image.
 */
public VideoStabilizeSequentialPointApp(Class<I> imageType, Class<D> derivType) {
    super(2, imageType, true, new Stabilize2DPanel());

    PkltConfig config = new PkltConfig();
    config.templateRadius = 3;
    config.pyramidScaling = new int[]{1, 2, 4, 8};

    ConfigFastHessian configFH = new ConfigFastHessian();
    configFH.maxFeaturesPerScale = 200;
    configFH.initialSampleSize = 2;

    // BUG FIX: was new ConfigGeneralDetector(maxFeatures, 1, 3), which swapped the
    // radius and threshold arguments. The sibling mosaic app and every other KLT
    // setup use radius = 3, threshold = 1 (constructor order: maxFeatures, radius, threshold).
    addAlgorithm(0, "KLT", FactoryPointTracker.klt(config,
            new ConfigGeneralDetector(maxFeatures, 3, 1), imageType, derivType));
    addAlgorithm(0, "ST-BRIEF", FactoryPointTracker.
            dda_ST_BRIEF(100, new ConfigGeneralDetector(400, 1, 10), imageType, derivType));
    // size of the description region has been increased to improve quality.
    addAlgorithm(0, "ST-NCC", FactoryPointTracker.
            dda_ST_NCC(new ConfigGeneralDetector(500, 3, 10), 5, imageType, derivType));
    addAlgorithm(0, "FH-SURF", FactoryPointTracker.
            dda_FH_SURF_Fast(configFH, null, null, imageType));
    addAlgorithm(0, "ST-SURF-KLT", FactoryPointTracker.
            combined_ST_SURF_KLT(new ConfigGeneralDetector(400, 3, 1),
                    config, 50, null, null, imageType, derivType));
    addAlgorithm(0, "FH-SURF-KLT", FactoryPointTracker.combined_FH_SURF_KLT(
            config, 50, configFH, null, null, imageType));

    addAlgorithm(1, "Affine", new Affine2D_F64());
    addAlgorithm(1, "Homography", new Homography2D_F64());

    // Stabilization quality / respawn tuning
    absoluteMinimumTracks = 40;
    respawnTrackFraction = 0.3;
    respawnCoverageFraction = 0.5;
    maxJumpFraction = 0.3;
    inlierThreshold = 4;
}
public static EstimateCarMotion2D stereo01( double scale ) { // specify how the image features are going to be tracked PkltConfig configKlt = new PkltConfig(); configKlt.pyramidScaling = new int[]{1,2,4,8}; configKlt.templateRadius = 5; PointTrackerTwoPass<GrayF32> tracker = FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(-1, 3, 150), GrayF32.class, GrayF32.class); // computes the depth of each point StereoDisparitySparse<GrayF32> disparity = FactoryStereoDisparity.regionSparseWta(0, 10, 3, 3, 20, 0.15, true, GrayF32.class); // declares the algorithm StereoVisualOdometry<GrayF32> vo = FactoryVisualOdometry.stereoDepth(1.5, 200, 2, 200, 50, false, disparity, tracker, GrayF32.class); if( scale != 1.0 ) { vo = new StereoVisualOdometryScaleInput<GrayF32>(vo,scale); } return new StereoVO_to_CarMotion2D(vo); } }
algType = AlgType.FEATURE; ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1); algType = AlgType.FEATURE; ConfigGeneralDetector configExtract = new ConfigGeneralDetector(600, 3, 1); combined_ST_SURF_KLT(new ConfigGeneralDetector(600, 3, 1), pkltConfig, 50, null, null, imageType, derivType);
private void handleAlgorithmChanged() { ConfigGeneralDetector config = new ConfigGeneralDetector(); config.radius = controlPanel.radius; config.maxFeatures = 200;
createHarris(new ConfigGeneralDetector(NUM_FEATURES,r, 1), false, derivType);
/**
 * Creates the requested interest point detector.
 *
 * @param detect Which detector to create (DETECT_* constant).
 * @param imageType Type of input image.
 */
public static InterestPointDetector createDetector( int detect , Class imageType ) {
    Class derivType = GImageDerivativeOps.getDerivativeType(imageType);

    // Fast-Hessian and SIFT are returned directly; the corner detectors below
    // are wrapped into an InterestPointDetector at the end.
    GeneralFeatureDetector cornerDetector;
    switch (detect) {
        case DETECT_FH:
            return FactoryInterestPoint.fastHessian(confDetectFH());
        case DETECT_SIFT:
            return FactoryInterestPoint.sift(null, confDetectSift(), imageType);
        case DETECT_SHITOMASI:
            cornerDetector = FactoryDetectPoint.createShiTomasi(confCorner(), false, derivType);
            break;
        case DETECT_HARRIS:
            cornerDetector = FactoryDetectPoint.createHarris(confCorner(), false, derivType);
            break;
        case DETECT_FAST:
            cornerDetector = FactoryDetectPoint.createFast(
                    new ConfigFastCorner(20, 9), new ConfigGeneralDetector(150, 3, 20), imageType);
            break;
        default:
            throw new RuntimeException("Unknown detector");
    }

    return FactoryInterestPoint.wrapPoint(cornerDetector, 1.0, imageType, derivType);
}