diff --git a/packages/dartcv/.gitignore b/packages/dartcv/.gitignore index eee74c38..91f14d85 100644 --- a/packages/dartcv/.gitignore +++ b/packages/dartcv/.gitignore @@ -8,3 +8,4 @@ pubspec.lock test/models libdartcv* +!example/ diff --git a/packages/dartcv/.pubignore b/packages/dartcv/.pubignore index a8fe51c2..2798a883 100644 --- a/packages/dartcv/.pubignore +++ b/packages/dartcv/.pubignore @@ -16,3 +16,4 @@ ffigen/ Makefile libdartcv* !example/ +*.ttf diff --git a/packages/dartcv/CHANGELOG.md b/packages/dartcv/CHANGELOG.md index acab7526..362fa500 100644 --- a/packages/dartcv/CHANGELOG.md +++ b/packages/dartcv/CHANGELOG.md @@ -1,3 +1,11 @@ +## 1.1.8 + +* bump dependencies +* add `UMat` +* [features2d] add more functions/constructions +* add custom loggers, support user-defined logger callback +* add `cv.freetype` module + ## 1.1.7 * add `Mat.reinterpret` diff --git a/packages/dartcv/bin/gen_cmake_vars.dart b/packages/dartcv/bin/gen_cmake_vars.dart index 07dfc111..05de6e89 100644 --- a/packages/dartcv/bin/gen_cmake_vars.dart +++ b/packages/dartcv/bin/gen_cmake_vars.dart @@ -11,6 +11,7 @@ const defaultModuleSettings = { "dnn": "ON", "features2d": "ON", "flann": "ON", + "freetype": "OFF", // "gapi", // disabled "highgui": "OFF", "imgproc": "ON", diff --git a/packages/dartcv/ffigen/ffigen_calib3d.yaml b/packages/dartcv/ffigen/ffigen_calib3d.yaml index 0a9a740c..1274c674 100644 --- a/packages/dartcv/ffigen/ffigen_calib3d.yaml +++ b/packages/dartcv/ffigen/ffigen_calib3d.yaml @@ -33,7 +33,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_const.yaml b/packages/dartcv/ffigen/ffigen_const.yaml index f8872260..4cf268cf 100644 --- a/packages/dartcv/ffigen/ffigen_const.yaml +++ b/packages/dartcv/ffigen/ffigen_const.yaml @@ -17,7 +17,6 @@ headers: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_contrib.yaml b/packages/dartcv/ffigen/ffigen_contrib.yaml index 87b68d07..3cc6d946 100644 --- a/packages/dartcv/ffigen/ffigen_contrib.yaml +++ b/packages/dartcv/ffigen/ffigen_contrib.yaml @@ -22,6 +22,7 @@ headers: - ../src/dartcv/contrib/wechat_qrcode.h - ../src/dartcv/contrib/ximgproc.h - ../src/dartcv/contrib/xobjdetect.h + - ../src/dartcv/freetype/freetype.h include-directives: - ../src/dartcv/contrib/aruco.h - ../src/dartcv/contrib/img_hash.h @@ -29,6 +30,7 @@ headers: - ../src/dartcv/contrib/wechat_qrcode.h - ../src/dartcv/contrib/ximgproc.h - ../src/dartcv/contrib/xobjdetect.h + - ../src/dartcv/freetype/freetype.h functions: symbol-address: @@ -39,7 +41,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_core.yaml b/packages/dartcv/ffigen/ffigen_core.yaml index 7d5dbce3..3d4e7e2e 100644 --- a/packages/dartcv/ffigen/ffigen_core.yaml +++ 
b/packages/dartcv/ffigen/ffigen_core.yaml @@ -66,7 +66,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_dnn.yaml b/packages/dartcv/ffigen/ffigen_dnn.yaml index 8c000f5f..367a3476 100644 --- a/packages/dartcv/ffigen/ffigen_dnn.yaml +++ b/packages/dartcv/ffigen/ffigen_dnn.yaml @@ -31,7 +31,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_features2d.yaml b/packages/dartcv/ffigen/ffigen_features2d.yaml index 00dfdb01..5ec43a90 100644 --- a/packages/dartcv/ffigen/ffigen_features2d.yaml +++ b/packages/dartcv/ffigen/ffigen_features2d.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_gapi.yaml b/packages/dartcv/ffigen/ffigen_gapi.yaml index 9640e65e..78531adb 100644 --- a/packages/dartcv/ffigen/ffigen_gapi.yaml +++ b/packages/dartcv/ffigen/ffigen_gapi.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_highgui.yaml b/packages/dartcv/ffigen/ffigen_highgui.yaml index d9d05cce..fefa64c3 100644 --- a/packages/dartcv/ffigen/ffigen_highgui.yaml +++ b/packages/dartcv/ffigen/ffigen_highgui.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_imgcodecs.yaml b/packages/dartcv/ffigen/ffigen_imgcodecs.yaml index 105f6fd5..96778041 100644 --- a/packages/dartcv/ffigen/ffigen_imgcodecs.yaml +++ b/packages/dartcv/ffigen/ffigen_imgcodecs.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_imgproc.yaml b/packages/dartcv/ffigen/ffigen_imgproc.yaml index 284f342c..00943b45 100644 --- a/packages/dartcv/ffigen/ffigen_imgproc.yaml +++ b/packages/dartcv/ffigen/ffigen_imgproc.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 
https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_objdetect.yaml b/packages/dartcv/ffigen/ffigen_objdetect.yaml index 3a43ec29..9077b34b 100644 --- a/packages/dartcv/ffigen/ffigen_objdetect.yaml +++ b/packages/dartcv/ffigen/ffigen_objdetect.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_photo.yaml b/packages/dartcv/ffigen/ffigen_photo.yaml index 8dda3404..8810f22e 100644 --- a/packages/dartcv/ffigen/ffigen_photo.yaml +++ b/packages/dartcv/ffigen/ffigen_photo.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_stitching.yaml b/packages/dartcv/ffigen/ffigen_stitching.yaml index c4d83b8a..add0e024 100644 --- a/packages/dartcv/ffigen/ffigen_stitching.yaml +++ b/packages/dartcv/ffigen/ffigen_stitching.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_types.yaml b/packages/dartcv/ffigen/ffigen_types.yaml index 89ae149a..f2b72b40 100644 --- a/packages/dartcv/ffigen/ffigen_types.yaml +++ b/packages/dartcv/ffigen/ffigen_types.yaml @@ -46,7 +46,6 @@ type-map: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_video.yaml b/packages/dartcv/ffigen/ffigen_video.yaml index cc6c3cbf..5be295a9 100644 --- a/packages/dartcv/ffigen/ffigen_video.yaml +++ b/packages/dartcv/ffigen/ffigen_video.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/ffigen/ffigen_videoio.yaml b/packages/dartcv/ffigen/ffigen_videoio.yaml index 1f2bd158..3dd0d40e 100644 --- a/packages/dartcv/ffigen/ffigen_videoio.yaml +++ b/packages/dartcv/ffigen/ffigen_videoio.yaml @@ -29,7 +29,6 @@ functions: preamble: | // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language - // some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/contrib.dart b/packages/dartcv/lib/contrib.dart index 94b390b8..7393766c 100644 --- a/packages/dartcv/lib/contrib.dart +++ b/packages/dartcv/lib/contrib.dart 
@@ -7,6 +7,7 @@ library dartcv.contrib;
 export 'src/contrib/aruco.dart';
 export 'src/contrib/aruco_async.dart';
 export 'src/contrib/aruco_dict.dart';
+export 'src/contrib/freetype.dart';
 export 'src/contrib/img_hash.dart';
 export 'src/contrib/quality.dart';
 export 'src/contrib/wechat_qrcode.dart';
diff --git a/packages/dartcv/lib/core.dart b/packages/dartcv/lib/core.dart
index dfc1b99c..8374bf4b 100644
--- a/packages/dartcv/lib/core.dart
+++ b/packages/dartcv/lib/core.dart
@@ -25,4 +25,5 @@ export 'src/core/scalar.dart';
 export 'src/core/size.dart';
 export 'src/core/svd.dart';
 export 'src/core/termcriteria.dart';
+export 'src/core/umat.dart';
 export 'src/core/vec.dart';
diff --git a/packages/dartcv/lib/features2d.dart b/packages/dartcv/lib/features2d.dart
index a5f2f943..443c1325 100644
--- a/packages/dartcv/lib/features2d.dart
+++ b/packages/dartcv/lib/features2d.dart
@@ -6,4 +6,6 @@ library dartcv.features2d;
 export 'src/features2d/features2d.dart';
 export 'src/features2d/features2d_async.dart';
+export 'src/features2d/features2d_base.dart';
+export 'src/features2d/features2d_enum.dart';
 export 'src/g/features2d.g.dart' show FlannIndexType, FlannAlgorithm, FlannDistance;
diff --git a/packages/dartcv/lib/src/contrib/freetype.dart b/packages/dartcv/lib/src/contrib/freetype.dart
new file mode 100644
index 00000000..38bd72c7
--- /dev/null
+++ b/packages/dartcv/lib/src/contrib/freetype.dart
@@ -0,0 +1,307 @@
+// Copyright (c) 2025, rainyl and all contributors. All rights reserved.
+// Use of this source code is governed by an Apache-2.0 license
+// that can be found in the LICENSE file.
+
+library cv.freetype;
+
+import 'dart:ffi' as ffi;
+import 'dart:typed_data';
+
+import 'package:ffi/ffi.dart';
+
+import '../core/base.dart';
+import '../core/mat.dart';
+import '../core/point.dart';
+import '../core/scalar.dart';
+import '../core/size.dart';
+import '../g/constants.g.dart' show LINE_8;
+import '../g/contrib.g.dart' as cvg;
+import '../native_lib.dart' show ccontrib;
+
+class FreeType2 extends CvStruct {
+  FreeType2._(super.ptr, [bool attach = true]) : super.fromPointer() {
+    if (attach) {
+      finalizer.attach(this, ptr.cast(), detach: this);
+    }
+  }
+
+  factory FreeType2.fromPointer(cvg.FreeType2Ptr ptr, [bool attach = true]) => FreeType2._(ptr, attach);
+
+  /// Create a [FreeType2] object.
+  ///
+  /// [filename] Font file name; if not null, [loadFontData] is called internally.
+  /// [idx] face_index to select a font face in a single file.
+  factory FreeType2.create({String? filename, int idx = 0}) {
+    final p = calloc();
+    cvRun(() => ccontrib.cv_freetype_FreeType2_create(p));
+    final ft = FreeType2._(p);
+    if (filename != null) {
+      ft.loadFontData(filename, idx);
+    }
+    return ft;
+  }
+
+  /// Load font data.
+  ///
+  /// The function loadFontData loads font data from a file.
+  ///
+  /// [filename] Font file name
+  /// [idx] face_index to select a font face in a single file.
+  void loadFontData(String filename, int idx) {
+    final cname = filename.toNativeUtf8().cast();
+    cvRun(() => ccontrib.cv_freetype_FreeType2_loadFontData(ref, cname, idx, ffi.nullptr));
+    calloc.free(cname);
+  }
+
+  /// async version of [loadFontData]
+  Future loadFontDataAsync(String filename, int idx) async {
+    final cname = filename.toNativeUtf8().cast();
+    await cvRunAsync0(
+      (callback) => ccontrib.cv_freetype_FreeType2_loadFontData(ref, cname, idx, callback),
+      (c) {
+        calloc.free(cname);
+        c.complete();
+      },
+    );
+  }
+
+  /// Load font data.
+  ///
+  /// The function loadFontData loads font data from memory.
+  /// The data is not copied; the user needs to make sure the data lives at least as long as FreeType2.
+  /// After the FreeType2 object is destroyed, the buffer can be safely deallocated.
+  ///
+  /// [buffer] buffer containing font data
+  /// [idx] face_index to select a font face in a single file.
+  void loadFontBuffer(Uint8List buffer, int idx) {
+    final cbuffer = malloc(buffer.length);
+    cbuffer.asTypedList(buffer.length).setAll(0, buffer);
+    cvRun(
+      () => ccontrib.cv_freetype_FreeType2_loadFontData_buf(
+        ref,
+        cbuffer.cast(),
+        buffer.length,
+        idx,
+        ffi.nullptr,
+      ),
+    );
+    malloc.free(cbuffer);
+  }
+
+  /// async version of [loadFontBuffer]
+  Future loadFontBufferAsync(Uint8List buffer, int idx) async {
+    final cbuffer = malloc(buffer.length);
+    cbuffer.asTypedList(buffer.length).setAll(0, buffer);
+    await cvRunAsync0(
+      (callback) => ccontrib.cv_freetype_FreeType2_loadFontData_buf(
+        ref,
+        cbuffer.cast(),
+        buffer.length,
+        idx,
+        callback,
+      ),
+      (c) {
+        malloc.free(cbuffer);
+        c.complete();
+      },
+    );
+  }
+
+  /// Set the split number from Bezier curve to line.
+  ///
+  /// The function [setSplitNumber] sets the number of split points used when approximating a Bezier curve with lines.
+  ///
+  /// If you want to draw large glyphs, a larger value is better.
+  ///
+  /// If you want to draw small glyphs, a smaller value is better.
+  void setSplitNumber(int num) {
+    cvRun(() => ccontrib.cv_freetype_FreeType2_setSplitNumber(ref, num));
+  }
+
+  /// Draws a text string.
+  /// The function putText renders the specified text string in the image.
+  /// Symbols that cannot be rendered using the specified font are replaced by "Tofu" or are not drawn.
+  ///
+  /// [img] Image. (Only 8UC1/8UC3/8UC4 2D mat is supported.)
+  ///
+  /// [text] Text string to be drawn.
+  ///
+  /// [org] Bottom-left/Top-left corner of the text string in the image.
+  ///
+  /// [fontHeight] Drawing font size by pixel unit.
+  ///
+  /// [color] Text color.
+  ///
+  /// [thickness] Thickness of the lines used to draw the text. When negative, the glyph is filled; otherwise, the glyph is drawn with this thickness.
+  ///
+  /// [lineType] Line type. See cv::line for details.
+  ///
+  /// [bottomLeftOrigin] When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.
+  void putText(
+    InputOutputArray img,
+    String text,
+    Point org,
+    int fontHeight,
+    Scalar color, {
+    int thickness = 1,
+    int lineType = LINE_8,
+    bool bottomLeftOrigin = false,
+  }) {
+    final ctext = text.toNativeUtf8().cast();
+    cvRun(
+      () => ccontrib.cv_freetype_FreeType2_putText(
+        ref,
+        img.ref,
+        ctext,
+        org.ref,
+        fontHeight,
+        color.ref,
+        thickness,
+        lineType,
+        bottomLeftOrigin,
+        ffi.nullptr,
+      ),
+    );
+    calloc.free(ctext);
+  }
+
+  /// Async version of [putText]
+  ///
+  /// Draws a text string.
+  /// The function putText renders the specified text string in the image.
+  /// Symbols that cannot be rendered using the specified font are replaced by "Tofu" or are not drawn.
+  ///
+  /// [img] Image. (Only 8UC1/8UC3/8UC4 2D mat is supported.)
+  ///
+  /// [text] Text string to be drawn.
+  ///
+  /// [org] Bottom-left/Top-left corner of the text string in the image.
+  ///
+  /// [fontHeight] Drawing font size by pixel unit.
+  ///
+  /// [color] Text color.
+  ///
+  /// [thickness] Thickness of the lines used to draw the text. When negative, the glyph is filled; otherwise, the glyph is drawn with this thickness.
+  ///
+  /// [lineType] Line type. See cv::line for details.
+  ///
+  /// [bottomLeftOrigin] When true, the image data origin is at the bottom-left corner.
Otherwise, it is at the top-left corner. + Future putTextAsync( + InputOutputArray img, + String text, + Point org, + int fontHeight, + Scalar color, { + int thickness = 1, + int lineType = LINE_8, + bool bottomLeftOrigin = false, + }) async { + final ctext = text.toNativeUtf8().cast(); + return cvRunAsync0( + (callback) => ccontrib.cv_freetype_FreeType2_putText( + ref, + img.ref, + ctext, + org.ref, + fontHeight, + color.ref, + thickness, + lineType, + bottomLeftOrigin, + callback, + ), + (c) { + calloc.free(ctext); + return c.complete(); + }, + ); + } + + /// Calculates the width and height of a text string. + /// + /// The function getTextSize calculates and returns the approximate size of a box that contains the specified text. + /// That is, the following code renders some text, the tight box surrounding it, and the baseline: : + /// ```c++ + /// String text = "Funny text inside the box"; + /// int fontHeight = 60; + /// int thickness = -1; + /// int linestyle = LINE_8; + /// + /// Mat img(600, 800, CV_8UC3, Scalar::all(0)); + /// + /// int baseline=0; + /// + /// cv::Ptr ft2; + /// ft2 = cv::freetype::createFreeType2(); + /// ft2->loadFontData( "./mplus-1p-regular.ttf", 0 ); + /// + /// Size textSize = ft2->getTextSize(text, + /// fontHeight, + /// thickness, + /// &baseline); + /// + /// if(thickness > 0){ + /// baseline += thickness; + /// } + /// + /// // center the text + /// Point textOrg((img.cols - textSize.width) / 2, + /// (img.rows + textSize.height) / 2); + /// + /// // draw the box + /// rectangle(img, textOrg + Point(0, baseline), + /// textOrg + Point(textSize.width, -textSize.height), + /// Scalar(0,255,0),1,8); + /// + /// // ... and the baseline first + /// line(img, textOrg + Point(0, thickness), + /// textOrg + Point(textSize.width, thickness), + /// Scalar(0, 0, 255),1,8); + /// + /// // then put the text itself + /// ft2->putText(img, text, textOrg, fontHeight, + /// Scalar::all(255), thickness, linestyle, true ); + /// ``` + /// + /// [text] Input text string. + /// [fontHeight] Drawing font size by pixel unit. + /// [thickness] Thickness of lines used to render the text. See putText for details. + /// + /// Return: + /// - [Size] size, The size of a box that contains the specified text. + /// - [int] baseLine y-coordinate of the baseline relative to the bottom-most text point. 
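For reference, here is a rough Dart equivalent of the C++ snippet in the [getTextSize] docs above, using the `FreeType2` API added in this diff. This is only a sketch: the `dartcv4` import path, `Mat.zeros`, `Point`, `Scalar.all`, and the `Size.width`/`Size.height` accessors are assumed from the rest of the package, and the font path is a placeholder.

```dart
import 'package:dartcv4/dartcv.dart' as cv;

void main() {
  final img = cv.Mat.zeros(600, 800, cv.MatType.CV_8UC3);
  final ft2 = cv.FreeType2.create(filename: './mplus-1p-regular.ttf');

  const text = 'Funny text inside the box';
  const fontHeight = 60;
  const thickness = -1; // negative thickness renders filled glyphs

  var (size, baseline) = ft2.getTextSize(text, fontHeight, thickness);
  if (thickness > 0) baseline += thickness;

  // Center the text, then draw it.
  final org = cv.Point((img.cols - size.width) ~/ 2, (img.rows + size.height) ~/ 2);
  ft2.putText(img, text, org, fontHeight, cv.Scalar.all(255), thickness: thickness);

  ft2.dispose();
}
```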
+ /// + /// Also see [putText] + (Size size, int baseline) getTextSize(String text, int fontHeight, int thickness) { + final pBaseline = calloc(); + final pSize = calloc(); + final textPtr = text.toNativeUtf8().cast(); + cvRun( + () => ccontrib.cv_freetype_FreeType2_getTextSize( + ref, + textPtr, + fontHeight, + thickness, + pBaseline, + pSize, + ffi.nullptr, + ), + ); + final rval = (Size.fromPointer(pSize), pBaseline.value); + calloc.free(pBaseline); + return rval; + } + + static final finalizer = OcvFinalizer( + ccontrib.addresses.cv_freetype_FreeType2_close, + ); + + void dispose() { + finalizer.detach(this); + ccontrib.cv_freetype_FreeType2_close(ptr); + } + + @override + cvg.FreeType2 get ref => ptr.ref; +} diff --git a/packages/dartcv/lib/src/core/base.dart b/packages/dartcv/lib/src/core/base.dart index f21ec7c7..8f1d423c 100644 --- a/packages/dartcv/lib/src/core/base.dart +++ b/packages/dartcv/lib/src/core/base.dart @@ -20,6 +20,26 @@ import "exception.dart" show CvException, CvdException; const double CV_PI = 3.1415926535897932384626433832795; const double CV_2PI = 6.283185307179586476925286766559; const double CV_LOG2 = 0.69314718055994530941723212145818; +const int CV_MAX_DIM = 32; +const int CV_CN_MAX = 512; +const int CV_CN_SHIFT = 3; +const int CV_DEPTH_MAX = 1 << CV_CN_SHIFT; +const int CV_MAT_CN_MASK = (CV_CN_MAX - 1) << CV_CN_SHIFT; +// const int CV_MAT_CN(flags) = ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1); +const int CV_MAT_TYPE_MASK = CV_DEPTH_MAX * CV_CN_MAX - 1; +// const int CV_MAT_TYPE(flags) = ((flags) & CV_MAT_TYPE_MASK); +const int CV_MAT_CONT_FLAG_SHIFT = 14; +const int CV_MAT_CONT_FLAG = 1 << CV_MAT_CONT_FLAG_SHIFT; +// const int CV_IS_MAT_CONT(flags) = ((flags) & CV_MAT_CONT_FLAG); +// const int CV_IS_CONT_MAT = CV_IS_MAT_CONT; +const int CV_SUBMAT_FLAG_SHIFT = 15; +const int CV_SUBMAT_FLAG = 1 << CV_SUBMAT_FLAG_SHIFT; +// const int CV_IS_SUBMAT(flags) = ((flags) & CV_MAT_SUBMAT_FLAG); + +int CV_MAT_CN(int flags) => ((flags & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1; +int CV_MAT_TYPE(int flags) => flags & CV_MAT_TYPE_MASK; +bool CV_IS_MAT_CONT(int flags) => (flags & CV_MAT_CONT_FLAG) != 0; +bool CV_IS_SUBMAT(int flags) => (flags & CV_SUBMAT_FLAG) != 0; const int CV_U8_MAX = 255; // uchar const int CV_U8_MIN = 0; @@ -209,8 +229,8 @@ void cvAssert(bool condition, [String? 
msg]) { } // finalizers -typedef NativeFinalizerFunctionT = - ffi.Pointer>; +typedef NativeFinalizerFunctionT + = ffi.Pointer>; ffi.NativeFinalizer OcvFinalizer(NativeFinalizerFunctionT func) => ffi.NativeFinalizer(func.cast()); diff --git a/packages/dartcv/lib/src/core/core.dart b/packages/dartcv/lib/src/core/core.dart index 726d0853..6c09f2e2 100644 --- a/packages/dartcv/lib/src/core/core.dart +++ b/packages/dartcv/lib/src/core/core.dart @@ -22,27 +22,158 @@ import 'scalar.dart'; import 'termcriteria.dart'; import 'vec.dart'; -/// Constants for log levels -const int LOG_LEVEL_SILENT = 0; -const int LOG_LEVEL_FATAL = 1; -const int LOG_LEVEL_ERROR = 2; -const int LOG_LEVEL_WARNING = 3; -const int LOG_LEVEL_INFO = 4; -const int LOG_LEVEL_DEBUG = 5; -const int LOG_LEVEL_VERBOSE = 6; +enum LogLevel { + SILENT(0), + FATAL(1), + ERROR(2), + WARNING(3), + INFO(4), + DEBUG(5), + VERBOSE(6); + + final int value; + const LogLevel(this.value); + + factory LogLevel.fromValue(int value) { + return switch (value) { + 0 => LogLevel.SILENT, + 1 => LogLevel.FATAL, + 2 => LogLevel.ERROR, + 3 => LogLevel.WARNING, + 4 => LogLevel.INFO, + 5 => LogLevel.DEBUG, + 6 => LogLevel.VERBOSE, + _ => throw ArgumentError.value(value, 'value', 'Invalid log level value'), + }; + } +} /// Sets the global logging level. -void setLogLevel(int logLevel) { - cvRun(() => ccore.setLogLevel(logLevel)); +void setLogLevel(LogLevel logLevel) { + cvRun(() => ccore.setLogLevel(logLevel.value)); } /// Gets the global logging level. -int getLogLevel() { +LogLevel getLogLevel() { final p = calloc(); cvRun(() => ccore.getLogLevel(p)); final level = p.value; calloc.free(p); - return level; + return LogLevel.fromValue(level); +} + +void writeLogMessage(LogLevel logLevel, String message) { + final cmsg = message.toNativeUtf8().cast(); + ccore.writeLogMessage(logLevel.value, cmsg); + calloc.free(cmsg); +} + +/// Writes a log message. +/// +/// if [tag], [file], [line], [func] are all null, then it will be the same as [writeLogMessage]. +void writeLogMessageEx( + LogLevel logLevel, + String message, { + String? tag, + String? file, + int? line, + String? func, +}) { + final cmsg = message.toNativeUtf8().cast(); + + if (tag == null && file == null && line == null && func == null) { + ccore.writeLogMessage(logLevel.value, cmsg); + } else { + final ctag = (tag ?? "").toNativeUtf8().cast(); + final cfile = (file ?? "").toNativeUtf8().cast(); + final cfunc = (func ?? "").toNativeUtf8().cast(); + ccore.writeLogMessageEx(logLevel.value, ctag, cfile, line ?? -1, cfunc, cmsg); + calloc.free(ctag); + calloc.free(cfile); + calloc.free(cfunc); + } + + calloc.free(cmsg); +} + +void defaultLogCallback(LogLevel logLevel, String message) { + final logMessage = "[dartcv][${logLevel.name}]: $message"; + // ignore: avoid_print + print(logMessage); +} + +void defaultLogCallbackEx( + LogLevel logLevel, + String tag, + String file, + int line, + String func, + String message, +) { + final logMessage = "[dartcv][${logLevel.name}][$tag]$file:$line:$func: $message"; + // ignore: avoid_print + print(logMessage); +} + +typedef LogCallbackFunction = void Function(LogLevel logLevel, String message); +typedef LogCallbackExFunction = void Function( + LogLevel logLevel, + String tag, + String file, + int line, + String func, + String message, +); + +ffi.NativeCallable? _logCallbackEx; +ffi.NativeCallable? _logCallback; + +void replaceWriteLogMessage({LogCallbackFunction? 
callback}) { + if (callback == null) { + cvRun(() => ccore.replaceWriteLogMessage(ffi.nullptr)); + _logCallback?.close(); + _logCallback = null; + } else { + void cCallback(int logLevel, ffi.Pointer message, int msgLen) { + final messageStr = message.cast().toDartString(length: msgLen); + callback(LogLevel.fromValue(logLevel), messageStr); + } + + final fp = ffi.NativeCallable.listener(cCallback); + cvRun(() => ccore.replaceWriteLogMessage(fp.nativeFunction)); + _logCallback = fp; + } +} + +void replaceWriteLogMessageEx({LogCallbackExFunction? callback}) { + if (callback == null) { + cvRun(() => ccore.replaceWriteLogMessageEx(ffi.nullptr)); + _logCallbackEx?.close(); + _logCallbackEx = null; + } else { + void cCallback( + int logLevel, + ffi.Pointer tag, + int tagLen, + ffi.Pointer file, + int fileLen, + int line, + ffi.Pointer func, + int funcLen, + ffi.Pointer message, + int msgLen, + ) { + final tagStr = tag.cast().toDartString(length: tagLen); + final fileStr = file.cast().toDartString(length: fileLen); + final funcStr = func.cast().toDartString(length: funcLen); + final messageStr = message.cast().toDartString(length: msgLen); + callback(LogLevel.fromValue(logLevel), tagStr, fileStr, line, funcStr, messageStr); + } + + final fp = ffi.NativeCallable.listener(cCallback); + cvRun(() => ccore.replaceWriteLogMessageEx(fp.nativeFunction)); + _logCallbackEx = fp; + } } /// get version diff --git a/packages/dartcv/lib/src/core/mat.dart b/packages/dartcv/lib/src/core/mat.dart index 2a88c23e..15b6a13b 100644 --- a/packages/dartcv/lib/src/core/mat.dart +++ b/packages/dartcv/lib/src/core/mat.dart @@ -17,6 +17,7 @@ import 'point.dart'; import 'rect.dart'; import 'scalar.dart'; import 'size.dart'; +import 'umat.dart'; import 'vec.dart'; class Mat extends CvStruct { @@ -339,7 +340,7 @@ class Mat extends CvStruct { int get channels => ccore.cv_Mat_channels(ref); int get total => ccore.cv_Mat_total(ref); bool get isEmpty => ccore.cv_Mat_empty(ref); - bool get isContinus => ccore.cv_Mat_isContinuous(ref); + bool get isContinuous => ccore.cv_Mat_isContinuous(ref); bool get isSubmatrix => ccore.cv_Mat_isSubmatrix(ref); (int, int, int) get step { final ms = ccore.cv_Mat_step(ref); @@ -1405,6 +1406,15 @@ class Mat extends CvStruct { return dst; } + /// retrieve [UMat] from [Mat] + /// + /// https://docs.opencv.org/4.x/d3/d63/classcv_1_1Mat.html#a6df360cd5a78aa8a3fcf2d445b7e7764 + UMat getUMat(AccessFlag accessFlags, {UMatUsageFlags usageFlags = UMatUsageFlags.USAGE_DEFAULT}) { + final umat = UMat.empty(); + cvRun(() => ccore.cv_Mat_getUMat(ref, accessFlags.value, usageFlags.value, umat.ptr, ffi.nullptr)); + return umat; + } + /// This Method converts single-channel Mat to 2D List List> toList() { final ret = >[]; diff --git a/packages/dartcv/lib/src/core/umat.dart b/packages/dartcv/lib/src/core/umat.dart new file mode 100644 index 00000000..26b93cf5 --- /dev/null +++ b/packages/dartcv/lib/src/core/umat.dart @@ -0,0 +1,470 @@ +// Copyright (c) 2024, rainyl and all contributors. All rights reserved. +// Use of this source code is governed by a Apache-2.0 license +// that can be found in the LICENSE file. 
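The logging rework is easiest to see end to end. Below is a minimal sketch of installing a user-defined logger with the new `LogLevel` enum and callback API; the `dartcv4` import path is an assumption, while the functions themselves are the ones added in this diff.

```dart
import 'package:dartcv4/dartcv.dart' as cv;

void main() {
  // Route OpenCV's log output through a Dart callback.
  cv.replaceWriteLogMessage(
    callback: (cv.LogLevel level, String message) {
      print('[opencv][${level.name}] $message');
    },
  );
  cv.setLogLevel(cv.LogLevel.WARNING);

  // Send a message through the native logger; it arrives in the callback above.
  cv.writeLogMessage(cv.LogLevel.ERROR, 'something went wrong');

  // Passing no callback restores the default logger and releases the native callable.
  cv.replaceWriteLogMessage();
}
```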
+ +import 'dart:ffi' as ffi; + +import 'package:ffi/ffi.dart'; + +import '../g/constants.g.dart'; +import '../g/types.g.dart' as cvg; +import '../native_lib.dart' show ccore; +import 'base.dart'; +import 'mat.dart'; +import 'mat_type.dart'; +import 'rect.dart'; +import 'scalar.dart'; +import 'vec.dart'; + +/// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#a25ac687266568c8b024debd187c15b9b +class UMat extends CvStruct { + UMat._(cvg.UMatPtr ptr, {bool attach = true, int? externalSize}) : super.fromPointer(ptr) { + if (attach) { + finalizer.attach(this, ptr.cast(), detach: this, externalSize: externalSize); + } + } + + static final finalizer = OcvFinalizer(ccore.addresses.cv_UMat_close); + + /// construct from pointer directly + factory UMat.fromPointer(cvg.UMatPtr mat, {bool attach = true, int? externalSize}) => + UMat._(mat, attach: attach, externalSize: externalSize); + + /// create an empty [UMat] + factory UMat.empty({UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT}) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_1(flags.value, p)); + return UMat._(p); + } + + /// constructs 2D matrix and fills it with the specified value. + /// + ///```c++ + /// UMat(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + /// UMat(int rows, int cols, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#a5e1866c206e9c116304a0257bc3a6c50 + factory UMat.create({ + int rows = 0, + int cols = 0, + int r = 0, + int g = 0, + int b = 0, + MatType? type, + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + if (rows == 0 && cols == 0) { + return UMat.empty(flags: flags); + } + + type = type ?? MatType.CV_8UC3; + final scalar = Scalar(b.toDouble(), g.toDouble(), r.toDouble(), 0); + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_3(rows, cols, type!.value, scalar.ref, flags.value, p)); + return UMat._(p); + } + + /// constructs n-dimensional matrix + /// ```c++ + /// UMat(int ndims, const int* sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + /// UMat(int ndims, const int* sizes, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#af159e956ff96c64745c6940a3b1820ba + factory UMat.nd( + List sizes, + MatType type, { + Scalar? s, + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + final cSizes = calloc(sizes.length); + cSizes.cast().asTypedList(sizes.length).setAll(0, sizes); + cvRun( + () => s == null + ? ccore.cv_UMat_create_4(sizes.length, cSizes, type.value, flags.value, p) + : ccore.cv_UMat_create_5(sizes.length, cSizes, type.value, s.ref, flags.value, p), + ); + calloc.free(cSizes); + return UMat._(p); + } + + /// copy constructor + factory UMat.fromUMat(UMat umat) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_6(umat.ref, p)); + return UMat._(p); + } + + /// creates a matrix header for a part of the bigger matrix + /// + /// ```c++ + /// UMat(const UMat& m, const Range& rowRange, const Range& colRange=Range::all()); + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#afeaabd3e9eef98ccef422a81176a4250 + factory UMat.fromRange(UMat umat, {int rowStart = 0, int? rowEnd, int colStart = 0, int? 
colEnd}) { + final p = calloc(); + rowEnd ??= umat.rows; + colEnd ??= umat.cols; + cvRun(() => ccore.cv_UMat_create_7(umat.ref, rowStart, rowEnd!, colStart, colEnd!, p)); + return UMat._(p); + } + + /// ```c++ + /// UMat(const UMat& m, const Rect& roi); + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#aaa3fa04bb82fee6026cc2e85df96a796 + factory UMat.fromRect(UMat umat, Rect roi) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_9(umat.ref, roi.ref, p)); + return UMat._(p); + } + + factory UMat.zeros({ + int rows = 0, + int cols = 0, + MatType type = MatType.CV_8UC1, + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_zeros(rows, cols, type.value, flags.value, p)); + return UMat._(p); + } + + factory UMat.zerosND( + List sizes, + MatType type, { + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + final cSizes = calloc(sizes.length); + cSizes.cast().asTypedList(sizes.length).setAll(0, sizes); + cvRun(() => ccore.cv_UMat_create_zeros_1(sizes.length, cSizes, type.value, flags.value, p)); + calloc.free(cSizes); + return UMat._(p); + } + + factory UMat.ones( + int rows, + int cols, + MatType type, { + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_ones(rows, cols, type.value, flags.value, p)); + return UMat._(p); + } + + factory UMat.onesND( + List sizes, + MatType type, { + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + final cSizes = calloc(sizes.length); + cSizes.cast().asTypedList(sizes.length).setAll(0, sizes); + cvRun(() => ccore.cv_UMat_create_ones_1(sizes.length, cSizes, type.value, flags.value, p)); + calloc.free(cSizes); + return UMat._(p); + } + + factory UMat.eye({ + int rows = 0, + int cols = 0, + MatType type = MatType.CV_8UC1, + UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT, + }) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_eye(rows, cols, type.value, flags.value, p)); + return UMat._(p); + } + + factory UMat.diag(UMat umat, {UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT}) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_create_diag(umat.ref, flags.value, p)); + return UMat._(p); + } + + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#a3d84c72c06ddd55d35b87c3d222d2674 + Mat getMat(AccessFlag accessFlags) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_getMat(ref, accessFlags.value, p, ffi.nullptr)); + return Mat.fromPointer(p); + } + + /// returns a new matrix header for the specified row + UMat row(int i) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_row(ref, i, p, ffi.nullptr)); + return UMat._(p); + } + + /// returns a new matrix header for the specified column + UMat col(int i) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_col(ref, i, p, ffi.nullptr)); + return UMat._(p); + } + + /// .. for the specified row span + UMat rowRange(int start, int end) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_rowRange(ref, start, end, p, ffi.nullptr)); + return UMat._(p); + } + + /// ... for the specified column span + UMat colRange(int start, int end) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_colRange(ref, start, end, p, ffi.nullptr)); + return UMat._(p); + } + + /// ... 
for the specified diagonal + /// (d=0 - the main diagonal, + /// >0 - a diagonal from the upper half, + /// <0 - a diagonal from the lower half) + UMat diag({int d = 0}) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_diag(ref, d, p, ffi.nullptr)); + return UMat._(p); + } + + /// returns deep copy of the matrix, i.e. the data is copied + UMat clone() { + final p = calloc(); + cvRun(() => ccore.cv_UMat_clone(ref, p, ffi.nullptr)); + return UMat._(p); + } + + /// copies those matrix elements to "m" that are marked with non-zero mask elements. + // + // It calls m.create(this->size(), this->type()). + void copyTo(UMat dst, {UMat? mask}) { + cvRun( + () => mask == null + ? ccore.cv_UMat_copyTo(ref, dst.ref, ffi.nullptr) + : ccore.cv_UMat_copyTo_2(ref, mask.ref, dst.ref, ffi.nullptr), + ); + } + + /// converts matrix to another datatype with optional scaling. + UMat convertTo(MatType type, {UMat? dst, double alpha = 1, double beta = 0}) { + dst ??= UMat.empty(); + cvRun(() => ccore.cv_UMat_convertTo(ref, type.value, alpha, beta, dst!.ref, ffi.nullptr)); + return dst; + } + + /// sets some of the matrix elements to s, according to the mask + void setTo(Scalar value, {UMat? mask}) { + mask ??= UMat.empty(); + cvRun(() => ccore.cv_UMat_setTo(ref, value.ref, mask!.ref, ffi.nullptr)); + } + + /// creates alternative matrix header for the same data, with different + /// number of channels and/or different number of rows. see cvReshape. + /// ```c++ + /// UMat reshape(int cn, int rows=0) const; + /// UMat reshape(int cn, int newndims, const int* newsz) const; + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#a25ac687266568c8b024debd187c15b9b + UMat reshape(int cn, {int? rows, List? newSizes}) { + final p = calloc(); + if (newSizes == null) { + cvRun(() => ccore.cv_UMat_reshape(ref, cn, rows ?? 0, p, ffi.nullptr)); + } else { + final cNewSizes = calloc(newSizes.length); + cNewSizes.cast().asTypedList(newSizes.length).setAll(0, newSizes); + cvRun(() => ccore.cv_UMat_reshape_2(ref, cn, newSizes.length, cNewSizes, p, ffi.nullptr)); + calloc.free(cNewSizes); + } + return UMat._(p); + } + + /// matrix transposition by means of matrix expressions + UMat t() { + final p = calloc(); + cvRun(() => ccore.cv_UMat_t(ref, p, ffi.nullptr)); + return UMat._(p); + } + + /// matrix inversion by means of matrix expressions + UMat inv({int method = DECOMP_LU}) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_inv(ref, method, p, ffi.nullptr)); + return UMat._(p); + } + + /// per-element matrix multiplication by means of matrix expressions + UMat mul(UMat m, {double alpha = 1}) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_mul(ref, m.ref, alpha, p, ffi.nullptr)); + return UMat._(p); + } + + /// computes dot-product + double dot(UMat m) { + final p = calloc(); + cvRun(() => ccore.cv_UMat_dot(ref, m.ref, p, ffi.nullptr)); + final rval = p.value; + calloc.free(p); + return rval; + } + + /// allocates new matrix data unless the matrix already has specified size and type. 
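To make the new `UMat` surface concrete, here is a minimal round-trip sketch between `Mat` and `UMat`. The `dartcv4` import path, `Mat.zeros`, and `Scalar.all` are assumptions; `getUMat`, `setTo`, `convertTo`, `t`, `getMat`, and the property reads are the bindings introduced in this diff.

```dart
import 'package:dartcv4/dartcv.dart' as cv;

void main() {
  // Host-side Mat, then a UMat view of it (potentially OpenCL-backed).
  final m = cv.Mat.zeros(4, 4, cv.MatType.CV_8UC1);
  final u = m.getUMat(cv.AccessFlag.ACCESS_RW);

  u.setTo(cv.Scalar.all(7));                  // fill through the UMat
  final f = u.convertTo(cv.MatType.CV_32FC1); // convert type on the UMat side
  final tr = f.t();                           // transpose

  print('u: ${u.rows}x${u.cols} ${u.type.asString()} continuous=${u.isContinuous}');

  // Download back to a plain Mat when host access is needed.
  final back = tr.getMat(cv.AccessFlag.ACCESS_READ);
  print('back: ${back.rows}x${back.cols}, empty=${back.isEmpty}');
}
```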
+ /// + /// https://docs.opencv.org/4.x/d7/d45/classcv_1_1UMat.html#afe3063b40dd6c5d8a0054759c1142631 + void create(List sizes, MatType type, {UMatUsageFlags flags = UMatUsageFlags.USAGE_DEFAULT}) { + final cSizes = calloc(sizes.length); + cSizes.cast().asTypedList(sizes.length).setAll(0, sizes); + cvRun(() => ccore.cv_UMat_createFunc_2(ref, sizes.length, cSizes, type.value, flags.value, ffi.nullptr)); + calloc.free(cSizes); + } + + /// decreases reference counter; + void release() => ccore.cv_UMat_release(ref); + + /// deallocates the matrix data + void deallocate() => ccore.cv_UMat_deallocate(ref); + + /// increases the reference counter; use with care to avoid memleaks + void addref() => ccore.cv_UMat_addref(ref); + + ffi.Pointer handle(AccessFlag flag) => ccore.cv_UMat_handle(ref, flag.value); + + /// returns true iff the matrix data is continuous + bool get isContinuous => ccore.cv_UMat_isContinuous(ref); + + /// returns true if the matrix is a submatrix of another matrix + bool get isSubmatrix => ccore.cv_UMat_isSubmatrix(ref); + + /// returns element size in bytes, + int get elemSize => ccore.cv_UMat_elemSize(ref); + + ///returns the size of element channel in bytes. + int get elemSize1 => ccore.cv_UMat_elemSize1(ref); + + /// returns element type, similar to CV_MAT_TYPE(cvmat->type) + MatType get type => MatType(ccore.cv_UMat_type(ref)); + + /// returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int get depth => ccore.cv_UMat_depth(ref); + + /// returns element type, similar to CV_MAT_CN(cvmat->type) + int get channels => ccore.cv_UMat_channels(ref); + + /// returns step/elemSize1() + int step1([int i = 0]) => ccore.cv_UMat_step1(ref, i); + + /// returns true if matrix data is NULL + bool get empty => ccore.cv_UMat_empty(ref); + + /// returns true if matrix data is NULL + bool get isEmpty => ccore.cv_UMat_empty(ref); + + /// returns the total number of matrix elements + int get total => ccore.cv_UMat_total(ref); + + /// returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, {int depth = -1, bool requireContinuous = true}) => + ccore.cv_UMat_checkVector(ref, elemChannels, depth, requireContinuous); + + /// includes several bit-fields: + /// - the magic signature + /// - continuity flag + /// - depth + /// - number of channels + int get flags => ccore.cv_UMat_flags(ref); + + /// the matrix dimensionality, >= 2 + int get dims => ccore.cv_UMat_dims(ref); + + /// number of rows in the matrix; -1 when the matrix has more than 2 dimensions + int get rows => ccore.cv_UMat_rows(ref); + + /// number of columns in the matrix; -1 when the matrix has more than 2 dimensions + int get cols => ccore.cv_UMat_cols(ref); + + /// usage flags for allocator; recommend do not set directly, instead set during construct/create/getUMat + int get usageFlags => ccore.cv_UMat_usageFlags(ref); + + /// offset of the submatrix (or 0) + int get offset => ccore.cv_UMat_offset(ref); + + /// number of bytes each matrix element/row/plane/dimension occupies + (int, int, int) get step { + final ms = ccore.cv_UMat_step(ref); + return (ms.p[0], ms.p[1], ms.p[2]); + } + + /// dimensional size of the matrix; accessible in various formats + VecI32 get size => VecI32.fromPointer(ccore.cv_UMat_size(ref)); + + @override + cvg.UMat get ref => ptr.ref; + + void dispose() { + finalizer.detach(this); + ccore.cv_UMat_close(ptr); + } + + @override + String toString() => "UMat(addr=0x${ptr.address.toRadixString(16)}, " + 
"type=${type.asString()}, rows=$rows, cols=$cols, channels=$channels)"; + + static const int MAGIC_VAL = 0x42FF0000; + static const int AUTO_STEP = 0; + static const int CONTINUOUS_FLAG = CV_MAT_CONT_FLAG; + static const int SUBMATRIX_FLAG = CV_SUBMAT_FLAG; + + static const int MAGIC_MASK = 0xFFFF0000; + static const int TYPE_MASK = 0x00000FFF; + static const int DEPTH_MASK = 7; +} + +enum UMatUsageFlags { + USAGE_DEFAULT(0), + + // buffer allocation policy is platform and usage specific + USAGE_ALLOCATE_HOST_MEMORY(1 << 0), // 1 + USAGE_ALLOCATE_DEVICE_MEMORY(1 << 1), // 2 + + /// It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY + USAGE_ALLOCATE_SHARED_MEMORY(1 << 2); // 4 + + final int value; + + const UMatUsageFlags(this.value); + + static UMatUsageFlags fromValue(int value) => switch (value) { + 0 => USAGE_DEFAULT, + 1 => USAGE_ALLOCATE_HOST_MEMORY, + 2 => USAGE_ALLOCATE_DEVICE_MEMORY, + 4 => USAGE_ALLOCATE_SHARED_MEMORY, + _ => throw ArgumentError('Unknown value for UMatUsageFlags: $value'), + }; +} + +enum AccessFlag { + ACCESS_READ(1 << 24), // 0x1000000 + ACCESS_WRITE(1 << 25), // 0x2000000 + ACCESS_RW(3 << 24), // 0x3000000 + ACCESS_MASK(3 << 24), // 0x3000000 + ACCESS_FAST(1 << 26); // 0x4000000 + + final int value; + + const AccessFlag(this.value); + + static AccessFlag fromValue(int value) => switch (value) { + 0x1000000 => ACCESS_READ, + 0x2000000 => ACCESS_WRITE, + 0x3000000 => ACCESS_RW, + 0x4000000 => ACCESS_FAST, + _ => throw ArgumentError('Unknown value for AccessFlag: $value'), + }; +} diff --git a/packages/dartcv/lib/src/features2d/features2d.dart b/packages/dartcv/lib/src/features2d/features2d.dart index 6e6f6a34..d5dd626e 100644 --- a/packages/dartcv/lib/src/features2d/features2d.dart +++ b/packages/dartcv/lib/src/features2d/features2d.dart @@ -14,290 +14,17 @@ import '../core/base.dart'; import '../core/dmatch.dart'; import '../core/keypoint.dart'; import '../core/mat.dart'; +import '../core/point.dart'; import '../core/scalar.dart'; import '../core/vec.dart'; import '../g/constants.g.dart'; import '../g/features2d.g.dart' as cvg; import '../native_lib.dart' show cfeatures2d; - -class FlannIndexParams extends CvStruct { - FlannIndexParams.fromPointer(cvg.FlannIndexParamsPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { - if (attach) { - finalizer.attach(this, ptr.cast()); - } - } - - factory FlannIndexParams.empty() { - final p = calloc(); - cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); - return FlannIndexParams.fromPointer(p); - } - - factory FlannIndexParams.fromMap(Map map) { - final params = FlannIndexParams.empty(); - for (final entry in map.entries) { - switch (entry.value) { - case int(): - params.set(entry.key, entry.value as int); - case double(): - params.set(entry.key, entry.value as double); - case String(): - params.set(entry.key, entry.value as String); - case bool(): - params.set(entry.key, entry.value as bool); - case cvg.FlannAlgorithm(): - params.set(entry.key, entry.value as cvg.FlannAlgorithm); - default: - throw ArgumentError('Value type ${entry.value.runtimeType} is not supported for FlannIndexParams'); - } - } - return params; - } - - static final finalizer = OcvFinalizer( - cfeatures2d.addresses.cv_flann_IndexParams_close, - ); - - @override - cvg.FlannIndexParams get ref => ptr.ref; - - String getString(String key, [String defaultValue = ""]) { - final ckey = key.toNativeUtf8().cast(); - final cdefault = defaultValue.toNativeUtf8().cast(); - final crval = calloc>(); - 
cfeatures2d.cv_flann_IndexParams_getString(ref, ckey, cdefault, crval); - calloc.free(ckey); - calloc.free(cdefault); - - final rval = crval.value.cast().toDartString(); - calloc.free(crval); - return rval; - } - - int getInt(String key, [int defaultValue = -1]) { - final ckey = key.toNativeUtf8().cast(); - final crval = calloc(); - cfeatures2d.cv_flann_IndexParams_getInt(ref, ckey, defaultValue, crval); - calloc.free(ckey); - final rval = crval.value; - calloc.free(crval); - return rval; - } - - double getDouble(String key, [double defaultValue = -1]) { - final ckey = key.toNativeUtf8().cast(); - final crval = calloc(); - cfeatures2d.cv_flann_IndexParams_getDouble(ref, ckey, defaultValue, crval); - calloc.free(ckey); - final rval = crval.value; - calloc.free(crval); - return rval; - } - - // bool getBool(String key, [bool defaultValue = false]) { - // final ckey = key.toNativeUtf8().cast(); - // final crval = calloc(); - // cfeatures2d.cv_flann_IndexParams_getBool(ref, ckey, defaultValue, crval); - // calloc.free(ckey); - // final rval = crval.value; - // calloc.free(crval); - // return rval; - // } - - Map getAll() { - final names = VecVecChar(); - final types = VecI32(); - final strValues = VecVecChar(); - final numValues = VecF64(); - - cfeatures2d.cv_flann_IndexParams_getAll(ref, names.ptr, types.ptr, strValues.ptr, numValues.ptr); - - final rval = {}; - final names1 = names.asStringList(); - for (var i = 0; i < names1.length; i++) { - final name = names1[i]; - final type = types[i]; - rval[name] = switch (cvg.FlannIndexType.fromValue(type)) { - cvg.FlannIndexType.FLANN_INDEX_TYPE_8U || - cvg.FlannIndexType.FLANN_INDEX_TYPE_8S || - cvg.FlannIndexType.FLANN_INDEX_TYPE_16U || - cvg.FlannIndexType.FLANN_INDEX_TYPE_16S || - cvg.FlannIndexType.FLANN_INDEX_TYPE_32S => - numValues[i].toInt(), - cvg.FlannIndexType.FLANN_INDEX_TYPE_32F || cvg.FlannIndexType.FLANN_INDEX_TYPE_64F => numValues[i], - cvg.FlannIndexType.FLANN_INDEX_TYPE_BOOL => numValues[i].toInt() != 0, - cvg.FlannIndexType.FLANN_INDEX_TYPE_STRING => names1[i], - cvg.FlannIndexType.FLANN_INDEX_TYPE_ALGORITHM => cvg.FlannAlgorithm.fromValue(numValues[i].toInt()), - }; - } - - return rval; - } - - void setString(String key, String value) { - final ckey = key.toNativeUtf8().cast(); - final cvalue = value.toNativeUtf8().cast(); - cfeatures2d.cv_flann_IndexParams_setString(ref, ckey, cvalue); - calloc.free(ckey); - calloc.free(cvalue); - } - - void setInt(String key, int value) { - final ckey = key.toNativeUtf8().cast(); - cfeatures2d.cv_flann_IndexParams_setInt(ref, ckey, value); - calloc.free(ckey); - } - - void setDouble(String key, double value) { - final ckey = key.toNativeUtf8().cast(); - cfeatures2d.cv_flann_IndexParams_setDouble(ref, ckey, value); - calloc.free(ckey); - } - - void setBool(String key, bool value) { - final ckey = key.toNativeUtf8().cast(); - cfeatures2d.cv_flann_IndexParams_setBool(ref, ckey, value); - calloc.free(ckey); - } - - void setAlgorithm(cvg.FlannAlgorithm value) { - cfeatures2d.cv_flann_IndexParams_setAlgorithm(ref, value.value); - } - - T get(String key, [T? defaultValue]) { - if (T == int) { - return getInt(key, defaultValue as int? ?? -1) as T; - } else if (T == double) { - return getDouble(key, defaultValue as double? ?? -1.0) as T; - } else if (T == String) { - return getString(key, defaultValue as String? ?? 
"") as T; - } else { - throw ArgumentError("Unsupported type: ${T.runtimeType}"); - } - } - - void set(String key, T value) { - switch (value) { - case int(): - setInt(key, value); - case double(): - setDouble(key, value); - case String(): - setString(key, value); - case bool(): - setBool(key, value); - case cvg.FlannAlgorithm(): - setAlgorithm(value); - default: - throw ArgumentError("Unsupported type: ${value.runtimeType}"); - } - } - - @override - String toString() { - return "FlannIndexParams(address=0x${ptr.address.toRadixString(16)})"; - } -} - -class FlannSearchParams extends FlannIndexParams { - FlannSearchParams.fromPointer( - super.ptr, - int checks, - double eps, - bool sorted, - bool exploreAllTrees, [ - super.attach = true, - ]) : _checks = checks, - _eps = eps, - _sorted = sorted, - _exploreAllTrees = exploreAllTrees, - super.fromPointer(); - - factory FlannSearchParams({ - int checks = 32, - double eps = 0.0, - bool sorted = true, - bool exploreAllTrees = false, - }) { - final p = calloc(); - cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); - final params = FlannSearchParams.fromPointer(p, checks, eps, sorted, exploreAllTrees); - - params.setInt('checks', checks); - params.setDouble('eps', eps); - - params.setInt('sorted', sorted ? 1 : 0); - params.setInt('explore_all_trees', exploreAllTrees ? 1 : 0); - - return params; - } - - int _checks; - double _eps; - bool _sorted; - bool _exploreAllTrees; - - int get checks => _checks; - double get eps => _eps; - bool get sorted => _sorted; - bool get exploreAllTrees => _exploreAllTrees; - - set checks(int value) { - _checks = value; - setInt("checks", value); - } - - set eps(double value) { - _eps = value; - setDouble("eps", value); - } - - set sorted(bool value) { - _sorted = value; - setInt("sorted", value ? 1 : 0); - } - - set exploreAllTrees(bool value) { - _exploreAllTrees = value; - setInt("explore_all_trees", value ? 1 : 0); - } - - @override - String toString() { - return "FlannSearchParams(" - "address=0x${ptr.address.toRadixString(16)}, " - "checks=$checks, " - "eps=$eps, " - "sorted=$sorted, " - "exploreAllTrees=$exploreAllTrees)"; - } -} - -class FlannKDTreeIndexParams extends FlannIndexParams { - FlannKDTreeIndexParams.fromPointer(super.ptr, [super.attach = true]) : super.fromPointer(); - - factory FlannKDTreeIndexParams({int trees = 4}) { - final p = calloc(); - cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); - final params = FlannKDTreeIndexParams.fromPointer(p); - - params.setAlgorithm(cvg.FlannAlgorithm.FLANN_INDEX_KDTREE); - params.setInt('trees', trees); - - return params; - } - - int get trees => getInt("trees"); - set trees(int value) => setInt("trees", value); - - @override - String toString() { - return 'FlannKDTreeIndexParams(address=0x${ptr.address.toRadixString(16)}, trees=$trees)'; - } -} +import 'features2d_base.dart'; +import 'features2d_enum.dart'; /// AKAZE is a wrapper around the cv::AKAZE algorithm. -class AKAZE extends CvStruct { +class AKAZE extends Feature2D { AKAZE._(cvg.AKAZEPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -315,27 +42,74 @@ class AKAZE extends CvStruct { return AKAZE._(p); } + /// The AKAZE constructor. 
+ /// + /// https://docs.opencv.org/4.x/d8/d30/classcv_1_1AKAZE.html#ac5d847ee303373416c7ad1950ea046ed + factory AKAZE.create({ + AKAZEDescriptorType descriptorType = AKAZEDescriptorType.DESCRIPTOR_MLDB, + int descriptorSize = 0, + int descriptorChannels = 3, + double threshold = 0.001, + int nOctaves = 4, + int nOctaveLayers = 4, + KAZEDiffusivityType diffusivity = KAZEDiffusivityType.DIFF_PM_G2, + int maxPoints = -1, + }) { + final p = calloc(); + cvRun( + () => cfeatures2d.cv_AKAZE_create_1( + descriptorType.value, + descriptorSize, + descriptorChannels, + threshold, + nOctaves, + nOctaveLayers, + diffusivity.value, + maxPoints, + p, + ), + ); + return AKAZE._(p); + } + /// Detect keypoints in an image using AKAZE. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_AKAZE_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_AKAZE_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; } /// DetectAndCompute keypoints and compute in an image using AKAZE. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - (VecKeyPoint ret, Mat desc) detectAndCompute(Mat src, Mat mask) { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + @override + (VecKeyPoint ret, Mat desc) detectAndCompute( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); cvRun( - () => cfeatures2d.cv_AKAZE_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, ffi.nullptr), + () => cfeatures2d.cv_AKAZE_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + ffi.nullptr, + ), ); - return (ret, desc); + return (keypoints, descriptors); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_AKAZE_close); @@ -347,10 +121,47 @@ class AKAZE extends CvStruct { @override cvg.AKAZE get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.AKAZE"; + + @override + bool get isEmpty => cfeatures2d.cv_AKAZE_empty(ref); + + AKAZEDescriptorType get descriptorType => + AKAZEDescriptorType.fromValue(cfeatures2d.cv_AKAZE_getDescriptorType(ref)); + set descriptorType(AKAZEDescriptorType value) => cfeatures2d.cv_AKAZE_setDescriptorType(ref, value.value); + + int get descriptorSize => cfeatures2d.cv_AKAZE_getDescriptorSize(ref); + set descriptorSize(int value) => cfeatures2d.cv_AKAZE_setDescriptorSize(ref, value); + + int get descriptorChannels => cfeatures2d.cv_AKAZE_getDescriptorChannels(ref); + set descriptorChannels(int value) => cfeatures2d.cv_AKAZE_setDescriptorChannels(ref, value); + + double get threshold => cfeatures2d.cv_AKAZE_getThreshold(ref); + set threshold(double value) => cfeatures2d.cv_AKAZE_setThreshold(ref, value); + + int get nOctaves => cfeatures2d.cv_AKAZE_getNOctaves(ref); + set nOctaves(int value) => cfeatures2d.cv_AKAZE_setNOctaves(ref, value); + + int get nOctaveLayers => cfeatures2d.cv_AKAZE_getNOctaveLayers(ref); + set nOctaveLayers(int value) => cfeatures2d.cv_AKAZE_setNOctaveLayers(ref, value); + + KAZEDiffusivityType get diffusivity 
=> + KAZEDiffusivityType.fromValue(cfeatures2d.cv_AKAZE_getDiffusivity(ref)); + set diffusivity(KAZEDiffusivityType value) => cfeatures2d.cv_AKAZE_setDiffusivity(ref, value.value); + + int get maxPoints => cfeatures2d.cv_AKAZE_getMaxPoints(ref); + set maxPoints(int value) => cfeatures2d.cv_AKAZE_setMaxPoints(ref, value); + + @override + String toString() { + return "AKAZE(addr=0x${ptr.address.toRadixString(16)})"; + } } /// AgastFeatureDetector is a wrapper around the cv::AgastFeatureDetector. -class AgastFeatureDetector extends CvStruct { +class AgastFeatureDetector extends Feature2D { AgastFeatureDetector._(cvg.AgastFeatureDetectorPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -369,14 +180,42 @@ class AgastFeatureDetector extends CvStruct { return AgastFeatureDetector._(p); } + /// create (int threshold=10, bool nonmaxSuppression=true, AgastFeatureDetector::DetectorType type=AgastFeatureDetector::OAST_9_16) + /// + /// https://docs.opencv.org/4.x/d7/d19/classcv_1_1AgastFeatureDetector.html#ae1987fb24e86701236773dfa7f6dabee + factory AgastFeatureDetector.create({ + int threshold = 10, + bool nonmaxSuppression = true, + AgastDetectorType type = AgastDetectorType.OAST_9_16, + }) { + final p = calloc(); + cvRun(() => cfeatures2d.cv_AgastFeatureDetector_create_1(threshold, nonmaxSuppression, type.value, p)); + return AgastFeatureDetector._(p); + } + /// Detect keypoints in an image using AgastFeatureDetector. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_AgastFeatureDetector_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun( + () => cfeatures2d.cv_AgastFeatureDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr), + ); + return keypoints; + } + + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }) { + throw UnsupportedError("This function is not supported by AgastFeatureDetector"); } static final finalizer = OcvFinalizer( @@ -390,10 +229,30 @@ class AgastFeatureDetector extends CvStruct { @override cvg.AgastFeatureDetector get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.AgastFeatureDetector"; + + @override + bool get isEmpty => cfeatures2d.cv_AgastFeatureDetector_empty(ref); + + int get threshold => cfeatures2d.cv_AgastFeatureDetector_getThreshold(ref); + set threshold(int value) => cfeatures2d.cv_AgastFeatureDetector_setThreshold(ref, value); + + bool get nonmaxSuppression => cfeatures2d.cv_AgastFeatureDetector_getNonmaxSuppression(ref); + set nonmaxSuppression(bool value) => cfeatures2d.cv_AgastFeatureDetector_setNonmaxSuppression(ref, value); + + AgastDetectorType get type => AgastDetectorType.fromValue(cfeatures2d.cv_AgastFeatureDetector_getType(ref)); + set type(AgastDetectorType value) => cfeatures2d.cv_AgastFeatureDetector_setType(ref, value.value); + + @override + String toString() { + return "AgastFeatureDetector(addr=0x${ptr.address.toRadixString(16)})"; + } } /// BRISK is a wrapper around the cv::BRISK algorithm. 
-class BRISK extends CvStruct { +class BRISK extends Feature2D { BRISK._(cvg.BRISKPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -404,34 +263,148 @@ class BRISK extends CvStruct { /// returns a new BRISK algorithm /// /// For further details, please see: - /// https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html + /// https://docs.opencv.org/4.x/de/dbf/classcv_1_1BRISK.html factory BRISK.empty() { final p = calloc(); cvRun(() => cfeatures2d.cv_BRISK_create(p)); return BRISK._(p); } + /// The BRISK constructor + /// + /// [thresh] AGAST detection threshold score. + /// [octaves] detection octaves. Use 0 to do single scale. + /// [patternScale] apply this scale to the pattern used for sampling the neighbourhood of a keypoint. + /// + ///```c++ + /// CV_WRAP static Ptr create(int thresh=30, int octaves=3, float patternScale=1.0f); + ///``` + /// https://docs.opencv.org/4.x/de/dbf/classcv_1_1BRISK.html#ad3b513ded80119670e5efa90a31705ac + factory BRISK.create({int thresh = 30, int octaves = 3, double patternScale = 1.0}) { + final p = calloc(); + cvRun(() => cfeatures2d.cv_BRISK_create_3(thresh, octaves, patternScale, p)); + return BRISK._(p); + } + + /// The BRISK constructor for a custom pattern + /// + /// [radiusList] defines the radii (in pixels) where the samples around a keypoint are taken (for keypoint scale 1). + /// [numberList] defines the number of sampling points on the sampling circle. Must be the same size as radiusList.. + /// [dMax] threshold for the short pairings used for descriptor formation (in pixels for keypoint scale 1). + /// [dMin] threshold for the long pairings used for orientation determination (in pixels for keypoint scale 1). + /// [indexChange] index remapping of the bits. + /// + /// ```c++ + /// CV_WRAP static Ptr create(const std::vector &radiusList, const std::vector &numberList, + /// float dMax=5.85f, float dMin=8.2f, const std::vector& indexChange=std::vector()); + /// ``` + /// + /// https://docs.opencv.org/4.x/de/dbf/classcv_1_1BRISK.html#ad3b513ded80119670e5efa90a31705ac + factory BRISK.create1({ + required List radiusList, + required List numberList, + double dMax = 5.85, + double dMin = 8.2, + List? indexChange, + }) { + final p = calloc(); + final radiusList_ = radiusList.f32; + final numberList_ = numberList.i32; + final indexChange_ = indexChange?.i32 ?? VecI32(); + cvRun( + () => cfeatures2d.cv_BRISK_create_1(radiusList_.ref, numberList_.ref, dMax, dMin, indexChange_.ref, p), + ); + radiusList_.dispose(); + numberList_.dispose(); + indexChange_.dispose(); + return BRISK._(p); + } + + /// The BRISK constructor for a custom pattern, detection threshold and octaves + /// + /// [thresh] AGAST detection threshold score. + /// [octaves] detection octaves. Use 0 to do single scale. + /// [radiusList] defines the radii (in pixels) where the samples around a keypoint are taken (for keypoint scale 1). + /// [numberList] defines the number of sampling points on the sampling circle. Must be the same size as radiusList.. + /// [dMax] threshold for the short pairings used for descriptor formation (in pixels for keypoint scale 1). + /// [dMin] threshold for the long pairings used for orientation determination (in pixels for keypoint scale 1). + /// [indexChange] index remapping of the bits. 
+ /// + /// ```c++ + /// CV_WRAP static Ptr create(int thresh, int octaves, const std::vector &radiusList, + /// const std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + /// const std::vector& indexChange=std::vector()); + ///``` + /// https://docs.opencv.org/4.x/de/dbf/classcv_1_1BRISK.html#a4204a459edce314ace1c2bd783e2b185 + factory BRISK.create2({ + required int thresh, + required int octaves, + required List radiusList, + required List numberList, + double dMax = 5.85, + double dMin = 8.2, + List? indexChange, + }) { + final p = calloc(); + final radiusList_ = radiusList.f32; + final numberList_ = numberList.i32; + final indexChange_ = indexChange?.i32 ?? VecI32(); + cvRun( + () => cfeatures2d.cv_BRISK_create_2( + thresh, + octaves, + radiusList_.ref, + numberList_.ref, + dMax, + dMin, + indexChange_.ref, + p, + ), + ); + radiusList_.dispose(); + numberList_.dispose(); + indexChange_.dispose(); + return BRISK._(p); + } + /// Detect keypoints in an image using BRISK. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_BRISK_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_BRISK_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; } /// DetectAndCompute keypoints and compute in an image using BRISK. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - (VecKeyPoint, Mat) detectAndCompute(Mat src, Mat mask) { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }) { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); cvRun( - () => cfeatures2d.cv_BRISK_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, ffi.nullptr), + () => cfeatures2d.cv_BRISK_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + ffi.nullptr, + ), ); - return (ret, desc); + return (keypoints, descriptors); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_BRISK_close); @@ -443,24 +416,34 @@ class BRISK extends CvStruct { @override cvg.BRISK get ref => ptr.ref; -} -enum FastFeatureDetectorType { - /// FastFeatureDetector::TYPE_5_8 - TYPE_5_8(0), + @override + String get defaultName => "${super.defaultName}.BRISK"; + + @override + bool get isEmpty => cfeatures2d.cv_BRISK_empty(ref); + + int get threshold => cfeatures2d.cv_BRISK_getThreshold(ref); + set threshold(int value) => cfeatures2d.cv_BRISK_setThreshold(ref, value); - /// FastFeatureDetector::TYPE_7_12 - TYPE_7_12(1), + /// Set detection octaves. + /// [octaves] detection octaves. Use 0 to do single scale. + int get octaves => cfeatures2d.cv_BRISK_getOctaves(ref); + set octaves(int value) => cfeatures2d.cv_BRISK_setOctaves(ref, value); - /// FastFeatureDetector::TYPE_9_16 - TYPE_9_16(2); + /// Set detection patternScale. + /// [patternScale] apply this scale to the pattern used for sampling the neighbourhood of a keypoint. 
+ double get patternScale => cfeatures2d.cv_BRISK_getPatternScale(ref); + set patternScale(double value) => cfeatures2d.cv_BRISK_setPatternScale(ref, value); - const FastFeatureDetectorType(this.value); - final int value; + @override + String toString() { + return "BRISK(addr=0x${ptr.address.toRadixString(16)})"; + } } /// FastFeatureDetector is a wrapper around the cv::FastFeatureDetector. -class FastFeatureDetector extends CvStruct { +class FastFeatureDetector extends Feature2D { FastFeatureDetector._(cvg.FastFeatureDetectorPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -497,10 +480,25 @@ class FastFeatureDetector extends CvStruct { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_FastFeatureDetector_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun( + () => cfeatures2d.cv_FastFeatureDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr), + ); + return keypoints; + } + + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }) { + throw UnsupportedError("This function/feature is not supported."); } static final finalizer = OcvFinalizer( @@ -514,10 +512,31 @@ class FastFeatureDetector extends CvStruct { @override cvg.FastFeatureDetector get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.FastFeatureDetector"; + + @override + bool get isEmpty => cfeatures2d.cv_FastFeatureDetector_empty(ref); + + int get threshold => cfeatures2d.cv_FastFeatureDetector_getThreshold(ref); + set threshold(int value) => cfeatures2d.cv_FastFeatureDetector_setThreshold(ref, value); + + bool get nonmaxSuppression => cfeatures2d.cv_FastFeatureDetector_getNonmaxSuppression(ref); + set nonmaxSuppression(bool value) => cfeatures2d.cv_FastFeatureDetector_setNonmaxSuppression(ref, value); + + FastFeatureDetectorType get type => + FastFeatureDetectorType.fromValue(cfeatures2d.cv_FastFeatureDetector_getType(ref)); + set type(FastFeatureDetectorType value) => cfeatures2d.cv_FastFeatureDetector_setType(ref, value.value); + + @override + String toString() { + return "FastFeatureDetector(addr=0x${ptr.address.toRadixString(16)})"; + } } /// GFTTDetector is a wrapper around the cv::GFTTDetector. 
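Editor's note: the wrappers above now expose the underlying algorithm parameters as Dart getters/setters, so a detector can be re-tuned after construction instead of being recreated. A small sketch — the detector choices, values, and import path are illustrative:

```dart
import 'package:dartcv4/dartcv.dart' as cv; // illustrative import path

void tuneDetectors() {
  // Re-tune BRISK after construction via the new property accessors.
  final brisk = cv.BRISK.create(thresh: 30, octaves: 3);
  brisk.threshold = 60; // fewer, stronger corners
  brisk.patternScale = 1.2;

  // FastFeatureDetector settings can be adjusted the same way.
  final fast = cv.FastFeatureDetector.empty();
  fast.threshold = 25;
  fast.nonmaxSuppression = true;
  fast.type = cv.FastFeatureDetectorType.TYPE_9_16;
}
```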
-class GFTTDetector extends CvStruct { +class GFTTDetector extends Feature2D { GFTTDetector._(cvg.GFTTDetectorPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -536,14 +555,83 @@ class GFTTDetector extends CvStruct { return GFTTDetector._(p); } + /// ```c++ + /// CV_WRAP static Ptr create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + /// int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); + /// ``` + factory GFTTDetector.create({ + int maxFeatures = 1000, + double qualityLevel = 0.01, + double minDistance = 1, + int blockSize = 3, + bool useHarrisDetector = false, + double k = 0.04, + }) { + final p = calloc(); + cvRun( + () => cfeatures2d.cv_GFTTDetector_create_2( + maxFeatures, + qualityLevel, + minDistance, + blockSize, + useHarrisDetector, + k, + p, + ), + ); + return GFTTDetector._(p); + } + + /// ```c++ + /// CV_WRAP static Ptr create( int maxCorners, double qualityLevel, double minDistance, + /// int blockSize, int gradiantSize, bool useHarrisDetector=false, double k=0.04 ); + /// ``` + factory GFTTDetector.create1({ + int maxCorners = 1000, + double qualityLevel = 0.01, + double minDistance = 1, + int blockSize = 3, + int gradiantSize = 3, + bool useHarrisDetector = false, + double k = 0.04, + }) { + final p = calloc(); + cvRun( + () => cfeatures2d.cv_GFTTDetector_create_1( + maxCorners, + qualityLevel, + minDistance, + blockSize, + gradiantSize, + useHarrisDetector, + k, + p, + ), + ); + return GFTTDetector._(p); + } + /// Detect keypoints in an image using GFTTDetector. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_GFTTDetector_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_GFTTDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; + } + + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? 
descriptors, + bool useProvidedKeypoints = false, + }) { + throw UnsupportedError("This function/feature is not supported."); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_GFTTDetector_close); @@ -555,10 +643,42 @@ class GFTTDetector extends CvStruct { @override cvg.GFTTDetector get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.GFTTDetector"; + + @override + bool get isEmpty => cfeatures2d.cv_GFTTDetector_empty(ref); + + int get maxFeatures => cfeatures2d.cv_GFTTDetector_getMaxFeatures(ref); + set maxFeatures(int value) => cfeatures2d.cv_GFTTDetector_setMaxFeatures(ref, value); + + double get qualityLevel => cfeatures2d.cv_GFTTDetector_getQualityLevel(ref); + set qualityLevel(double value) => cfeatures2d.cv_GFTTDetector_setQualityLevel(ref, value); + + double get minDistance => cfeatures2d.cv_GFTTDetector_getMinDistance(ref); + set minDistance(double value) => cfeatures2d.cv_GFTTDetector_setMinDistance(ref, value); + + int get blockSize => cfeatures2d.cv_GFTTDetector_getBlockSize(ref); + set blockSize(int value) => cfeatures2d.cv_GFTTDetector_setBlockSize(ref, value); + + int get gradientSize => cfeatures2d.cv_GFTTDetector_getGradientSize(ref); + set gradientSize(int value) => cfeatures2d.cv_GFTTDetector_setGradientSize(ref, value); + + bool get harrisDetector => cfeatures2d.cv_GFTTDetector_getHarrisDetector(ref); + set harrisDetector(bool value) => cfeatures2d.cv_GFTTDetector_setHarrisDetector(ref, value); + + double get k => cfeatures2d.cv_GFTTDetector_getK(ref); + set k(double value) => cfeatures2d.cv_GFTTDetector_setK(ref, value); + + @override + String toString() { + return "GFTTDetector(addr=0x${ptr.address.toRadixString(16)})"; + } } /// KAZE is a wrapper around the cv::KAZE. -class KAZE extends CvStruct { +class KAZE extends Feature2D { KAZE._(cvg.KAZEPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -576,25 +696,83 @@ class KAZE extends CvStruct { return KAZE._(p); } + //// The KAZE constructor + /// + /// [extended] Set to enable extraction of extended (128-byte) descriptor. + /// [upright] Set to enable use of upright descriptors (non rotation-invariant). + /// [threshold] Detector response threshold to accept point + /// [nOctaves] Maximum octave evolution of the image + /// [nOctaveLayers] Default number of sublevels per scale level + /// [diffusivity] Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or DIFF_CHARBONNIER + /// ```c++ + /// CV_WRAP static Ptr create(bool extended=false, bool upright=false, + /// float threshold = 0.001f, + /// int nOctaves = 4, int nOctaveLayers = 4, + /// KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2); + /// ``` + /// + /// https://docs.opencv.org/4.x/d3/d61/classcv_1_1KAZE.html#a2fdb3848a465a55bc39941f5af99f7e3 + factory KAZE.create({ + bool extended = false, + bool upright = false, + double threshold = 0.001, + int nOctaves = 4, + int nOctaveLayers = 4, + KAZEDiffusivityType diffusivity = KAZEDiffusivityType.DIFF_PM_G2, + }) { + final p = calloc(); + cvRun( + () => cfeatures2d.cv_KAZE_create_1( + extended, + upright, + threshold, + nOctaves, + nOctaveLayers, + diffusivity.value, + p, + ), + ); + return KAZE._(p); + } + /// Detect keypoints in an image using KAZE. 
/// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_KAZE_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_KAZE_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; } /// DetectAndCompute keypoints and compute in an image using KAZE. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - (VecKeyPoint, Mat) detectAndCompute(Mat src, Mat mask) { - final desc = Mat.empty(); - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_KAZE_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, ffi.nullptr)); - return (ret, desc); + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }) { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); + cvRun( + () => cfeatures2d.cv_KAZE_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + ffi.nullptr, + ), + ); + return (keypoints, descriptors); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_KAZE_close); @@ -606,10 +784,40 @@ class KAZE extends CvStruct { @override cvg.KAZE get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.KAZE"; + + @override + bool get isEmpty => cfeatures2d.cv_KAZE_empty(ref); + + set extended(bool extended) => cfeatures2d.cv_KAZE_setExtended(ref, extended); + bool get extended => cfeatures2d.cv_KAZE_getExtended(ref); + + set upright(bool upright) => cfeatures2d.cv_KAZE_setUpright(ref, upright); + bool get upright => cfeatures2d.cv_KAZE_getUpright(ref); + + set threshold(double threshold) => cfeatures2d.cv_KAZE_setThreshold(ref, threshold); + double get threshold => cfeatures2d.cv_KAZE_getThreshold(ref); + + set octaves(int octaves) => cfeatures2d.cv_KAZE_setNOctaves(ref, octaves); + int get octaves => cfeatures2d.cv_KAZE_getNOctaves(ref); + + set nOctaveLayers(int octaveLayers) => cfeatures2d.cv_KAZE_setNOctaveLayers(ref, octaveLayers); + int get nOctaveLayers => cfeatures2d.cv_KAZE_getNOctaveLayers(ref); + + set diffusivity(KAZEDiffusivityType diff) => cfeatures2d.cv_KAZE_setDiffusivity(ref, diff.value); + KAZEDiffusivityType get diffusivity => + KAZEDiffusivityType.fromValue(cfeatures2d.cv_KAZE_getDiffusivity(ref)); + + @override + String toString() { + return "KAZE(addr=0x${ptr.address.toRadixString(16)})"; + } } /// MSER is a wrapper around the cv::MSER. 
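Editor's note: `KAZE.create` above mirrors the C++ defaults, and the new property accessors make the threshold and diffusivity adjustable afterwards. A brief sketch — the input `gray`, the chosen values, and the import path are illustrative:

```dart
import 'package:dartcv4/dartcv.dart' as cv; // illustrative import path

(cv.VecKeyPoint, cv.Mat) kazeFeatures(cv.Mat gray) {
  final kaze = cv.KAZE.create(
    extended: true, // extract the extended (128-byte) descriptor
    diffusivity: cv.KAZEDiffusivityType.DIFF_CHARBONNIER,
  );
  kaze.threshold = 0.0005; // accept weaker detector responses

  // The mask is a required positional argument here; pass an empty Mat for "no mask".
  return kaze.detectAndCompute(gray, cv.Mat.empty());
}
```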
-class MSER extends CvStruct { +class MSER extends Feature2D { MSER._(cvg.MSERPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -627,14 +835,74 @@ class MSER extends CvStruct { return MSER._(p); } + /// Full constructor for %MSER detector + /// + /// delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$ + /// min_area prune the area which smaller than minArea + /// max_area prune the area which bigger than maxArea + /// max_variation prune the area have similar size to its children + /// min_diversity for color image, trace back to cut off mser with diversity less than min_diversity + /// max_evolution for color image, the evolution steps + /// area_threshold for color image, the area threshold to cause re-initialize + /// min_margin for color image, ignore too small margin + /// edge_blur_size for color image, the aperture size for edge blur + /// ```c++ + ///CV_WRAP static Ptr create( int delta=5, int min_area=60, int max_area=14400, + /// double max_variation=0.25, double min_diversity=.2, + /// int max_evolution=200, double area_threshold=1.01, + /// double min_margin=0.003, int edge_blur_size=5 ); + /// ``` + /// + factory MSER.create({ + int delta = 5, + int minArea = 60, + int maxArea = 14400, + double maxVariation = 0.25, + double minDiversity = 0.2, + int maxEvolution = 200, + double areaThreshold = 1.01, + double minMargin = 0.003, + int edgeBlurSize = 5, + }) { + final p = calloc(); + cvRun( + () => cfeatures2d.cv_MSER_create_1( + delta, + minArea, + maxArea, + maxVariation, + minDiversity, + maxEvolution, + areaThreshold, + minMargin, + edgeBlurSize, + p, + ), + ); + return MSER._(p); + } + /// Detect keypoints in an image using MSER. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_MSER_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_MSER_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; + } + + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? 
descriptors, + bool useProvidedKeypoints = false, + }) { + throw UnsupportedError("This fuction/feature is not supported."); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_MSER_close); @@ -646,18 +914,51 @@ class MSER extends CvStruct { @override cvg.MSER get ref => ptr.ref; -} -enum ORBScoreType { - HARRIS_SCORE(0), - FAST_SCORE(1); + @override + String get defaultName => "${super.defaultName}.MSER"; + + @override + bool get isEmpty => cfeatures2d.cv_MSER_empty(ref); + + set delta(int delta) => cfeatures2d.cv_MSER_setDelta(ref, delta); + int get delta => cfeatures2d.cv_MSER_getDelta(ref); + + set minArea(int minArea) => cfeatures2d.cv_MSER_setMinArea(ref, minArea); + int get minArea => cfeatures2d.cv_MSER_getMinArea(ref); - const ORBScoreType(this.value); - final int value; + set maxArea(int maxArea) => cfeatures2d.cv_MSER_setMaxArea(ref, maxArea); + int get maxArea => cfeatures2d.cv_MSER_getMaxArea(ref); + + set maxVariation(double maxVariation) => cfeatures2d.cv_MSER_setMaxVariation(ref, maxVariation); + double get maxVariation => cfeatures2d.cv_MSER_getMaxVariation(ref); + + set minDiversity(double minDiversity) => cfeatures2d.cv_MSER_setMinDiversity(ref, minDiversity); + double get minDiversity => cfeatures2d.cv_MSER_getMinDiversity(ref); + + set maxEvolution(int maxEvolution) => cfeatures2d.cv_MSER_setMaxEvolution(ref, maxEvolution); + int get maxEvolution => cfeatures2d.cv_MSER_getMaxEvolution(ref); + + set areaThreshold(double areaThreshold) => cfeatures2d.cv_MSER_setAreaThreshold(ref, areaThreshold); + double get areaThreshold => cfeatures2d.cv_MSER_getAreaThreshold(ref); + + set minMargin(double minMargin) => cfeatures2d.cv_MSER_setMinMargin(ref, minMargin); + double get minMargin => cfeatures2d.cv_MSER_getMinMargin(ref); + + set edgeBlurSize(int edgeBlurSize) => cfeatures2d.cv_MSER_setEdgeBlurSize(ref, edgeBlurSize); + int get edgeBlurSize => cfeatures2d.cv_MSER_getEdgeBlurSize(ref); + + set pass2Only(bool pass2Only) => cfeatures2d.cv_MSER_setPass2Only(ref, pass2Only); + bool get pass2Only => cfeatures2d.cv_MSER_getPass2Only(ref); + + @override + String toString() { + return "MSER(addr=0x${ptr.address.toRadixString(16)})"; + } } /// ORB is a wrapper around the cv::ORB. -class ORB extends CvStruct { +class ORB extends Feature2D { ORB._(cvg.ORBPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -712,37 +1013,40 @@ class ORB extends CvStruct { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_ORB_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_ORB_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; } /// DetectAndCompute keypoints and compute in an image using ORB. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + @override (VecKeyPoint, Mat) detectAndCompute( Mat src, Mat mask, { VecKeyPoint? keypoints, - Mat? description, + Mat? 
descriptors, bool useProvidedKeypoints = false, }) { keypoints ??= VecKeyPoint(); - description ??= Mat.empty(); + descriptors ??= Mat.empty(); cvRun( () => cfeatures2d.cv_ORB_detectAndCompute( ref, src.ref, mask.ref, + descriptors!.ref, keypoints!.ptr, - description!.ref, useProvidedKeypoints, ffi.nullptr, ), ); - return (keypoints, description); + return (keypoints, descriptors); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_ORB_close); @@ -754,186 +1058,48 @@ class ORB extends CvStruct { @override cvg.ORB get ref => ptr.ref; -} - -class SimpleBlobDetectorParams extends CvStruct { - SimpleBlobDetectorParams._(ffi.Pointer ptr, [bool attach = true]) - : super.fromPointer(ptr) { - if (attach) { - finalizer.attach(this, ptr.cast(), detach: this); - } - } - - factory SimpleBlobDetectorParams.empty() { - final p = calloc(); - cvRun(() => cfeatures2d.cv_SimpleBlobDetectorParams_create(p)); - return SimpleBlobDetectorParams._(p); - } - - factory SimpleBlobDetectorParams({ - int? blobColor, - bool? filterByArea, - bool? filterByCircularity, - bool? filterByColor, - bool? filterByConvexity, - bool? filterByInertia, - double? maxArea, - double? maxCircularity, - double? maxConvexity, - double? maxInertiaRatio, - double? maxThreshold, - double? minArea, - double? minCircularity, - double? minConvexity, - double? minDistBetweenBlobs, - double? minInertiaRatio, - int? minRepeatability, - double? minThreshold, - double? thresholdStep, - }) { - final p = calloc(); - if (blobColor != null) p.ref.blobColor = blobColor; - if (filterByArea != null) p.ref.filterByArea = filterByArea; - if (filterByCircularity != null) { - p.ref.filterByCircularity = filterByCircularity; - } - if (filterByColor != null) p.ref.filterByColor = filterByColor; - if (filterByConvexity != null) p.ref.filterByConvexity = filterByConvexity; - if (filterByInertia != null) p.ref.filterByInertia = filterByInertia; - if (maxArea != null) p.ref.maxArea = maxArea; - if (maxCircularity != null) p.ref.maxCircularity = maxCircularity; - if (maxConvexity != null) p.ref.maxConvexity = maxConvexity; - if (maxInertiaRatio != null) p.ref.maxInertiaRatio = maxInertiaRatio; - if (maxThreshold != null) p.ref.maxThreshold = maxThreshold; - if (minArea != null) p.ref.minArea = minArea; - if (minCircularity != null) p.ref.minCircularity = minCircularity; - if (minConvexity != null) p.ref.minConvexity = minConvexity; - if (minDistBetweenBlobs != null) { - p.ref.minDistBetweenBlobs = minDistBetweenBlobs; - } - if (minInertiaRatio != null) p.ref.minInertiaRatio = minInertiaRatio; - if (minRepeatability != null) p.ref.minRepeatability = minRepeatability; - if (minThreshold != null) p.ref.minThreshold = minThreshold; - if (thresholdStep != null) p.ref.thresholdStep = thresholdStep; - - return SimpleBlobDetectorParams._(p); - } - - factory SimpleBlobDetectorParams.fromNative(cvg.SimpleBlobDetectorParams r) => SimpleBlobDetectorParams( - blobColor: r.blobColor, - filterByArea: r.filterByArea, - filterByCircularity: r.filterByCircularity, - filterByColor: r.filterByColor, - filterByConvexity: r.filterByConvexity, - filterByInertia: r.filterByInertia, - maxArea: r.maxArea, - maxCircularity: r.maxCircularity, - maxConvexity: r.maxConvexity, - maxInertiaRatio: r.maxInertiaRatio, - maxThreshold: r.maxThreshold, - minArea: r.minArea, - minCircularity: r.minCircularity, - minConvexity: r.minConvexity, - minDistBetweenBlobs: r.minDistBetweenBlobs, - minInertiaRatio: r.minInertiaRatio, - minRepeatability: r.minRepeatability, - minThreshold: 
r.minThreshold, - thresholdStep: r.thresholdStep, - ); - factory SimpleBlobDetectorParams.fromPointer( - ffi.Pointer p, [ - bool attach = true, - ]) => - SimpleBlobDetectorParams._(p, attach); - - @override - cvg.SimpleBlobDetectorParams get ref => ptr.ref; - - static final finalizer = ffi.NativeFinalizer(calloc.nativeFree); - - void dispose() { - finalizer.detach(this); - calloc.free(ptr); - } - int get blobColor => ref.blobColor; - set blobColor(int value) => ref.blobColor = value; - - bool get filterByArea => ref.filterByArea; - set filterByArea(bool value) => ref.filterByArea = value; - - bool get filterByCircularity => ref.filterByCircularity; - set filterByCircularity(bool value) => ref.filterByCircularity = value; - - bool get filterByColor => ref.filterByColor; - set filterByColor(bool value) => ref.filterByColor = value; - - bool get filterByConvexity => ref.filterByConvexity; - set filterByConvexity(bool value) => ref.filterByConvexity = value; - - bool get filterByInertia => ref.filterByInertia; - set filterByInertia(bool value) => ref.filterByInertia = value; - - double get maxArea => ref.maxArea; - set maxArea(double v) => ref.maxArea = v; - - double get maxCircularity => ref.maxCircularity; - set maxCircularity(double v) => ref.maxCircularity = v; - - double get maxConvexity => ref.maxConvexity; - set maxConvexity(double v) => ref.maxConvexity = v; + @override + String get defaultName => "${super.defaultName}.ORB"; - double get maxInertiaRatio => ref.maxInertiaRatio; - set maxInertiaRatio(double v) => ref.maxInertiaRatio = v; + @override + bool get isEmpty => cfeatures2d.cv_ORB_empty(ref); - double get maxThreshold => ref.maxThreshold; - set maxThreshold(double v) => ref.maxThreshold = v; + int get maxFeatures => cfeatures2d.cv_ORB_getMaxFeatures(ref); + set maxFeatures(int value) => cfeatures2d.cv_ORB_setMaxFeatures(ref, value); - double get minArea => ref.minArea; - set minArea(double v) => ref.minArea = v; + double get scaleFactor => cfeatures2d.cv_ORB_getScaleFactor(ref); + set scaleFactor(double value) => cfeatures2d.cv_ORB_setScaleFactor(ref, value); - double get minCircularity => ref.minCircularity; - set minCircularity(double v) => ref.minCircularity = v; + int get nLevels => cfeatures2d.cv_ORB_getNLevels(ref); + set nLevels(int value) => cfeatures2d.cv_ORB_setNLevels(ref, value); - double get minConvexity => ref.minConvexity; - set minConvexity(double v) => ref.minConvexity = v; + int get edgeThreshold => cfeatures2d.cv_ORB_getEdgeThreshold(ref); + set edgeThreshold(int value) => cfeatures2d.cv_ORB_setEdgeThreshold(ref, value); - double get minDistBetweenBlobs => ref.minDistBetweenBlobs; - set minDistBetweenBlobs(double v) => ref.minDistBetweenBlobs = v; + int get firstLevel => cfeatures2d.cv_ORB_getFirstLevel(ref); + set firstLevel(int value) => cfeatures2d.cv_ORB_setFirstLevel(ref, value); - double get minInertiaRatio => ref.minInertiaRatio; - set minInertiaRatio(double v) => ref.minInertiaRatio = v; + int get WTA_K => cfeatures2d.cv_ORB_getWTA_K(ref); + set WTA_K(int value) => cfeatures2d.cv_ORB_setWTA_K(ref, value); - int get minRepeatability => ref.minRepeatability; - set minRepeatability(int v) => ref.minRepeatability = v; + ORBScoreType get scoreType => ORBScoreType.fromValue(cfeatures2d.cv_ORB_getScoreType(ref)); + set scoreType(ORBScoreType value) => cfeatures2d.cv_ORB_setScoreType(ref, value.value); - double get minThreshold => ref.minThreshold; - set minThreshold(double v) => ref.minThreshold = v; + int get patchSize => 
cfeatures2d.cv_ORB_getPatchSize(ref); + set patchSize(int value) => cfeatures2d.cv_ORB_setPatchSize(ref, value); - double get thresholdStep => ref.thresholdStep; - set thresholdStep(double v) => ref.thresholdStep = v; + int get fastThreshold => cfeatures2d.cv_ORB_getFastThreshold(ref); + set fastThreshold(int value) => cfeatures2d.cv_ORB_setFastThreshold(ref, value); @override - List get props => [ - maxArea, - minArea, - minConvexity, - maxConvexity, - minInertiaRatio, - maxInertiaRatio, - minThreshold, - maxThreshold, - thresholdStep, - minDistBetweenBlobs, - minRepeatability, - minThreshold, - thresholdStep, - minDistBetweenBlobs, - ]; + String toString() { + return "ORB(addr=0x${ptr.address.toRadixString(16)})"; + } } /// SimpleBlobDetector is a wrapper around the cv::SimpleBlobDetector. -class SimpleBlobDetector extends CvStruct { +class SimpleBlobDetector extends Feature2D { SimpleBlobDetector._(cvg.SimpleBlobDetectorPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -963,10 +1129,25 @@ class SimpleBlobDetector extends CvStruct { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_SimpleBlobDetector_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun( + () => cfeatures2d.cv_SimpleBlobDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr), + ); + return keypoints; + } + + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }) { + throw UnsupportedError("This fuction/feature is not supported."); } static final finalizer = OcvFinalizer( @@ -980,6 +1161,24 @@ class SimpleBlobDetector extends CvStruct { @override cvg.SimpleBlobDetector get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.SimpleBlobDetector"; + + @override + bool get isEmpty => cfeatures2d.cv_SimpleBlobDetector_empty(ref); + + set params(SimpleBlobDetectorParams params) => cfeatures2d.cv_SimpleBlobDetector_setParams(ref, params.ref); + SimpleBlobDetectorParams get params => + SimpleBlobDetectorParams.fromPointer(cfeatures2d.cv_SimpleBlobDetector_getParams(ref)); + + VecVecPoint getBlobContours() => + VecVecPoint.fromPointer(cfeatures2d.cv_SimpleBlobDetector_getBlobContours(ref)); + + @override + String toString() { + return "SimpleBlobDetector(addr=0x${ptr.address.toRadixString(16)})"; + } } /// BFMatcher is a wrapper around the cv::BFMatcher. @@ -994,13 +1193,16 @@ class BFMatcher extends CvStruct { /// returns a new BFMatcher algorithm /// /// For further details, please see: - /// https://docs.opencv.org/master/d3/d61/classcv_1_1KAZE.html + /// https://docs.opencv.org/4.x/d3/da1/classcv_1_1BFMatcher.html factory BFMatcher.empty() { final p = calloc(); cvRun(() => cfeatures2d.cv_BFMatcher_create(p)); return BFMatcher._(p); } + /// Brute-force matcher create method. 
+ /// + /// https://docs.opencv.org/4.x/d3/da1/classcv_1_1BFMatcher.html#a02ef4d594b33d091767cbfe442aefb8a factory BFMatcher.create({int type = NORM_L2, bool crossCheck = false}) { final p = calloc(); cvRun(() => cfeatures2d.cv_BFMatcher_create_1(type, crossCheck, p)); @@ -1036,6 +1238,11 @@ class BFMatcher extends CvStruct { @override cvg.BFMatcher get ref => ptr.ref; + + @override + String toString() { + return "BFMatcher(addr=0x${ptr.address.toRadixString(16)})"; + } } /// FlannBasedMatcher is a wrapper around the cv::FlannBasedMatcher. @@ -1051,13 +1258,14 @@ class FlannBasedMatcher extends CvStruct { /// returns a new FlannBasedMatcher algorithm /// /// For further details, please see: - /// https://docs.opencv.org/master/d3/d61/classcv_1_1KAZE.html + /// https://docs.opencv.org/4.x/dc/de2/classcv_1_1FlannBasedMatcher.html factory FlannBasedMatcher.empty() { final p = calloc(); cvRun(() => cfeatures2d.cv_FlannBasedMatcher_create(p)); return FlannBasedMatcher._(p); } + /// https://docs.opencv.org/4.x/dc/de2/classcv_1_1FlannBasedMatcher.html#ab9114a6471e364ad221f89068ca21382 factory FlannBasedMatcher.create({FlannIndexParams? indexParams, FlannSearchParams? searchParams}) { if (indexParams == null && searchParams == null) { return FlannBasedMatcher.empty(); @@ -1094,27 +1302,15 @@ class FlannBasedMatcher extends CvStruct { @override cvg.FlannBasedMatcher get ref => ptr.ref; -} - -enum DrawMatchesFlag { - /// DEFAULT creates new image and for each keypoint only the center point will be drawn - DEFAULT(0), - - /// DRAW_OVER_OUTIMG draws matches on existing content of image - DRAW_OVER_OUTIMG(1), - - /// NOT_DRAW_SINGLE_POINTS will not draw single points - NOT_DRAW_SINGLE_POINTS(2), - /// DRAW_RICH_KEYPOINTS draws the circle around each keypoint with keypoint size and orientation - DRAW_RICH_KEYPOINTS(4); - - const DrawMatchesFlag(this.value); - final int value; + @override + String toString() { + return "FlannBasedMatcher(addr=0x${ptr.address.toRadixString(16)})"; + } } /// SIFT is a wrapper around the cv::SIFT. -class SIFT extends CvStruct { +class SIFT extends Feature2D { SIFT._(cvg.SIFTPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { if (attach) { finalizer.attach(this, ptr.cast(), detach: this); @@ -1125,32 +1321,120 @@ class SIFT extends CvStruct { /// returns a new SIFT algorithm /// /// For further details, please see: - /// https://docs.opencv.org/master/d5/d3c/classcv_1_1xfeatures2d_1_1SIFT.html + /// https://docs.opencv.org/4.x/d7/d60/classcv_1_1SIFT.html factory SIFT.empty() { final p = calloc(); cvRun(() => cfeatures2d.cv_SIFT_create(p)); return SIFT._(p); } + /// Create SIFT with specified descriptorType. + /// + /// [nfeatures] The number of best features to retain. The features are ranked by their scores + /// (measured in SIFT algorithm as the local contrast) + /// + /// [nOctaveLayers] The number of layers in each octave. 3 is the value used in D. Lowe paper. The + /// number of octaves is computed automatically from the image resolution. + /// + /// [contrastThreshold] The contrast threshold used to filter out weak features in semi-uniform + /// (low-contrast) regions. The larger the threshold, the less features are produced by the detector. + /// Note: The contrast threshold will be divided by nOctaveLayers when the filtering is applied. When + /// [nOctaveLayers] is set to default and if you want to use the value used in D. Lowe paper, 0.03, set + /// this argument to 0.09. + /// + /// [edgeThreshold] The threshold used to filter out edge-like features. 
Note that the its meaning + /// is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are + /// filtered out (more features are retained). + /// + /// [sigma] The sigma of the Gaussian applied to the input image at the octave \#0. If your image + /// is captured with a weak camera with soft lenses, you might want to reduce the number. + /// + /// [descriptorType] The type of descriptors. Only CV_32F and CV_8U are supported. + /// + /// [enable_precise_upscale] Whether to enable precise upscaling in the scale pyramid, which maps + /// index $\texttt{x}$ to $\texttt{2x}$. This prevents localization bias. The option + /// is disabled by default. + /// + /// ```c++ + /// CV_WRAP static Ptr create(int nfeatures, int nOctaveLayers, + /// double contrastThreshold, double edgeThreshold, + /// double sigma, int descriptorType, bool enable_precise_upscale = false); + /// ``` + /// + /// https://docs.opencv.org/4.x/d7/d60/classcv_1_1SIFT.html#a4264f700a8133074fb477e30d9beb331 + factory SIFT.create({ + int nfeatures = 0, + int nOctaveLayers = 3, + double contrastThreshold = 0.04, + double edgeThreshold = 10, + double sigma = 1.6, + int? descriptorType, + bool enable_precise_upscale = false, + }) { + final p = calloc(); + cvRun( + () => descriptorType == null + ? cfeatures2d.cv_SIFT_create_2( + nfeatures, + nOctaveLayers, + contrastThreshold, + edgeThreshold, + sigma, + enable_precise_upscale, + p, + ) + : cfeatures2d.cv_SIFT_create_1( + nfeatures, + nOctaveLayers, + contrastThreshold, + edgeThreshold, + sigma, + descriptorType, + enable_precise_upscale, + p, + ), + ); + return SIFT._(p); + } + /// Detect keypoints in an image using SIFT. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - VecKeyPoint detect(Mat src) { - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_SIFT_detect(ref, src.ref, ret.ptr, ffi.nullptr)); - return ret; + @override + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}) { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + cvRun(() => cfeatures2d.cv_SIFT_detect(ref, src.ref, keypoints!.ptr, mask!.ref, ffi.nullptr)); + return keypoints; } /// DetectAndCompute keypoints and compute in an image using SIFT. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - (VecKeyPoint, Mat) detectAndCompute(Mat src, Mat mask) { - final desc = Mat.empty(); - final ret = VecKeyPoint(); - cvRun(() => cfeatures2d.cv_SIFT_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, ffi.nullptr)); - return (ret, desc); + @override + (VecKeyPoint, Mat) detectAndCompute( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? 
keypoints, + bool useProvidedKeypoints = false, + }) { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); + cvRun( + () => cfeatures2d.cv_SIFT_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + ffi.nullptr, + ), + ); + return (keypoints, descriptors); } static final finalizer = OcvFinalizer(cfeatures2d.addresses.cv_SIFT_close); @@ -1162,11 +1446,45 @@ class SIFT extends CvStruct { @override cvg.SIFT get ref => ptr.ref; + + @override + String get defaultName => "${super.defaultName}.SIFT"; + + @override + bool get isEmpty => cfeatures2d.cv_SIFT_empty(ref); + + set NFeatures(int maxFeatures) => cfeatures2d.cv_SIFT_setNFeatures(ref, maxFeatures); + int get NFeatures => cfeatures2d.cv_SIFT_getNFeatures(ref); + + set nOctaveLayers(int nOctaveLayers) => cfeatures2d.cv_SIFT_setNOctaveLayers(ref, nOctaveLayers); + int get nOctaveLayers => cfeatures2d.cv_SIFT_getNOctaveLayers(ref); + + set contrastThreshold(double contrastThreshold) => + cfeatures2d.cv_SIFT_setContrastThreshold(ref, contrastThreshold); + double get contrastThreshold => cfeatures2d.cv_SIFT_getContrastThreshold(ref); + + set edgeThreshold(double edgeThreshold) => cfeatures2d.cv_SIFT_setEdgeThreshold(ref, edgeThreshold); + double get edgeThreshold => cfeatures2d.cv_SIFT_getEdgeThreshold(ref); + + set sigma(double sigma) => cfeatures2d.cv_SIFT_setSigma(ref, sigma); + double get sigma => cfeatures2d.cv_SIFT_getSigma(ref); + + @override + String toString() { + return "SIFT(addr=0x${ptr.address.toRadixString(16)})"; + } } void drawKeyPoints(Mat src, VecKeyPoint keypoints, Mat dst, Scalar color, DrawMatchesFlag flag) { cvRun( - () => cfeatures2d.cv_drawKeyPoints(src.ref, keypoints.ref, dst.ref, color.ref, flag.value, ffi.nullptr), + () => cfeatures2d.cv_drawKeyPoints( + src.ref, + keypoints.ref, + dst.ref, + color.ref, + flag.value, + ffi.nullptr, + ), ); } diff --git a/packages/dartcv/lib/src/features2d/features2d_async.dart b/packages/dartcv/lib/src/features2d/features2d_async.dart index ee47cad2..02523f4f 100644 --- a/packages/dartcv/lib/src/features2d/features2d_async.dart +++ b/packages/dartcv/lib/src/features2d/features2d_async.dart @@ -14,18 +14,20 @@ import '../core/scalar.dart'; import '../core/vec.dart'; import '../native_lib.dart' show cfeatures2d; import './features2d.dart'; +import 'features2d_enum.dart'; extension AKAZEAsync on AKAZE { /// Detect keypoints in an image using AKAZE. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); return cvRunAsync0( - (callback) => cfeatures2d.cv_AKAZE_detect(ref, src.ref, ret.ptr, callback), + (callback) => cfeatures2d.cv_AKAZE_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), (c) { - return c.complete(ret); + return c.complete(keypoints); }, ); } @@ -34,14 +36,27 @@ extension AKAZEAsync on AKAZE { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - Future<(VecKeyPoint, Mat)> detectAndComputeAsync(Mat src, Mat mask) async { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? 
keypoints, + bool useProvidedKeypoints = false, + }) async { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); return cvRunAsync0( - (callback) => - cfeatures2d.cv_AKAZE_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, callback), + (callback) => cfeatures2d.cv_AKAZE_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + callback, + ), (c) { - return c.complete((ret, desc)); + return c.complete((keypoints!, descriptors!)); }, ); } @@ -52,15 +67,31 @@ extension AgastFeatureDetectorAsync on AgastFeatureDetector { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); return cvRunAsync0( - (callback) => cfeatures2d.cv_AgastFeatureDetector_detect(ref, src.ref, ret.ptr, callback), + (callback) => + cfeatures2d.cv_AgastFeatureDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), (c) { - return c.complete(ret); + return c.complete(keypoints); }, ); } + + /// DetectAndCompute keypoints and compute in an image using AKAZE. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + throw UnsupportedError("This fuction/feature is not supported."); + } } extension BRISKAsync on BRISK { @@ -68,25 +99,42 @@ extension BRISKAsync on BRISK { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_BRISK_detect(ref, src.ref, ret.ptr, callback), (c) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_BRISK_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); } /// DetectAndCompute keypoints and compute in an image using BRISK. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - Future<(VecKeyPoint, Mat)> detectAndComputeAsync(Mat src, Mat mask) async { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? 
keypoints, + bool useProvidedKeypoints = false, + }) async { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); return cvRunAsync0( - (callback) => - cfeatures2d.cv_BRISK_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, callback), + (callback) => cfeatures2d.cv_BRISK_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + callback, + ), (c) { - return c.complete((ret, desc)); + return c.complete((keypoints!, descriptors!)); }, ); } @@ -97,15 +145,31 @@ extension FastFeatureDetectorAsync on FastFeatureDetector { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); return cvRunAsync0( - (callback) => cfeatures2d.cv_FastFeatureDetector_detect(ref, src.ref, ret.ptr, callback), + (callback) => + cfeatures2d.cv_FastFeatureDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), (c) { - return c.complete(ret); + return c.complete(keypoints); }, ); } + + /// DetectAndCompute keypoints and compute in an image using AKAZE. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + throw UnsupportedError("This fuction/feature is not supported."); + } } extension GFTTDetectorAsync on GFTTDetector { @@ -113,13 +177,29 @@ extension GFTTDetectorAsync on GFTTDetector { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_GFTTDetector_detect(ref, src.ref, ret.ptr, callback), ( - c, - ) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_GFTTDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); + } + + /// DetectAndCompute keypoints and compute in an image using AKAZE. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + throw UnsupportedError("This fuction/feature is not supported."); } } @@ -128,24 +208,42 @@ extension KAZEAsync on KAZE { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_KAZE_detect(ref, src.ref, ret.ptr, callback), (c) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? 
mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_KAZE_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); } /// DetectAndCompute keypoints and compute in an image using KAZE. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - Future<(VecKeyPoint, Mat)> detectAndComputeAsync(Mat src, Mat mask) async { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); return cvRunAsync0( - (callback) => cfeatures2d.cv_KAZE_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, callback), + (callback) => cfeatures2d.cv_KAZE_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + callback, + ), (c) { - return c.complete((ret, desc)); + return c.complete((keypoints!, descriptors!)); }, ); } @@ -156,11 +254,29 @@ extension MSERAsync on MSER { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_MSER_detect(ref, src.ref, ret.ptr, callback), (c) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_MSER_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); + } + + /// DetectAndCompute keypoints and compute in an image using AKAZE. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + throw UnsupportedError("This fuction/feature is not supported."); } } @@ -169,11 +285,15 @@ extension ORBAsync on ORB { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_ORB_detect(ref, src.ref, ret.ptr, callback), (c) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_ORB_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); } /// DetectAndCompute keypoints and compute in an image using ORB. 
@@ -194,8 +314,8 @@ extension ORBAsync on ORB { ref, src.ref, mask.ref, - keypoints!.ptr, description!.ref, + keypoints!.ptr, useProvidedKeypoints, callback, ), @@ -211,15 +331,31 @@ extension SimpleBlobDetectorAsync on SimpleBlobDetector { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); return cvRunAsync0( - (callback) => cfeatures2d.cv_SimpleBlobDetector_detect(ref, src.ref, ret.ptr, callback), + (callback) => + cfeatures2d.cv_SimpleBlobDetector_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), (c) { - return c.complete(ret); + return c.complete(keypoints); }, ); } + + /// DetectAndCompute keypoints and compute in an image using AKAZE. + /// + /// For further details, please see: + /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + throw UnsupportedError("This fuction/feature is not supported."); + } } extension BFMatcherAsync on BFMatcher { @@ -274,24 +410,42 @@ extension SIFTAsync on SIFT { /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#aa4e9a7082ec61ebc108806704fbd7887 - Future detectAsync(Mat src) async { - final ret = VecKeyPoint(); - return cvRunAsync0((callback) => cfeatures2d.cv_SIFT_detect(ref, src.ref, ret.ptr, callback), (c) { - return c.complete(ret); - }); + Future detectAsync(Mat src, {VecKeyPoint? keypoints, Mat? mask}) async { + keypoints ??= VecKeyPoint(); + mask ??= Mat.empty(); + return cvRunAsync0( + (callback) => cfeatures2d.cv_SIFT_detect(ref, src.ref, keypoints!.ptr, mask!.ref, callback), + (c) { + return c.complete(keypoints); + }, + ); } /// DetectAndCompute keypoints and compute in an image using SIFT. /// /// For further details, please see: /// https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html#a8be0d1c20b08eb867184b8d74c15a677 - Future<(VecKeyPoint, Mat)> detectAndComputeAsync(Mat src, Mat mask) async { - final desc = Mat.empty(); - final ret = VecKeyPoint(); + Future<(VecKeyPoint, Mat)> detectAndComputeAsync( + Mat src, + Mat mask, { + Mat? descriptors, + VecKeyPoint? keypoints, + bool useProvidedKeypoints = false, + }) async { + descriptors ??= Mat.empty(); + keypoints ??= VecKeyPoint(); return cvRunAsync0( - (callback) => cfeatures2d.cv_SIFT_detectAndCompute(ref, src.ref, mask.ref, desc.ref, ret.ptr, callback), + (callback) => cfeatures2d.cv_SIFT_detectAndCompute( + ref, + src.ref, + mask.ref, + descriptors!.ref, + keypoints!.ptr, + useProvidedKeypoints, + callback, + ), (c) { - return c.complete((ret, desc)); + return c.complete((keypoints!, descriptors!)); }, ); } diff --git a/packages/dartcv/lib/src/features2d/features2d_base.dart b/packages/dartcv/lib/src/features2d/features2d_base.dart new file mode 100644 index 00000000..f315e3b1 --- /dev/null +++ b/packages/dartcv/lib/src/features2d/features2d_base.dart @@ -0,0 +1,487 @@ +// Copyright (c) 2024, rainyl and all contributors. All rights reserved. +// Use of this source code is governed by a Apache-2.0 license +// that can be found in the LICENSE file. 
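Editor's note: the new `features2d_base.dart` below introduces an abstract `Feature2D` base class that the detector wrappers now extend, which makes detector-agnostic code possible. A sketch under the assumptions that `Feature2D` and the detectors are exported from the package's public API and that the all-default `create()` factories remain available:

```dart
import 'package:dartcv4/dartcv.dart' as cv; // illustrative import path

// Any of the detector wrappers can be passed where a Feature2D is expected.
cv.VecKeyPoint detectWith(cv.Feature2D detector, cv.Mat gray) => detector.detect(gray);

void compareDetectors(cv.Mat gray) {
  final detectors = <cv.Feature2D>[cv.ORB.create(), cv.SIFT.create(), cv.AKAZE.create()];
  for (final d in detectors) {
    print('${d.defaultName}: ${detectWith(d, gray).length} keypoints');
  }
}
```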
+ +// ignore_for_file: constant_identifier_names, non_constant_identifier_names + +library cv.features2d; + +import 'dart:ffi' as ffi; + +import 'package:ffi/ffi.dart'; + +import '../core/base.dart'; +import '../core/keypoint.dart'; +import '../core/mat.dart'; +import '../core/vec.dart'; +import '../g/features2d.g.dart' as cvg; +import '../native_lib.dart' show cfeatures2d; + +abstract class Feature2D extends CvStruct { + Feature2D.fromPointer(super.ptr) : super.fromPointer(); + + VecKeyPoint detect(Mat src, {VecKeyPoint? keypoints, Mat? mask}); + (VecKeyPoint ret, Mat desc) detectAndCompute( + Mat src, + Mat mask, { + VecKeyPoint? keypoints, + Mat? descriptors, + bool useProvidedKeypoints = false, + }); + + String get defaultName => 'Feature2D'; + + bool get isEmpty; +} + +class FlannIndexParams extends CvStruct { + FlannIndexParams.fromPointer(cvg.FlannIndexParamsPtr ptr, [bool attach = true]) : super.fromPointer(ptr) { + if (attach) { + finalizer.attach(this, ptr.cast()); + } + } + + factory FlannIndexParams.empty() { + final p = calloc(); + cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); + return FlannIndexParams.fromPointer(p); + } + + factory FlannIndexParams.fromMap(Map map) { + final params = FlannIndexParams.empty(); + for (final entry in map.entries) { + switch (entry.value) { + case int(): + params.set(entry.key, entry.value as int); + case double(): + params.set(entry.key, entry.value as double); + case String(): + params.set(entry.key, entry.value as String); + case bool(): + params.set(entry.key, entry.value as bool); + case cvg.FlannAlgorithm(): + params.set(entry.key, entry.value as cvg.FlannAlgorithm); + default: + throw ArgumentError('Value type ${entry.value.runtimeType} is not supported for FlannIndexParams'); + } + } + return params; + } + + static final finalizer = OcvFinalizer( + cfeatures2d.addresses.cv_flann_IndexParams_close, + ); + + @override + cvg.FlannIndexParams get ref => ptr.ref; + + String getString(String key, [String defaultValue = ""]) { + final ckey = key.toNativeUtf8().cast(); + final cdefault = defaultValue.toNativeUtf8().cast(); + final crval = calloc>(); + cfeatures2d.cv_flann_IndexParams_getString(ref, ckey, cdefault, crval); + calloc.free(ckey); + calloc.free(cdefault); + + final rval = crval.value.cast().toDartString(); + calloc.free(crval); + return rval; + } + + int getInt(String key, [int defaultValue = -1]) { + final ckey = key.toNativeUtf8().cast(); + final crval = calloc(); + cfeatures2d.cv_flann_IndexParams_getInt(ref, ckey, defaultValue, crval); + calloc.free(ckey); + final rval = crval.value; + calloc.free(crval); + return rval; + } + + double getDouble(String key, [double defaultValue = -1]) { + final ckey = key.toNativeUtf8().cast(); + final crval = calloc(); + cfeatures2d.cv_flann_IndexParams_getDouble(ref, ckey, defaultValue, crval); + calloc.free(ckey); + final rval = crval.value; + calloc.free(crval); + return rval; + } + + // bool getBool(String key, [bool defaultValue = false]) { + // final ckey = key.toNativeUtf8().cast(); + // final crval = calloc(); + // cfeatures2d.cv_flann_IndexParams_getBool(ref, ckey, defaultValue, crval); + // calloc.free(ckey); + // final rval = crval.value; + // calloc.free(crval); + // return rval; + // } + + Map getAll() { + final names = VecVecChar(); + final types = VecI32(); + final strValues = VecVecChar(); + final numValues = VecF64(); + + cfeatures2d.cv_flann_IndexParams_getAll(ref, names.ptr, types.ptr, strValues.ptr, numValues.ptr); + + final rval = {}; + final names1 = 
names.asStringList(); + for (var i = 0; i < names1.length; i++) { + final name = names1[i]; + final type = types[i]; + rval[name] = switch (cvg.FlannIndexType.fromValue(type)) { + cvg.FlannIndexType.FLANN_INDEX_TYPE_8U || + cvg.FlannIndexType.FLANN_INDEX_TYPE_8S || + cvg.FlannIndexType.FLANN_INDEX_TYPE_16U || + cvg.FlannIndexType.FLANN_INDEX_TYPE_16S || + cvg.FlannIndexType.FLANN_INDEX_TYPE_32S => + numValues[i].toInt(), + cvg.FlannIndexType.FLANN_INDEX_TYPE_32F || cvg.FlannIndexType.FLANN_INDEX_TYPE_64F => numValues[i], + cvg.FlannIndexType.FLANN_INDEX_TYPE_BOOL => numValues[i].toInt() != 0, + cvg.FlannIndexType.FLANN_INDEX_TYPE_STRING => names1[i], + cvg.FlannIndexType.FLANN_INDEX_TYPE_ALGORITHM => cvg.FlannAlgorithm.fromValue(numValues[i].toInt()), + }; + } + + return rval; + } + + void setString(String key, String value) { + final ckey = key.toNativeUtf8().cast(); + final cvalue = value.toNativeUtf8().cast(); + cfeatures2d.cv_flann_IndexParams_setString(ref, ckey, cvalue); + calloc.free(ckey); + calloc.free(cvalue); + } + + void setInt(String key, int value) { + final ckey = key.toNativeUtf8().cast(); + cfeatures2d.cv_flann_IndexParams_setInt(ref, ckey, value); + calloc.free(ckey); + } + + void setDouble(String key, double value) { + final ckey = key.toNativeUtf8().cast(); + cfeatures2d.cv_flann_IndexParams_setDouble(ref, ckey, value); + calloc.free(ckey); + } + + void setBool(String key, bool value) { + final ckey = key.toNativeUtf8().cast(); + cfeatures2d.cv_flann_IndexParams_setBool(ref, ckey, value); + calloc.free(ckey); + } + + void setAlgorithm(cvg.FlannAlgorithm value) { + cfeatures2d.cv_flann_IndexParams_setAlgorithm(ref, value.value); + } + + T get(String key, [T? defaultValue]) { + if (T == int) { + return getInt(key, defaultValue as int? ?? -1) as T; + } else if (T == double) { + return getDouble(key, defaultValue as double? ?? -1.0) as T; + } else if (T == String) { + return getString(key, defaultValue as String? ?? "") as T; + } else { + throw ArgumentError("Unsupported type: ${T.runtimeType}"); + } + } + + void set(String key, T value) { + switch (value) { + case int(): + setInt(key, value); + case double(): + setDouble(key, value); + case String(): + setString(key, value); + case bool(): + setBool(key, value); + case cvg.FlannAlgorithm(): + setAlgorithm(value); + default: + throw ArgumentError("Unsupported type: ${value.runtimeType}"); + } + } + + @override + String toString() { + return "FlannIndexParams(address=0x${ptr.address.toRadixString(16)})"; + } +} + +class FlannSearchParams extends FlannIndexParams { + FlannSearchParams.fromPointer( + super.ptr, + int checks, + double eps, + bool sorted, + bool exploreAllTrees, [ + super.attach = true, + ]) : _checks = checks, + _eps = eps, + _sorted = sorted, + _exploreAllTrees = exploreAllTrees, + super.fromPointer(); + + factory FlannSearchParams({ + int checks = 32, + double eps = 0.0, + bool sorted = true, + bool exploreAllTrees = false, + }) { + final p = calloc(); + cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); + final params = FlannSearchParams.fromPointer(p, checks, eps, sorted, exploreAllTrees); + + params.setInt('checks', checks); + params.setDouble('eps', eps); + + params.setInt('sorted', sorted ? 1 : 0); + params.setInt('explore_all_trees', exploreAllTrees ? 
1 : 0); + + return params; + } + + int _checks; + double _eps; + bool _sorted; + bool _exploreAllTrees; + + int get checks => _checks; + double get eps => _eps; + bool get sorted => _sorted; + bool get exploreAllTrees => _exploreAllTrees; + + set checks(int value) { + _checks = value; + setInt("checks", value); + } + + set eps(double value) { + _eps = value; + setDouble("eps", value); + } + + set sorted(bool value) { + _sorted = value; + setInt("sorted", value ? 1 : 0); + } + + set exploreAllTrees(bool value) { + _exploreAllTrees = value; + setInt("explore_all_trees", value ? 1 : 0); + } + + @override + String toString() { + return "FlannSearchParams(" + "address=0x${ptr.address.toRadixString(16)}, " + "checks=$checks, " + "eps=$eps, " + "sorted=$sorted, " + "exploreAllTrees=$exploreAllTrees)"; + } +} + +class FlannKDTreeIndexParams extends FlannIndexParams { + FlannKDTreeIndexParams.fromPointer(super.ptr, [super.attach = true]) : super.fromPointer(); + + factory FlannKDTreeIndexParams({int trees = 4}) { + final p = calloc(); + cvRun(() => cfeatures2d.cv_flann_IndexParams_create(p)); + final params = FlannKDTreeIndexParams.fromPointer(p); + + params.setAlgorithm(cvg.FlannAlgorithm.FLANN_INDEX_KDTREE); + params.setInt('trees', trees); + + return params; + } + + int get trees => getInt("trees"); + set trees(int value) => setInt("trees", value); + + @override + String toString() { + return 'FlannKDTreeIndexParams(address=0x${ptr.address.toRadixString(16)}, trees=$trees)'; + } +} + +class SimpleBlobDetectorParams extends CvStruct { + SimpleBlobDetectorParams._(ffi.Pointer ptr, [bool attach = true]) + : super.fromPointer(ptr) { + if (attach) { + finalizer.attach(this, ptr.cast(), detach: this); + } + } + + factory SimpleBlobDetectorParams.empty() { + final p = calloc(); + cvRun(() => cfeatures2d.cv_SimpleBlobDetectorParams_create(p)); + return SimpleBlobDetectorParams._(p); + } + + factory SimpleBlobDetectorParams({ + int? blobColor, + bool? filterByArea, + bool? filterByCircularity, + bool? filterByColor, + bool? filterByConvexity, + bool? filterByInertia, + double? maxArea, + double? maxCircularity, + double? maxConvexity, + double? maxInertiaRatio, + double? maxThreshold, + double? minArea, + double? minCircularity, + double? minConvexity, + double? minDistBetweenBlobs, + double? minInertiaRatio, + int? minRepeatability, + double? minThreshold, + double? 
thresholdStep, + }) { + final p = calloc(); + if (blobColor != null) p.ref.blobColor = blobColor; + if (filterByArea != null) p.ref.filterByArea = filterByArea; + if (filterByCircularity != null) { + p.ref.filterByCircularity = filterByCircularity; + } + if (filterByColor != null) p.ref.filterByColor = filterByColor; + if (filterByConvexity != null) p.ref.filterByConvexity = filterByConvexity; + if (filterByInertia != null) p.ref.filterByInertia = filterByInertia; + if (maxArea != null) p.ref.maxArea = maxArea; + if (maxCircularity != null) p.ref.maxCircularity = maxCircularity; + if (maxConvexity != null) p.ref.maxConvexity = maxConvexity; + if (maxInertiaRatio != null) p.ref.maxInertiaRatio = maxInertiaRatio; + if (maxThreshold != null) p.ref.maxThreshold = maxThreshold; + if (minArea != null) p.ref.minArea = minArea; + if (minCircularity != null) p.ref.minCircularity = minCircularity; + if (minConvexity != null) p.ref.minConvexity = minConvexity; + if (minDistBetweenBlobs != null) { + p.ref.minDistBetweenBlobs = minDistBetweenBlobs; + } + if (minInertiaRatio != null) p.ref.minInertiaRatio = minInertiaRatio; + if (minRepeatability != null) p.ref.minRepeatability = minRepeatability; + if (minThreshold != null) p.ref.minThreshold = minThreshold; + if (thresholdStep != null) p.ref.thresholdStep = thresholdStep; + + return SimpleBlobDetectorParams._(p); + } + + factory SimpleBlobDetectorParams.fromNative(cvg.SimpleBlobDetectorParams r) => SimpleBlobDetectorParams( + blobColor: r.blobColor, + filterByArea: r.filterByArea, + filterByCircularity: r.filterByCircularity, + filterByColor: r.filterByColor, + filterByConvexity: r.filterByConvexity, + filterByInertia: r.filterByInertia, + maxArea: r.maxArea, + maxCircularity: r.maxCircularity, + maxConvexity: r.maxConvexity, + maxInertiaRatio: r.maxInertiaRatio, + maxThreshold: r.maxThreshold, + minArea: r.minArea, + minCircularity: r.minCircularity, + minConvexity: r.minConvexity, + minDistBetweenBlobs: r.minDistBetweenBlobs, + minInertiaRatio: r.minInertiaRatio, + minRepeatability: r.minRepeatability, + minThreshold: r.minThreshold, + thresholdStep: r.thresholdStep, + ); + factory SimpleBlobDetectorParams.fromPointer( + ffi.Pointer p, [ + bool attach = true, + ]) => + SimpleBlobDetectorParams._(p, attach); + + @override + cvg.SimpleBlobDetectorParams get ref => ptr.ref; + + static final finalizer = ffi.NativeFinalizer(calloc.nativeFree); + + void dispose() { + finalizer.detach(this); + calloc.free(ptr); + } + + int get blobColor => ref.blobColor; + set blobColor(int value) => ref.blobColor = value; + + bool get filterByArea => ref.filterByArea; + set filterByArea(bool value) => ref.filterByArea = value; + + bool get filterByCircularity => ref.filterByCircularity; + set filterByCircularity(bool value) => ref.filterByCircularity = value; + + bool get filterByColor => ref.filterByColor; + set filterByColor(bool value) => ref.filterByColor = value; + + bool get filterByConvexity => ref.filterByConvexity; + set filterByConvexity(bool value) => ref.filterByConvexity = value; + + bool get filterByInertia => ref.filterByInertia; + set filterByInertia(bool value) => ref.filterByInertia = value; + + double get maxArea => ref.maxArea; + set maxArea(double v) => ref.maxArea = v; + + double get maxCircularity => ref.maxCircularity; + set maxCircularity(double v) => ref.maxCircularity = v; + + double get maxConvexity => ref.maxConvexity; + set maxConvexity(double v) => ref.maxConvexity = v; + + double get maxInertiaRatio => ref.maxInertiaRatio; + set 
maxInertiaRatio(double v) => ref.maxInertiaRatio = v; + + double get maxThreshold => ref.maxThreshold; + set maxThreshold(double v) => ref.maxThreshold = v; + + double get minArea => ref.minArea; + set minArea(double v) => ref.minArea = v; + + double get minCircularity => ref.minCircularity; + set minCircularity(double v) => ref.minCircularity = v; + + double get minConvexity => ref.minConvexity; + set minConvexity(double v) => ref.minConvexity = v; + + double get minDistBetweenBlobs => ref.minDistBetweenBlobs; + set minDistBetweenBlobs(double v) => ref.minDistBetweenBlobs = v; + + double get minInertiaRatio => ref.minInertiaRatio; + set minInertiaRatio(double v) => ref.minInertiaRatio = v; + + int get minRepeatability => ref.minRepeatability; + set minRepeatability(int v) => ref.minRepeatability = v; + + double get minThreshold => ref.minThreshold; + set minThreshold(double v) => ref.minThreshold = v; + + double get thresholdStep => ref.thresholdStep; + set thresholdStep(double v) => ref.thresholdStep = v; + + @override + List get props => [ + maxArea, + minArea, + minConvexity, + maxConvexity, + minInertiaRatio, + maxInertiaRatio, + minThreshold, + maxThreshold, + thresholdStep, + minDistBetweenBlobs, + minRepeatability, + minThreshold, + thresholdStep, + minDistBetweenBlobs, + ]; +} diff --git a/packages/dartcv/lib/src/features2d/features2d_enum.dart b/packages/dartcv/lib/src/features2d/features2d_enum.dart new file mode 100644 index 00000000..cc28635a --- /dev/null +++ b/packages/dartcv/lib/src/features2d/features2d_enum.dart @@ -0,0 +1,113 @@ +enum AKAZEDescriptorType { + DESCRIPTOR_KAZE_UPRIGHT(2), + DESCRIPTOR_KAZE(3), + DESCRIPTOR_MLDB_UPRIGHT(4), + DESCRIPTOR_MLDB(5); + + const AKAZEDescriptorType(this.value); + final int value; + + factory AKAZEDescriptorType.fromValue(int value) => switch (value) { + 2 => DESCRIPTOR_KAZE_UPRIGHT, + 3 => DESCRIPTOR_KAZE, + 4 => DESCRIPTOR_MLDB_UPRIGHT, + 5 => DESCRIPTOR_MLDB, + _ => throw ArgumentError.value(value, 'value', 'Invalid AKAZE descriptor type'), + }; +} + +enum KAZEDiffusivityType { + DIFF_PM_G1(0), + DIFF_PM_G2(1), + DIFF_WEICKERT(2), + DIFF_CHARBONNIER(3); + + const KAZEDiffusivityType(this.value); + final int value; + + factory KAZEDiffusivityType.fromValue(int value) => switch (value) { + 0 => DIFF_PM_G1, + 1 => DIFF_PM_G2, + 2 => DIFF_WEICKERT, + 3 => DIFF_CHARBONNIER, + _ => throw ArgumentError.value(value, 'value', 'Invalid KAZE diffusivity type'), + }; +} + +enum AgastDetectorType { + AGAST_5_8(0), + AGAST_7_12d(1), + AGAST_7_12s(2), + OAST_9_16(3); + + const AgastDetectorType(this.value); + final int value; + + factory AgastDetectorType.fromValue(int value) => switch (value) { + 0 => AGAST_5_8, + 1 => AGAST_7_12d, + 2 => AGAST_7_12s, + 3 => OAST_9_16, + _ => throw ArgumentError.value(value, 'value', 'Invalid detector type'), + }; +} + +enum FastFeatureDetectorType { + /// FastFeatureDetector::TYPE_5_8 + TYPE_5_8(0), + + /// FastFeatureDetector::TYPE_7_12 + TYPE_7_12(1), + + /// FastFeatureDetector::TYPE_9_16 + TYPE_9_16(2); + + const FastFeatureDetectorType(this.value); + final int value; + + factory FastFeatureDetectorType.fromValue(int value) => switch (value) { + 0 => TYPE_5_8, + 1 => TYPE_7_12, + 2 => TYPE_9_16, + _ => throw ArgumentError.value(value, 'value', 'Invalid FastFeatureDetector type'), + }; +} + +enum ORBScoreType { + HARRIS_SCORE(0), + FAST_SCORE(1); + + const ORBScoreType(this.value); + final int value; + + factory ORBScoreType.fromValue(int value) => switch (value) { + 0 => HARRIS_SCORE, + 1 => FAST_SCORE, + 
_ => throw ArgumentError.value(value, 'value', 'Invalid ORB score type'), + }; +} + +enum DrawMatchesFlag { + /// DEFAULT creates new image and for each keypoint only the center point will be drawn + DEFAULT(0), + + /// DRAW_OVER_OUTIMG draws matches on existing content of image + DRAW_OVER_OUTIMG(1), + + /// NOT_DRAW_SINGLE_POINTS will not draw single points + NOT_DRAW_SINGLE_POINTS(2), + + /// DRAW_RICH_KEYPOINTS draws the circle around each keypoint with keypoint size and orientation + DRAW_RICH_KEYPOINTS(4); + + const DrawMatchesFlag(this.value); + final int value; + + factory DrawMatchesFlag.fromValue(int value) => switch (value) { + 0 => DEFAULT, + 1 => DRAW_OVER_OUTIMG, + 2 => NOT_DRAW_SINGLE_POINTS, + 4 => DRAW_RICH_KEYPOINTS, + _ => throw ArgumentError.value(value, 'value', 'Invalid DrawMatchesFlag value'), + }; +} diff --git a/packages/dartcv/lib/src/g/calib3d.g.dart b/packages/dartcv/lib/src/g/calib3d.g.dart index 80c0e599..696bae95 100644 --- a/packages/dartcv/lib/src/g/calib3d.g.dart +++ b/packages/dartcv/lib/src/g/calib3d.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/constants.g.dart b/packages/dartcv/lib/src/g/constants.g.dart index f1e069a8..a84a6cf4 100644 --- a/packages/dartcv/lib/src/g/constants.g.dart +++ b/packages/dartcv/lib/src/g/constants.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/contrib.g.dart b/packages/dartcv/lib/src/g/contrib.g.dart index 8aee5a2e..b214d2eb 100644 --- a/packages/dartcv/lib/src/g/contrib.g.dart +++ b/packages/dartcv/lib/src/g/contrib.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 @@ -1420,6 +1419,203 @@ class CvNativeContrib { _cv_aruco_getPredefinedDictionaryPtr.asFunction< ffi.Pointer Function(int, ffi.Pointer)>(); + void cv_freetype_FreeType2_close( + FreeType2Ptr self, + ) { + return _cv_freetype_FreeType2_close( + self, + ); + } + + late final _cv_freetype_FreeType2_closePtr = + _lookup>( + 'cv_freetype_FreeType2_close'); + late final _cv_freetype_FreeType2_close = + _cv_freetype_FreeType2_closePtr.asFunction(); + + /// C wrappers + ffi.Pointer cv_freetype_FreeType2_create( + ffi.Pointer rval, + ) { + return _cv_freetype_FreeType2_create( + rval, + ); + } + + late final _cv_freetype_FreeType2_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>('cv_freetype_FreeType2_create'); + late final _cv_freetype_FreeType2_create = _cv_freetype_FreeType2_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_freetype_FreeType2_getTextSize( + FreeType2 self, + ffi.Pointer text, + int fontHeight, + int thickness, + ffi.Pointer baseLine, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_freetype_FreeType2_getTextSize( + self, + text, + fontHeight, + thickness, + baseLine, + rval, + 
callback, + ); + } + + late final _cv_freetype_FreeType2_getTextSizePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FreeType2, + ffi.Pointer, + ffi.Int, + ffi.Int, + ffi.Pointer, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_freetype_FreeType2_getTextSize'); + late final _cv_freetype_FreeType2_getTextSize = + _cv_freetype_FreeType2_getTextSizePtr.asFunction< + ffi.Pointer Function( + FreeType2, + ffi.Pointer, + int, + int, + ffi.Pointer, + ffi.Pointer, + imp$1.CvCallback_0)>(); + + ffi.Pointer cv_freetype_FreeType2_loadFontData( + FreeType2 self, + ffi.Pointer fontFileName, + int idx, + imp$1.CvCallback_0 callback, + ) { + return _cv_freetype_FreeType2_loadFontData( + self, + fontFileName, + idx, + callback, + ); + } + + late final _cv_freetype_FreeType2_loadFontDataPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FreeType2, + ffi.Pointer, + ffi.Int, + imp$1.CvCallback_0)>>('cv_freetype_FreeType2_loadFontData'); + late final _cv_freetype_FreeType2_loadFontData = + _cv_freetype_FreeType2_loadFontDataPtr.asFunction< + ffi.Pointer Function( + FreeType2, ffi.Pointer, int, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_freetype_FreeType2_loadFontData_buf( + FreeType2 self, + ffi.Pointer pBuf, + int bufSize, + int idx, + imp$1.CvCallback_0 callback, + ) { + return _cv_freetype_FreeType2_loadFontData_buf( + self, + pBuf, + bufSize, + idx, + callback, + ); + } + + late final _cv_freetype_FreeType2_loadFontData_bufPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FreeType2, + ffi.Pointer, + ffi.Size, + ffi.Int, + imp$1.CvCallback_0)>>('cv_freetype_FreeType2_loadFontData_buf'); + late final _cv_freetype_FreeType2_loadFontData_buf = + _cv_freetype_FreeType2_loadFontData_bufPtr.asFunction< + ffi.Pointer Function(FreeType2, ffi.Pointer, int, + int, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_freetype_FreeType2_putText( + FreeType2 self, + imp$1.MatInOut img, + ffi.Pointer text, + CvPoint org, + int fontHeight, + Scalar color, + int thickness, + int line_type, + bool bottomLeftOrigin, + imp$1.CvCallback_0 callback, + ) { + return _cv_freetype_FreeType2_putText( + self, + img, + text, + org, + fontHeight, + color, + thickness, + line_type, + bottomLeftOrigin, + callback, + ); + } + + late final _cv_freetype_FreeType2_putTextPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FreeType2, + imp$1.MatInOut, + ffi.Pointer, + CvPoint, + ffi.Int, + Scalar, + ffi.Int, + ffi.Int, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_freetype_FreeType2_putText'); + late final _cv_freetype_FreeType2_putText = + _cv_freetype_FreeType2_putTextPtr.asFunction< + ffi.Pointer Function( + FreeType2, + imp$1.MatInOut, + ffi.Pointer, + CvPoint, + int, + Scalar, + int, + int, + bool, + imp$1.CvCallback_0)>(); + + ffi.Pointer cv_freetype_FreeType2_setSplitNumber( + FreeType2 self, + int num, + ) { + return _cv_freetype_FreeType2_setSplitNumber( + self, + num, + ); + } + + late final _cv_freetype_FreeType2_setSplitNumberPtr = _lookup< + ffi + .NativeFunction Function(FreeType2, ffi.Int)>>( + 'cv_freetype_FreeType2_setSplitNumber'); + late final _cv_freetype_FreeType2_setSplitNumber = + _cv_freetype_FreeType2_setSplitNumberPtr + .asFunction Function(FreeType2, int)>(); + void cv_img_hash_BlockMeanHash_close( BlockMeanHashPtr self, ) { @@ -4027,6 +4223,9 @@ class _SymbolAddresses { ffi.Pointer> get cv_aruco_detectorParameters_close => _library._cv_aruco_detectorParameters_closePtr; + ffi.Pointer> + get cv_freetype_FreeType2_close => + _library._cv_freetype_FreeType2_closePtr; ffi.Pointer> get 
cv_img_hash_BlockMeanHash_close => _library._cv_img_hash_BlockMeanHash_closePtr; @@ -4169,6 +4368,12 @@ final class EdgeDrawingParams extends ffi.Struct { typedef EdgeDrawingPtr = ffi.Pointer; +final class FreeType2 extends ffi.Struct { + external ffi.Pointer ptr; +} + +typedef FreeType2Ptr = ffi.Pointer; + final class GraphSegmentation extends ffi.Struct { external ffi.Pointer> ptr; } diff --git a/packages/dartcv/lib/src/g/contrib.yaml b/packages/dartcv/lib/src/g/contrib.yaml index 3b020909..973be3a9 100644 --- a/packages/dartcv/lib/src/g/contrib.yaml +++ b/packages/dartcv/lib/src/g/contrib.yaml @@ -166,6 +166,20 @@ files: name: cv_aruco_generateImageMarker c:@F@cv_aruco_getPredefinedDictionary: name: cv_aruco_getPredefinedDictionary + c:@F@cv_freetype_FreeType2_close: + name: cv_freetype_FreeType2_close + c:@F@cv_freetype_FreeType2_create: + name: cv_freetype_FreeType2_create + c:@F@cv_freetype_FreeType2_getTextSize: + name: cv_freetype_FreeType2_getTextSize + c:@F@cv_freetype_FreeType2_loadFontData: + name: cv_freetype_FreeType2_loadFontData + c:@F@cv_freetype_FreeType2_loadFontData_buf: + name: cv_freetype_FreeType2_loadFontData_buf + c:@F@cv_freetype_FreeType2_putText: + name: cv_freetype_FreeType2_putText + c:@F@cv_freetype_FreeType2_setSplitNumber: + name: cv_freetype_FreeType2_setSplitNumber c:@F@cv_img_hash_BlockMeanHash_close: name: cv_img_hash_BlockMeanHash_close c:@F@cv_img_hash_BlockMeanHash_compare: @@ -428,6 +442,8 @@ files: name: EdgeDrawing c:@S@EdgeDrawingParams: name: EdgeDrawingParams + c:@S@FreeType2: + name: FreeType2 c:@S@GraphSegmentation: name: GraphSegmentation c:@S@PtrWBDetector: @@ -454,6 +470,8 @@ files: name: ArucoDetectorPtr c:aruco.h@T@ArucoDictionaryPtr: name: ArucoDictionaryPtr + c:freetype.h@T@FreeType2Ptr: + name: FreeType2Ptr c:img_hash.h@T@BlockMeanHashPtr: name: BlockMeanHashPtr c:quality.h@T@QualityBRISQUEPtr: diff --git a/packages/dartcv/lib/src/g/core.g.dart b/packages/dartcv/lib/src/g/core.g.dart index c9258468..7888d11a 100644 --- a/packages/dartcv/lib/src/g/core.g.dart +++ b/packages/dartcv/lib/src/g/core.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 @@ -832,6 +831,30 @@ class CvNativeCore { late final _cv_Mat_flags = _cv_Mat_flagsPtr.asFunction(isLeaf: true); + ffi.Pointer cv_Mat_getUMat( + Mat self, + int accessFlags, + int usageFlags, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_Mat_getUMat( + self, + accessFlags, + usageFlags, + rval, + callback, + ); + } + + late final _cv_Mat_getUMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, ffi.Int, ffi.Int, + ffi.Pointer, imp$1.CvCallback_0)>>('cv_Mat_getUMat'); + late final _cv_Mat_getUMat = _cv_Mat_getUMatPtr.asFunction< + ffi.Pointer Function( + Mat, int, int, ffi.Pointer, imp$1.CvCallback_0)>(isLeaf: true); + ffi.Pointer cv_Mat_get_Vec2b( Mat self, int i0, @@ -3871,229 +3894,1334 @@ class CvNativeCore { ); } - late final _cv_RNG_gaussianPtr = _lookup< + late final _cv_RNG_gaussianPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + RNG, ffi.Double, ffi.Pointer)>>('cv_RNG_gaussian'); + late final _cv_RNG_gaussian = _cv_RNG_gaussianPtr.asFunction< + ffi.Pointer Function(RNG, double, ffi.Pointer)>(); + + ffi.Pointer cv_RNG_next( + RNG rng, + ffi.Pointer rval, + ) { + return _cv_RNG_next( + rng, + rval, 
+ ); + } + + late final _cv_RNG_nextPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + RNG, ffi.Pointer)>>('cv_RNG_next'); + late final _cv_RNG_next = _cv_RNG_nextPtr.asFunction< + ffi.Pointer Function(RNG, ffi.Pointer)>(); + + ffi.Pointer cv_RNG_uniform( + RNG rng, + int a, + int b, + ffi.Pointer rval, + ) { + return _cv_RNG_uniform( + rng, + a, + b, + rval, + ); + } + + late final _cv_RNG_uniformPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + RNG, ffi.Int, ffi.Int, ffi.Pointer)>>('cv_RNG_uniform'); + late final _cv_RNG_uniform = _cv_RNG_uniformPtr.asFunction< + ffi.Pointer Function(RNG, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_RNG_uniformDouble( + RNG rng, + double a, + double b, + ffi.Pointer rval, + ) { + return _cv_RNG_uniformDouble( + rng, + a, + b, + rval, + ); + } + + late final _cv_RNG_uniformDoublePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(RNG, ffi.Double, ffi.Double, + ffi.Pointer)>>('cv_RNG_uniformDouble'); + late final _cv_RNG_uniformDouble = _cv_RNG_uniformDoublePtr.asFunction< + ffi.Pointer Function( + RNG, double, double, ffi.Pointer)>(); + + ffi.Pointer cv_RotatedRect_boundingRect( + RotatedRect rect, + ffi.Pointer rval, + ) { + return _cv_RotatedRect_boundingRect( + rect, + rval, + ); + } + + late final _cv_RotatedRect_boundingRectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(RotatedRect, + ffi.Pointer)>>('cv_RotatedRect_boundingRect'); + late final _cv_RotatedRect_boundingRect = + _cv_RotatedRect_boundingRectPtr.asFunction< + ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + + ffi.Pointer cv_RotatedRect_boundingRect2f( + RotatedRect rect, + ffi.Pointer rval, + ) { + return _cv_RotatedRect_boundingRect2f( + rect, + rval, + ); + } + + late final _cv_RotatedRect_boundingRect2fPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(RotatedRect, + ffi.Pointer)>>('cv_RotatedRect_boundingRect2f'); + late final _cv_RotatedRect_boundingRect2f = + _cv_RotatedRect_boundingRect2fPtr.asFunction< + ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + + ffi.Pointer cv_RotatedRect_points( + RotatedRect rect, + ffi.Pointer out_pts, + ) { + return _cv_RotatedRect_points( + rect, + out_pts, + ); + } + + late final _cv_RotatedRect_pointsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + RotatedRect, ffi.Pointer)>>('cv_RotatedRect_points'); + late final _cv_RotatedRect_points = _cv_RotatedRect_pointsPtr.asFunction< + ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + + ffi.Pointer cv_SVBackSubst( + Mat w, + Mat u, + Mat vt, + Mat rhs, + Mat dst, + imp$1.CvCallback_0 callback, + ) { + return _cv_SVBackSubst( + w, + u, + vt, + rhs, + dst, + callback, + ); + } + + late final _cv_SVBackSubstPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>>('cv_SVBackSubst'); + late final _cv_SVBackSubst = _cv_SVBackSubstPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_SVD_Compute( + Mat src, + Mat w_r, + Mat u_r, + Mat vt_r, + int flags, + imp$1.CvCallback_0 callback, + ) { + return _cv_SVD_Compute( + src, + w_r, + u_r, + vt_r, + flags, + callback, + ); + } + + late final _cv_SVD_ComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, ffi.Int, + imp$1.CvCallback_0)>>('cv_SVD_Compute'); + late final _cv_SVD_Compute = _cv_SVD_ComputePtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, int, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_SVD_backSubst( + Mat w, + Mat u, + Mat vt, 
+ Mat rhs, + Mat dst, + imp$1.CvCallback_0 callback, + ) { + return _cv_SVD_backSubst( + w, + u, + vt, + rhs, + dst, + callback, + ); + } + + late final _cv_SVD_backSubstPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, + imp$1.CvCallback_0)>>('cv_SVD_backSubst'); + late final _cv_SVD_backSubst = _cv_SVD_backSubstPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_SVDecomp( + Mat w, + Mat u, + Mat vt, + Mat d, + int flags, + imp$1.CvCallback_0 callback, + ) { + return _cv_SVDecomp( + w, + u, + vt, + d, + flags, + callback, + ); + } + + late final _cv_SVDecompPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, ffi.Int, imp$1.CvCallback_0)>>('cv_SVDecomp'); + late final _cv_SVDecomp = _cv_SVDecompPtr.asFunction< + ffi.Pointer Function( + Mat, Mat, Mat, Mat, int, imp$1.CvCallback_0)>(); + + void cv_UMat_addref( + UMat self, + ) { + return _cv_UMat_addref( + self, + ); + } + + late final _cv_UMat_addrefPtr = + _lookup>('cv_UMat_addref'); + late final _cv_UMat_addref = + _cv_UMat_addrefPtr.asFunction(); + + int cv_UMat_channels( + UMat self, + ) { + return _cv_UMat_channels( + self, + ); + } + + late final _cv_UMat_channelsPtr = + _lookup>('cv_UMat_channels'); + late final _cv_UMat_channels = + _cv_UMat_channelsPtr.asFunction(); + + int cv_UMat_checkVector( + UMat self, + int elemChannels, + int depth, + bool requireContinuous, + ) { + return _cv_UMat_checkVector( + self, + elemChannels, + depth, + requireContinuous, + ); + } + + late final _cv_UMat_checkVectorPtr = _lookup< + ffi + .NativeFunction>( + 'cv_UMat_checkVector'); + late final _cv_UMat_checkVector = + _cv_UMat_checkVectorPtr.asFunction(); + + ffi.Pointer cv_UMat_clone( + UMat self, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_clone( + self, + rval, + callback, + ); + } + + late final _cv_UMat_clonePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_clone'); + late final _cv_UMat_clone = _cv_UMat_clonePtr.asFunction< + ffi.Pointer Function( + UMat, ffi.Pointer, imp$1.CvCallback_0)>(); + + void cv_UMat_close( + imp$1.UMatPtr self, + ) { + return _cv_UMat_close( + self, + ); + } + + late final _cv_UMat_closePtr = + _lookup>( + 'cv_UMat_close'); + late final _cv_UMat_close = + _cv_UMat_closePtr.asFunction(); + + ffi.Pointer cv_UMat_col( + UMat self, + int x, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_col( + self, + x, + rval, + callback, + ); + } + + late final _cv_UMat_colPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_col'); + late final _cv_UMat_col = _cv_UMat_colPtr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_colRange( + UMat self, + int startcol, + int endcol, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_colRange( + self, + startcol, + endcol, + rval, + callback, + ); + } + + late final _cv_UMat_colRangePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, + ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_colRange'); + late final _cv_UMat_colRange = _cv_UMat_colRangePtr.asFunction< + ffi.Pointer Function( + UMat, int, int, ffi.Pointer, imp$1.CvCallback_0)>(); + + int cv_UMat_cols( + UMat self, + ) { + return _cv_UMat_cols( + self, + ); + } + + late final _cv_UMat_colsPtr = + 
_lookup>('cv_UMat_cols'); + late final _cv_UMat_cols = _cv_UMat_colsPtr.asFunction(); + + ffi.Pointer cv_UMat_convertTo( + UMat self, + int rtype, + double alpha, + double beta, + UMat dst, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_convertTo( + self, + rtype, + alpha, + beta, + dst, + callback, + ); + } + + late final _cv_UMat_convertToPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Double, ffi.Double, + UMat, imp$1.CvCallback_0)>>('cv_UMat_convertTo'); + late final _cv_UMat_convertTo = _cv_UMat_convertToPtr.asFunction< + ffi.Pointer Function( + UMat, int, double, double, UMat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_copyTo( + UMat self, + UMat dst, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_copyTo( + self, + dst, + callback, + ); + } + + late final _cv_UMat_copyToPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, UMat, imp$1.CvCallback_0)>>('cv_UMat_copyTo'); + late final _cv_UMat_copyTo = _cv_UMat_copyToPtr.asFunction< + ffi.Pointer Function(UMat, UMat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_copyTo_2( + UMat self, + UMat mask, + UMat dst, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_copyTo_2( + self, + mask, + dst, + callback, + ); + } + + late final _cv_UMat_copyTo_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, UMat, UMat, imp$1.CvCallback_0)>>('cv_UMat_copyTo_2'); + late final _cv_UMat_copyTo_2 = _cv_UMat_copyTo_2Ptr.asFunction< + ffi.Pointer Function(UMat, UMat, UMat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_createFunc( + UMat self, + int rows, + int cols, + int type, + int usageFlags, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_createFunc( + self, + rows, + cols, + type, + usageFlags, + callback, + ); + } + + late final _cv_UMat_createFuncPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, ffi.Int, + ffi.Int, imp$1.CvCallback_0)>>('cv_UMat_createFunc'); + late final _cv_UMat_createFunc = _cv_UMat_createFuncPtr.asFunction< + ffi.Pointer Function( + UMat, int, int, int, int, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_createFunc_2( + UMat self, + int ndims, + ffi.Pointer sizes, + int type, + int usageFlags, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_createFunc_2( + self, + ndims, + sizes, + type, + usageFlags, + callback, + ); + } + + late final _cv_UMat_createFunc_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + ffi.Int, ffi.Int, imp$1.CvCallback_0)>>('cv_UMat_createFunc_2'); + late final _cv_UMat_createFunc_2 = _cv_UMat_createFunc_2Ptr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, int, int, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_create_1( + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_1( + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, ffi.Pointer)>>('cv_UMat_create_1'); + late final _cv_UMat_create_1 = _cv_UMat_create_1Ptr + .asFunction Function(int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_2( + int rows, + int cols, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_2( + rows, + cols, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Int, ffi.Int, + ffi.Pointer)>>('cv_UMat_create_2'); + late final _cv_UMat_create_2 = _cv_UMat_create_2Ptr.asFunction< + ffi.Pointer Function(int, int, int, 
int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_3( + int rows, + int cols, + int type, + Scalar s, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_3( + rows, + cols, + type, + s, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_3Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Int, Scalar, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_3'); + late final _cv_UMat_create_3 = _cv_UMat_create_3Ptr.asFunction< + ffi.Pointer Function( + int, int, int, Scalar, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_4( + int ndims, + ffi.Pointer sizes, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_4( + ndims, + sizes, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_4Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Pointer, ffi.Int, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_4'); + late final _cv_UMat_create_4 = _cv_UMat_create_4Ptr.asFunction< + ffi.Pointer Function( + int, ffi.Pointer, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_5( + int ndims, + ffi.Pointer sizes, + int type, + Scalar value, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_5( + ndims, + sizes, + type, + value, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_5Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Pointer, ffi.Int, + Scalar, ffi.Int, ffi.Pointer)>>('cv_UMat_create_5'); + late final _cv_UMat_create_5 = _cv_UMat_create_5Ptr.asFunction< + ffi.Pointer Function( + int, ffi.Pointer, int, Scalar, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_6( + UMat self, + ffi.Pointer rval, + ) { + return _cv_UMat_create_6( + self, + rval, + ); + } + + late final _cv_UMat_create_6Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, ffi.Pointer)>>('cv_UMat_create_6'); + late final _cv_UMat_create_6 = _cv_UMat_create_6Ptr + .asFunction Function(UMat, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_7( + UMat self, + int rowStart, + int rowEnd, + int colStart, + int colEnd, + ffi.Pointer rval, + ) { + return _cv_UMat_create_7( + self, + rowStart, + rowEnd, + colStart, + colEnd, + rval, + ); + } + + late final _cv_UMat_create_7Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, ffi.Int, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_7'); + late final _cv_UMat_create_7 = _cv_UMat_create_7Ptr.asFunction< + ffi.Pointer Function( + UMat, int, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_8( + UMat self, + int x, + int y, + int width, + int height, + ffi.Pointer rval, + ) { + return _cv_UMat_create_8( + self, + x, + y, + width, + height, + rval, + ); + } + + late final _cv_UMat_create_8Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, ffi.Int, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_8'); + late final _cv_UMat_create_8 = _cv_UMat_create_8Ptr.asFunction< + ffi.Pointer Function( + UMat, int, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_9( + UMat self, + CvRect roi, + ffi.Pointer rval, + ) { + return _cv_UMat_create_9( + self, + roi, + rval, + ); + } + + late final _cv_UMat_create_9Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, CvRect, ffi.Pointer)>>('cv_UMat_create_9'); + late final _cv_UMat_create_9 = _cv_UMat_create_9Ptr.asFunction< + ffi.Pointer Function(UMat, CvRect, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_diag( + UMat d, + int usageFlags, + ffi.Pointer rval, + ) { + return 
_cv_UMat_create_diag( + d, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_diagPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + UMat, ffi.Int, ffi.Pointer)>>('cv_UMat_create_diag'); + late final _cv_UMat_create_diag = _cv_UMat_create_diagPtr.asFunction< + ffi.Pointer Function(UMat, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_eye( + int rows, + int cols, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_eye( + rows, + cols, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_eyePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Int, ffi.Int, + ffi.Pointer)>>('cv_UMat_create_eye'); + late final _cv_UMat_create_eye = _cv_UMat_create_eyePtr.asFunction< + ffi.Pointer Function(int, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_ones( + int rows, + int cols, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_ones( + rows, + cols, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_onesPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Int, ffi.Int, + ffi.Pointer)>>('cv_UMat_create_ones'); + late final _cv_UMat_create_ones = _cv_UMat_create_onesPtr.asFunction< + ffi.Pointer Function(int, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_ones_1( + int ndims, + ffi.Pointer sizes, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_ones_1( + ndims, + sizes, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_ones_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Pointer, ffi.Int, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_ones_1'); + late final _cv_UMat_create_ones_1 = _cv_UMat_create_ones_1Ptr.asFunction< + ffi.Pointer Function( + int, ffi.Pointer, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_zeros( + int rows, + int cols, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_zeros( + rows, + cols, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_zerosPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Int, ffi.Int, + ffi.Pointer)>>('cv_UMat_create_zeros'); + late final _cv_UMat_create_zeros = _cv_UMat_create_zerosPtr.asFunction< + ffi.Pointer Function(int, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_UMat_create_zeros_1( + int ndims, + ffi.Pointer sizes, + int type, + int usageFlags, + ffi.Pointer rval, + ) { + return _cv_UMat_create_zeros_1( + ndims, + sizes, + type, + usageFlags, + rval, + ); + } + + late final _cv_UMat_create_zeros_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Pointer, ffi.Int, + ffi.Int, ffi.Pointer)>>('cv_UMat_create_zeros_1'); + late final _cv_UMat_create_zeros_1 = _cv_UMat_create_zeros_1Ptr.asFunction< + ffi.Pointer Function( + int, ffi.Pointer, int, int, ffi.Pointer)>(); + + void cv_UMat_deallocate( + UMat self, + ) { + return _cv_UMat_deallocate( + self, + ); + } + + late final _cv_UMat_deallocatePtr = + _lookup>( + 'cv_UMat_deallocate'); + late final _cv_UMat_deallocate = + _cv_UMat_deallocatePtr.asFunction(); + + int cv_UMat_depth( + UMat self, + ) { + return _cv_UMat_depth( + self, + ); + } + + late final _cv_UMat_depthPtr = + _lookup>('cv_UMat_depth'); + late final _cv_UMat_depth = + _cv_UMat_depthPtr.asFunction(); + + ffi.Pointer cv_UMat_diag( + UMat self, + int d, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_diag( + self, + 
d, + rval, + callback, + ); + } + + late final _cv_UMat_diagPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_diag'); + late final _cv_UMat_diag = _cv_UMat_diagPtr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, imp$1.CvCallback_0)>(); + + int cv_UMat_dims( + UMat self, + ) { + return _cv_UMat_dims( + self, + ); + } + + late final _cv_UMat_dimsPtr = + _lookup>('cv_UMat_dims'); + late final _cv_UMat_dims = _cv_UMat_dimsPtr.asFunction(); + + ffi.Pointer cv_UMat_dot( + UMat self, + UMat m, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_dot( + self, + m, + rval, + callback, + ); + } + + late final _cv_UMat_dotPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, UMat, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_dot'); + late final _cv_UMat_dot = _cv_UMat_dotPtr.asFunction< + ffi.Pointer Function( + UMat, UMat, ffi.Pointer, imp$1.CvCallback_0)>(); + + int cv_UMat_elemSize( + UMat self, + ) { + return _cv_UMat_elemSize( + self, + ); + } + + late final _cv_UMat_elemSizePtr = + _lookup>('cv_UMat_elemSize'); + late final _cv_UMat_elemSize = + _cv_UMat_elemSizePtr.asFunction(); + + int cv_UMat_elemSize1( + UMat self, + ) { + return _cv_UMat_elemSize1( + self, + ); + } + + late final _cv_UMat_elemSize1Ptr = + _lookup>('cv_UMat_elemSize1'); + late final _cv_UMat_elemSize1 = + _cv_UMat_elemSize1Ptr.asFunction(); + + bool cv_UMat_empty( + UMat self, + ) { + return _cv_UMat_empty( + self, + ); + } + + late final _cv_UMat_emptyPtr = + _lookup>('cv_UMat_empty'); + late final _cv_UMat_empty = + _cv_UMat_emptyPtr.asFunction(); + + int cv_UMat_flags( + UMat self, + ) { + return _cv_UMat_flags( + self, + ); + } + + late final _cv_UMat_flagsPtr = + _lookup>('cv_UMat_flags'); + late final _cv_UMat_flags = + _cv_UMat_flagsPtr.asFunction(); + + ffi.Pointer cv_UMat_getMat( + UMat self, + int accessFlags, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_getMat( + self, + accessFlags, + rval, + callback, + ); + } + + late final _cv_UMat_getMatPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_getMat'); + late final _cv_UMat_getMat = _cv_UMat_getMatPtr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_handle( + UMat self, + int accessFlags, + ) { + return _cv_UMat_handle( + self, + accessFlags, + ); + } + + late final _cv_UMat_handlePtr = _lookup< + ffi.NativeFunction Function(UMat, ffi.Int)>>( + 'cv_UMat_handle'); + late final _cv_UMat_handle = _cv_UMat_handlePtr + .asFunction Function(UMat, int)>(); + + ffi.Pointer cv_UMat_inv( + UMat self, + int method, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_inv( + self, + method, + rval, + callback, + ); + } + + late final _cv_UMat_invPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_inv'); + late final _cv_UMat_inv = _cv_UMat_invPtr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, imp$1.CvCallback_0)>(); + + bool cv_UMat_isContinuous( + UMat self, + ) { + return _cv_UMat_isContinuous( + self, + ); + } + + late final _cv_UMat_isContinuousPtr = + _lookup>( + 'cv_UMat_isContinuous'); + late final _cv_UMat_isContinuous = + _cv_UMat_isContinuousPtr.asFunction(); + + bool cv_UMat_isSubmatrix( + UMat self, + ) { + return _cv_UMat_isSubmatrix( + self, + ); + } + + late final 
_cv_UMat_isSubmatrixPtr = + _lookup>( + 'cv_UMat_isSubmatrix'); + late final _cv_UMat_isSubmatrix = + _cv_UMat_isSubmatrixPtr.asFunction(); + + ffi.Pointer cv_UMat_mul( + UMat self, + UMat m, + double scale, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_mul( + self, + m, + scale, + rval, + callback, + ); + } + + late final _cv_UMat_mulPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(UMat, UMat, ffi.Double, + ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_mul'); + late final _cv_UMat_mul = _cv_UMat_mulPtr.asFunction< + ffi.Pointer Function( + UMat, UMat, double, ffi.Pointer, imp$1.CvCallback_0)>(); + + int cv_UMat_offset( + UMat self, + ) { + return _cv_UMat_offset( + self, + ); + } + + late final _cv_UMat_offsetPtr = + _lookup>('cv_UMat_offset'); + late final _cv_UMat_offset = + _cv_UMat_offsetPtr.asFunction(); + + void cv_UMat_release( + UMat self, + ) { + return _cv_UMat_release( + self, + ); + } + + late final _cv_UMat_releasePtr = + _lookup>('cv_UMat_release'); + late final _cv_UMat_release = + _cv_UMat_releasePtr.asFunction(); + + ffi.Pointer cv_UMat_reshape( + UMat self, + int cn, + int rows, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_UMat_reshape( + self, + cn, + rows, + rval, + callback, + ); + } + + late final _cv_UMat_reshapePtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function( - RNG, ffi.Double, ffi.Pointer)>>('cv_RNG_gaussian'); - late final _cv_RNG_gaussian = _cv_RNG_gaussianPtr.asFunction< - ffi.Pointer Function(RNG, double, ffi.Pointer)>(); + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, + ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_reshape'); + late final _cv_UMat_reshape = _cv_UMat_reshapePtr.asFunction< + ffi.Pointer Function( + UMat, int, int, ffi.Pointer, imp$1.CvCallback_0)>(); - ffi.Pointer cv_RNG_next( - RNG rng, - ffi.Pointer rval, + ffi.Pointer cv_UMat_reshape_2( + UMat self, + int cn, + int newndims, + ffi.Pointer newsz, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, ) { - return _cv_RNG_next( - rng, + return _cv_UMat_reshape_2( + self, + cn, + newndims, + newsz, rval, + callback, ); } - late final _cv_RNG_nextPtr = _lookup< + late final _cv_UMat_reshape_2Ptr = _lookup< ffi.NativeFunction< ffi.Pointer Function( - RNG, ffi.Pointer)>>('cv_RNG_next'); - late final _cv_RNG_next = _cv_RNG_nextPtr.asFunction< - ffi.Pointer Function(RNG, ffi.Pointer)>(); - - ffi.Pointer cv_RNG_uniform( - RNG rng, - int a, - int b, - ffi.Pointer rval, + UMat, + ffi.Int, + ffi.Int, + ffi.Pointer, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_reshape_2'); + late final _cv_UMat_reshape_2 = _cv_UMat_reshape_2Ptr.asFunction< + ffi.Pointer Function(UMat, int, int, ffi.Pointer, + ffi.Pointer, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_UMat_row( + UMat self, + int y, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, ) { - return _cv_RNG_uniform( - rng, - a, - b, + return _cv_UMat_row( + self, + y, rval, + callback, ); } - late final _cv_RNG_uniformPtr = _lookup< + late final _cv_UMat_rowPtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function( - RNG, ffi.Int, ffi.Int, ffi.Pointer)>>('cv_RNG_uniform'); - late final _cv_RNG_uniform = _cv_RNG_uniformPtr.asFunction< - ffi.Pointer Function(RNG, int, int, ffi.Pointer)>(); + ffi.Pointer Function(UMat, ffi.Int, ffi.Pointer, + imp$1.CvCallback_0)>>('cv_UMat_row'); + late final _cv_UMat_row = _cv_UMat_rowPtr.asFunction< + ffi.Pointer Function( + UMat, int, ffi.Pointer, imp$1.CvCallback_0)>(); - ffi.Pointer cv_RNG_uniformDouble( - RNG rng, - double a, - double b, - ffi.Pointer rval, 
+ ffi.Pointer cv_UMat_rowRange( + UMat self, + int startrow, + int endrow, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, ) { - return _cv_RNG_uniformDouble( - rng, - a, - b, + return _cv_UMat_rowRange( + self, + startrow, + endrow, rval, + callback, ); } - late final _cv_RNG_uniformDoublePtr = _lookup< + late final _cv_UMat_rowRangePtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(RNG, ffi.Double, ffi.Double, - ffi.Pointer)>>('cv_RNG_uniformDouble'); - late final _cv_RNG_uniformDouble = _cv_RNG_uniformDoublePtr.asFunction< + ffi.Pointer Function(UMat, ffi.Int, ffi.Int, + ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_rowRange'); + late final _cv_UMat_rowRange = _cv_UMat_rowRangePtr.asFunction< ffi.Pointer Function( - RNG, double, double, ffi.Pointer)>(); + UMat, int, int, ffi.Pointer, imp$1.CvCallback_0)>(); - ffi.Pointer cv_RotatedRect_boundingRect( - RotatedRect rect, - ffi.Pointer rval, + int cv_UMat_rows( + UMat self, ) { - return _cv_RotatedRect_boundingRect( - rect, - rval, + return _cv_UMat_rows( + self, ); } - late final _cv_RotatedRect_boundingRectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(RotatedRect, - ffi.Pointer)>>('cv_RotatedRect_boundingRect'); - late final _cv_RotatedRect_boundingRect = - _cv_RotatedRect_boundingRectPtr.asFunction< - ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + late final _cv_UMat_rowsPtr = + _lookup>('cv_UMat_rows'); + late final _cv_UMat_rows = _cv_UMat_rowsPtr.asFunction(); - ffi.Pointer cv_RotatedRect_boundingRect2f( - RotatedRect rect, - ffi.Pointer rval, + ffi.Pointer cv_UMat_setTo( + UMat self, + Scalar s, + UMat mask, + imp$1.CvCallback_0 callback, ) { - return _cv_RotatedRect_boundingRect2f( - rect, - rval, + return _cv_UMat_setTo( + self, + s, + mask, + callback, ); } - late final _cv_RotatedRect_boundingRect2fPtr = _lookup< + late final _cv_UMat_setToPtr = _lookup< ffi.NativeFunction< - ffi.Pointer Function(RotatedRect, - ffi.Pointer)>>('cv_RotatedRect_boundingRect2f'); - late final _cv_RotatedRect_boundingRect2f = - _cv_RotatedRect_boundingRect2fPtr.asFunction< - ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + ffi.Pointer Function( + UMat, Scalar, UMat, imp$1.CvCallback_0)>>('cv_UMat_setTo'); + late final _cv_UMat_setTo = _cv_UMat_setToPtr.asFunction< + ffi.Pointer Function(UMat, Scalar, UMat, imp$1.CvCallback_0)>(); - ffi.Pointer cv_RotatedRect_points( - RotatedRect rect, - ffi.Pointer out_pts, + ffi.Pointer cv_UMat_size( + UMat self, ) { - return _cv_RotatedRect_points( - rect, - out_pts, + return _cv_UMat_size( + self, ); } - late final _cv_RotatedRect_pointsPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - RotatedRect, ffi.Pointer)>>('cv_RotatedRect_points'); - late final _cv_RotatedRect_points = _cv_RotatedRect_pointsPtr.asFunction< - ffi.Pointer Function(RotatedRect, ffi.Pointer)>(); + late final _cv_UMat_sizePtr = + _lookup Function(UMat)>>( + 'cv_UMat_size'); + late final _cv_UMat_size = + _cv_UMat_sizePtr.asFunction Function(UMat)>(); - ffi.Pointer cv_SVBackSubst( - Mat w, - Mat u, - Mat vt, - Mat rhs, - Mat dst, + MatStep cv_UMat_step( + UMat self, + ) { + return _cv_UMat_step( + self, + ); + } + + late final _cv_UMat_stepPtr = + _lookup>('cv_UMat_step'); + late final _cv_UMat_step = + _cv_UMat_stepPtr.asFunction(); + + int cv_UMat_step1( + UMat self, + int i, + ) { + return _cv_UMat_step1( + self, + i, + ); + } + + late final _cv_UMat_step1Ptr = + _lookup>( + 'cv_UMat_step1'); + late final _cv_UMat_step1 = + _cv_UMat_step1Ptr.asFunction(); + + ffi.Pointer cv_UMat_t( + UMat self, + 
ffi.Pointer rval, imp$1.CvCallback_0 callback, ) { - return _cv_SVBackSubst( - w, - u, - vt, - rhs, - dst, + return _cv_UMat_t( + self, + rval, callback, ); } - late final _cv_SVBackSubstPtr = _lookup< + late final _cv_UMat_tPtr = _lookup< ffi.NativeFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>>('cv_SVBackSubst'); - late final _cv_SVBackSubst = _cv_SVBackSubstPtr.asFunction< + UMat, ffi.Pointer, imp$1.CvCallback_0)>>('cv_UMat_t'); + late final _cv_UMat_t = _cv_UMat_tPtr.asFunction< ffi.Pointer Function( - Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>(); + UMat, ffi.Pointer, imp$1.CvCallback_0)>(); - ffi.Pointer cv_SVD_Compute( - Mat src, - Mat w_r, - Mat u_r, - Mat vt_r, - int flags, - imp$1.CvCallback_0 callback, + int cv_UMat_total( + UMat self, ) { - return _cv_SVD_Compute( - src, - w_r, - u_r, - vt_r, - flags, - callback, + return _cv_UMat_total( + self, ); } - late final _cv_SVD_ComputePtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, ffi.Int, - imp$1.CvCallback_0)>>('cv_SVD_Compute'); - late final _cv_SVD_Compute = _cv_SVD_ComputePtr.asFunction< - ffi.Pointer Function( - Mat, Mat, Mat, Mat, int, imp$1.CvCallback_0)>(); + late final _cv_UMat_totalPtr = + _lookup>('cv_UMat_total'); + late final _cv_UMat_total = + _cv_UMat_totalPtr.asFunction(); - ffi.Pointer cv_SVD_backSubst( - Mat w, - Mat u, - Mat vt, - Mat rhs, - Mat dst, - imp$1.CvCallback_0 callback, + int cv_UMat_type( + UMat self, ) { - return _cv_SVD_backSubst( - w, - u, - vt, - rhs, - dst, - callback, + return _cv_UMat_type( + self, ); } - late final _cv_SVD_backSubstPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(Mat, Mat, Mat, Mat, Mat, - imp$1.CvCallback_0)>>('cv_SVD_backSubst'); - late final _cv_SVD_backSubst = _cv_SVD_backSubstPtr.asFunction< - ffi.Pointer Function( - Mat, Mat, Mat, Mat, Mat, imp$1.CvCallback_0)>(); + late final _cv_UMat_typePtr = + _lookup>('cv_UMat_type'); + late final _cv_UMat_type = _cv_UMat_typePtr.asFunction(); - ffi.Pointer cv_SVDecomp( - Mat w, - Mat u, - Mat vt, - Mat d, - int flags, - imp$1.CvCallback_0 callback, + int cv_UMat_usageFlags( + UMat self, ) { - return _cv_SVDecomp( - w, - u, - vt, - d, - flags, - callback, + return _cv_UMat_usageFlags( + self, ); } - late final _cv_SVDecompPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - Mat, Mat, Mat, Mat, ffi.Int, imp$1.CvCallback_0)>>('cv_SVDecomp'); - late final _cv_SVDecomp = _cv_SVDecompPtr.asFunction< - ffi.Pointer Function( - Mat, Mat, Mat, Mat, int, imp$1.CvCallback_0)>(); + late final _cv_UMat_usageFlagsPtr = + _lookup>('cv_UMat_usageFlags'); + late final _cv_UMat_usageFlags = + _cv_UMat_usageFlagsPtr.asFunction(); ffi.Pointer cv_absdiff( Mat src1, @@ -6463,6 +7591,15 @@ class CvNativeCore { late final _getLogCallback = _getLogCallbackPtr.asFunction(); + LogCallbackEx getLogCallbackEx() { + return _getLogCallbackEx(); + } + + late final _getLogCallbackExPtr = + _lookup>('getLogCallbackEx'); + late final _getLogCallbackEx = + _getLogCallbackExPtr.asFunction(); + ffi.Pointer getLogLevel( ffi.Pointer logLevel, ) { @@ -6484,6 +7621,13 @@ class CvNativeCore { set logCallback(LogCallback value) => _logCallback.value = value; + late final ffi.Pointer _logCallbackEx = + _lookup('logCallbackEx'); + + LogCallbackEx get logCallbackEx => _logCallbackEx.value; + + set logCallbackEx(LogCallbackEx value) => _logCallbackEx.value = value; + void registerErrorCallback( ErrorCallback callback, ) { @@ -6498,20 +7642,34 @@ class CvNativeCore { late final 
_registerErrorCallback = _registerErrorCallbackPtr.asFunction(); - ffi.Pointer replaceWriteLogMessageEx( + ffi.Pointer replaceWriteLogMessage( LogCallback callback, ) { - return _replaceWriteLogMessageEx( + return _replaceWriteLogMessage( callback, ); } - late final _replaceWriteLogMessageExPtr = + late final _replaceWriteLogMessagePtr = _lookup Function(LogCallback)>>( - 'replaceWriteLogMessageEx'); - late final _replaceWriteLogMessageEx = _replaceWriteLogMessageExPtr + 'replaceWriteLogMessage'); + late final _replaceWriteLogMessage = _replaceWriteLogMessagePtr .asFunction Function(LogCallback)>(); + ffi.Pointer replaceWriteLogMessageEx( + LogCallbackEx callback, + ) { + return _replaceWriteLogMessageEx( + callback, + ); + } + + late final _replaceWriteLogMessageExPtr = _lookup< + ffi.NativeFunction Function(LogCallbackEx)>>( + 'replaceWriteLogMessageEx'); + late final _replaceWriteLogMessageEx = _replaceWriteLogMessageExPtr + .asFunction Function(LogCallbackEx)>(); + void setLogCallback( LogCallback callback, ) { @@ -6526,6 +7684,20 @@ class CvNativeCore { late final _setLogCallback = _setLogCallbackPtr.asFunction(); + void setLogCallbackEx( + LogCallbackEx callback, + ) { + return _setLogCallbackEx( + callback, + ); + } + + late final _setLogCallbackExPtr = + _lookup>( + 'setLogCallbackEx'); + late final _setLogCallbackEx = + _setLogCallbackExPtr.asFunction(); + ffi.Pointer setLogLevel( int logLevel, ) { @@ -14352,6 +15524,54 @@ class CvNativeCore { late final _std_VecVecPoint_shrink_to_fit = _std_VecVecPoint_shrink_to_fitPtr .asFunction)>(); + void writeLogMessage( + int logLevel, + ffi.Pointer message, + ) { + return _writeLogMessage( + logLevel, + message, + ); + } + + late final _writeLogMessagePtr = _lookup< + ffi + .NativeFunction)>>( + 'writeLogMessage'); + late final _writeLogMessage = _writeLogMessagePtr + .asFunction)>(); + + void writeLogMessageEx( + int logLevel, + ffi.Pointer tag, + ffi.Pointer file, + int line, + ffi.Pointer func, + ffi.Pointer message, + ) { + return _writeLogMessageEx( + logLevel, + tag, + file, + line, + func, + message, + ); + } + + late final _writeLogMessageExPtr = _lookup< + ffi.NativeFunction< + ffi.Void Function( + ffi.Int, + ffi.Pointer, + ffi.Pointer, + ffi.Int, + ffi.Pointer, + ffi.Pointer)>>('writeLogMessageEx'); + late final _writeLogMessageEx = _writeLogMessageExPtr.asFunction< + void Function(int, ffi.Pointer, ffi.Pointer, int, + ffi.Pointer, ffi.Pointer)>(); + late final addresses = _SymbolAddresses(this); } @@ -14366,6 +15586,8 @@ class _SymbolAddresses { get cv_Mat_closeVoid => _library._cv_Mat_closeVoidPtr; ffi.Pointer> get cv_RNG_close => _library._cv_RNG_closePtr; + ffi.Pointer> + get cv_UMat_close => _library._cv_UMat_closePtr; ffi.Pointer> get std_VecChar_free => _library._std_VecChar_freePtr; ffi.Pointer> @@ -14458,7 +15680,8 @@ typedef DartErrorCallbackFunction = void Function( ffi.Pointer userdata); typedef KeyPoint = imp$1.KeyPoint; typedef LogCallback = ffi.Pointer>; -typedef LogCallbackFunction = ffi.Void Function( +typedef LogCallbackEx = ffi.Pointer>; +typedef LogCallbackExFunction = ffi.Void Function( ffi.Int logLevel, ffi.Pointer tag, ffi.Size tagLen, @@ -14469,7 +15692,7 @@ typedef LogCallbackFunction = ffi.Void Function( ffi.Size funcLen, ffi.Pointer message, ffi.Size msgLen); -typedef DartLogCallbackFunction = void Function( +typedef DartLogCallbackExFunction = void Function( int logLevel, ffi.Pointer tag, int tagLen, @@ -14480,12 +15703,17 @@ typedef DartLogCallbackFunction = void Function( int funcLen, ffi.Pointer 
message, int msgLen); +typedef LogCallbackFunction = ffi.Void Function( + ffi.Int logLevel, ffi.Pointer message, ffi.Size msgLen); +typedef DartLogCallbackFunction = void Function( + int logLevel, ffi.Pointer message, int msgLen); typedef Mat = imp$1.Mat; typedef MatStep = imp$1.MatStep; typedef RNG = imp$1.RNG; typedef RotatedRect = imp$1.RotatedRect; typedef Scalar = imp$1.Scalar; typedef TermCriteria = imp$1.TermCriteria; +typedef UMat = imp$1.UMat; typedef Vec2b = imp$1.Vec2b; typedef Vec2d = imp$1.Vec2d; typedef Vec2f = imp$1.Vec2f; diff --git a/packages/dartcv/lib/src/g/core.yaml b/packages/dartcv/lib/src/g/core.yaml index 271e9c4b..3a52cf23 100644 --- a/packages/dartcv/lib/src/g/core.yaml +++ b/packages/dartcv/lib/src/g/core.yaml @@ -7,6 +7,9 @@ files: ErrorCallbackFunction: name: ErrorCallbackFunction dart-name: DartErrorCallbackFunction + LogCallbackExFunction: + name: LogCallbackExFunction + dart-name: DartLogCallbackExFunction LogCallbackFunction: name: LogCallbackFunction dart-name: DartLogCallbackFunction @@ -82,6 +85,8 @@ files: name: cv_Mat_eye c:@F@cv_Mat_flags: name: cv_Mat_flags + c:@F@cv_Mat_getUMat: + name: cv_Mat_getUMat c:@F@cv_Mat_get_Vec2b: name: cv_Mat_get_Vec2b c:@F@cv_Mat_get_Vec2d: @@ -424,6 +429,122 @@ files: name: cv_SVD_backSubst c:@F@cv_SVDecomp: name: cv_SVDecomp + c:@F@cv_UMat_addref: + name: cv_UMat_addref + c:@F@cv_UMat_channels: + name: cv_UMat_channels + c:@F@cv_UMat_checkVector: + name: cv_UMat_checkVector + c:@F@cv_UMat_clone: + name: cv_UMat_clone + c:@F@cv_UMat_close: + name: cv_UMat_close + c:@F@cv_UMat_col: + name: cv_UMat_col + c:@F@cv_UMat_colRange: + name: cv_UMat_colRange + c:@F@cv_UMat_cols: + name: cv_UMat_cols + c:@F@cv_UMat_convertTo: + name: cv_UMat_convertTo + c:@F@cv_UMat_copyTo: + name: cv_UMat_copyTo + c:@F@cv_UMat_copyTo_2: + name: cv_UMat_copyTo_2 + c:@F@cv_UMat_createFunc: + name: cv_UMat_createFunc + c:@F@cv_UMat_createFunc_2: + name: cv_UMat_createFunc_2 + c:@F@cv_UMat_create_1: + name: cv_UMat_create_1 + c:@F@cv_UMat_create_2: + name: cv_UMat_create_2 + c:@F@cv_UMat_create_3: + name: cv_UMat_create_3 + c:@F@cv_UMat_create_4: + name: cv_UMat_create_4 + c:@F@cv_UMat_create_5: + name: cv_UMat_create_5 + c:@F@cv_UMat_create_6: + name: cv_UMat_create_6 + c:@F@cv_UMat_create_7: + name: cv_UMat_create_7 + c:@F@cv_UMat_create_8: + name: cv_UMat_create_8 + c:@F@cv_UMat_create_9: + name: cv_UMat_create_9 + c:@F@cv_UMat_create_diag: + name: cv_UMat_create_diag + c:@F@cv_UMat_create_eye: + name: cv_UMat_create_eye + c:@F@cv_UMat_create_ones: + name: cv_UMat_create_ones + c:@F@cv_UMat_create_ones_1: + name: cv_UMat_create_ones_1 + c:@F@cv_UMat_create_zeros: + name: cv_UMat_create_zeros + c:@F@cv_UMat_create_zeros_1: + name: cv_UMat_create_zeros_1 + c:@F@cv_UMat_deallocate: + name: cv_UMat_deallocate + c:@F@cv_UMat_depth: + name: cv_UMat_depth + c:@F@cv_UMat_diag: + name: cv_UMat_diag + c:@F@cv_UMat_dims: + name: cv_UMat_dims + c:@F@cv_UMat_dot: + name: cv_UMat_dot + c:@F@cv_UMat_elemSize: + name: cv_UMat_elemSize + c:@F@cv_UMat_elemSize1: + name: cv_UMat_elemSize1 + c:@F@cv_UMat_empty: + name: cv_UMat_empty + c:@F@cv_UMat_flags: + name: cv_UMat_flags + c:@F@cv_UMat_getMat: + name: cv_UMat_getMat + c:@F@cv_UMat_handle: + name: cv_UMat_handle + c:@F@cv_UMat_inv: + name: cv_UMat_inv + c:@F@cv_UMat_isContinuous: + name: cv_UMat_isContinuous + c:@F@cv_UMat_isSubmatrix: + name: cv_UMat_isSubmatrix + c:@F@cv_UMat_mul: + name: cv_UMat_mul + c:@F@cv_UMat_offset: + name: cv_UMat_offset + c:@F@cv_UMat_release: + name: cv_UMat_release + 
c:@F@cv_UMat_reshape: + name: cv_UMat_reshape + c:@F@cv_UMat_reshape_2: + name: cv_UMat_reshape_2 + c:@F@cv_UMat_row: + name: cv_UMat_row + c:@F@cv_UMat_rowRange: + name: cv_UMat_rowRange + c:@F@cv_UMat_rows: + name: cv_UMat_rows + c:@F@cv_UMat_setTo: + name: cv_UMat_setTo + c:@F@cv_UMat_size: + name: cv_UMat_size + c:@F@cv_UMat_step: + name: cv_UMat_step + c:@F@cv_UMat_step1: + name: cv_UMat_step1 + c:@F@cv_UMat_t: + name: cv_UMat_t + c:@F@cv_UMat_total: + name: cv_UMat_total + c:@F@cv_UMat_type: + name: cv_UMat_type + c:@F@cv_UMat_usageFlags: + name: cv_UMat_usageFlags c:@F@cv_absdiff: name: cv_absdiff c:@F@cv_add: @@ -634,14 +755,20 @@ files: name: getCvVersion c:@F@getLogCallback: name: getLogCallback + c:@F@getLogCallbackEx: + name: getLogCallbackEx c:@F@getLogLevel: name: getLogLevel c:@F@registerErrorCallback: name: registerErrorCallback + c:@F@replaceWriteLogMessage: + name: replaceWriteLogMessage c:@F@replaceWriteLogMessageEx: name: replaceWriteLogMessageEx c:@F@setLogCallback: name: setLogCallback + c:@F@setLogCallbackEx: + name: setLogCallbackEx c:@F@setLogLevel: name: setLogLevel c:@F@std_VecChar_clear: @@ -1626,12 +1753,20 @@ files: name: std_VecVecPoint_set c:@F@std_VecVecPoint_shrink_to_fit: name: std_VecVecPoint_shrink_to_fit + c:@F@writeLogMessage: + name: writeLogMessage + c:@F@writeLogMessageEx: + name: writeLogMessageEx c:@logCallback: name: logCallback + c:@logCallbackEx: + name: logCallbackEx c:exception.h@T@ErrorCallback: name: ErrorCallback c:logging.h@T@LogCallback: name: LogCallback + c:logging.h@T@LogCallbackEx: + name: LogCallbackEx c:math.h@T@double_t: name: double_t dart-name: Dartdouble_t @@ -1670,6 +1805,8 @@ files: name: Scalar c:types.h@T@TermCriteria: name: TermCriteria + c:types.h@T@UMat: + name: UMat c:types.h@T@Vec2b: name: Vec2b c:types.h@T@Vec2d: diff --git a/packages/dartcv/lib/src/g/dnn.g.dart b/packages/dartcv/lib/src/g/dnn.g.dart index 78492fa0..426b2de2 100644 --- a/packages/dartcv/lib/src/g/dnn.g.dart +++ b/packages/dartcv/lib/src/g/dnn.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/features2d.g.dart b/packages/dartcv/lib/src/g/features2d.g.dart index 53a0c023..843a20e9 100644 --- a/packages/dartcv/lib/src/g/features2d.g.dart +++ b/packages/dartcv/lib/src/g/features2d.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 @@ -59,16 +58,58 @@ class CvNativeFeatures2d { late final _cv_AKAZE_create = _cv_AKAZE_createPtr .asFunction Function(ffi.Pointer)>(); + ffi.Pointer cv_AKAZE_create_1( + int descriptor_type, + int descriptor_size, + int descriptor_channels, + double threshold, + int nOctaves, + int nOctaveLayers, + int diffusivity, + int max_points, + ffi.Pointer rval, + ) { + return _cv_AKAZE_create_1( + descriptor_type, + descriptor_size, + descriptor_channels, + threshold, + nOctaves, + nOctaveLayers, + diffusivity, + max_points, + rval, + ); + } + + late final _cv_AKAZE_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Float, + ffi.Int, + ffi.Int, + ffi.Int, + 
ffi.Int, + ffi.Pointer)>>('cv_AKAZE_create_1'); + late final _cv_AKAZE_create_1 = _cv_AKAZE_create_1Ptr.asFunction< + ffi.Pointer Function( + int, int, int, double, int, int, int, int, ffi.Pointer)>(); + ffi.Pointer cv_AKAZE_detect( AKAZE self, Mat src, ffi.Pointer rval, + Mat mask, imp$1.CvCallback_0 callback, ) { return _cv_AKAZE_detect( self, src, rval, + mask, callback, ); } @@ -76,10 +117,10 @@ class CvNativeFeatures2d { late final _cv_AKAZE_detectPtr = _lookup< ffi.NativeFunction< ffi.Pointer Function(AKAZE, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_AKAZE_detect'); + Mat, imp$1.CvCallback_0)>>('cv_AKAZE_detect'); late final _cv_AKAZE_detect = _cv_AKAZE_detectPtr.asFunction< ffi.Pointer Function( - AKAZE, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + AKAZE, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); ffi.Pointer cv_AKAZE_detectAndCompute( AKAZE self, @@ -87,6 +128,7 @@ class CvNativeFeatures2d { Mat mask, Mat desc, ffi.Pointer rval, + bool useProvidedKeypoints, imp$1.CvCallback_0 callback, ) { return _cv_AKAZE_detectAndCompute( @@ -95,6 +137,7 @@ class CvNativeFeatures2d { mask, desc, rval, + useProvidedKeypoints, callback, ); } @@ -107,734 +150,2888 @@ class CvNativeFeatures2d { Mat, Mat, ffi.Pointer, + ffi.Bool, imp$1.CvCallback_0)>>('cv_AKAZE_detectAndCompute'); late final _cv_AKAZE_detectAndCompute = _cv_AKAZE_detectAndComputePtr.asFunction< ffi.Pointer Function(AKAZE, Mat, Mat, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + ffi.Pointer, bool, imp$1.CvCallback_0)>(); - void cv_AgastFeatureDetector_close( - AgastFeatureDetectorPtr self, + bool cv_AKAZE_empty( + AKAZE self, ) { - return _cv_AgastFeatureDetector_close( + return _cv_AKAZE_empty( self, ); } - late final _cv_AgastFeatureDetector_closePtr = - _lookup>( - 'cv_AgastFeatureDetector_close'); - late final _cv_AgastFeatureDetector_close = _cv_AgastFeatureDetector_closePtr - .asFunction(); + late final _cv_AKAZE_emptyPtr = + _lookup>('cv_AKAZE_empty'); + late final _cv_AKAZE_empty = + _cv_AKAZE_emptyPtr.asFunction(); - ffi.Pointer cv_AgastFeatureDetector_create( - ffi.Pointer rval, + ffi.Pointer cv_AKAZE_getDefaultName( + AKAZE self, ) { - return _cv_AgastFeatureDetector_create( - rval, + return _cv_AKAZE_getDefaultName( + self, ); } - late final _cv_AgastFeatureDetector_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer)>>( - 'cv_AgastFeatureDetector_create'); - late final _cv_AgastFeatureDetector_create = - _cv_AgastFeatureDetector_createPtr.asFunction< - ffi.Pointer Function(ffi.Pointer)>(); + late final _cv_AKAZE_getDefaultNamePtr = + _lookup Function(AKAZE)>>( + 'cv_AKAZE_getDefaultName'); + late final _cv_AKAZE_getDefaultName = _cv_AKAZE_getDefaultNamePtr + .asFunction Function(AKAZE)>(); - ffi.Pointer cv_AgastFeatureDetector_detect( - AgastFeatureDetector self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_AKAZE_getDescriptorChannels( + AKAZE self, ) { - return _cv_AgastFeatureDetector_detect( + return _cv_AKAZE_getDescriptorChannels( self, - src, - rval, - callback, ); } - late final _cv_AgastFeatureDetector_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - AgastFeatureDetector, - Mat, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_AgastFeatureDetector_detect'); - late final _cv_AgastFeatureDetector_detect = - _cv_AgastFeatureDetector_detectPtr.asFunction< - ffi.Pointer Function(AgastFeatureDetector, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_AKAZE_getDescriptorChannelsPtr = + _lookup>( + 'cv_AKAZE_getDescriptorChannels'); + late 
final _cv_AKAZE_getDescriptorChannels = + _cv_AKAZE_getDescriptorChannelsPtr.asFunction(); - void cv_BFMatcher_close( - BFMatcherPtr self, + int cv_AKAZE_getDescriptorSize( + AKAZE self, ) { - return _cv_BFMatcher_close( + return _cv_AKAZE_getDescriptorSize( self, ); } - late final _cv_BFMatcher_closePtr = - _lookup>( - 'cv_BFMatcher_close'); - late final _cv_BFMatcher_close = - _cv_BFMatcher_closePtr.asFunction(); + late final _cv_AKAZE_getDescriptorSizePtr = + _lookup>( + 'cv_AKAZE_getDescriptorSize'); + late final _cv_AKAZE_getDescriptorSize = + _cv_AKAZE_getDescriptorSizePtr.asFunction(); - ffi.Pointer cv_BFMatcher_create( - ffi.Pointer rval, + int cv_AKAZE_getDescriptorType( + AKAZE self, ) { - return _cv_BFMatcher_create( - rval, + return _cv_AKAZE_getDescriptorType( + self, ); } - late final _cv_BFMatcher_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer)>>('cv_BFMatcher_create'); - late final _cv_BFMatcher_create = _cv_BFMatcher_createPtr - .asFunction Function(ffi.Pointer)>(); + late final _cv_AKAZE_getDescriptorTypePtr = + _lookup>( + 'cv_AKAZE_getDescriptorType'); + late final _cv_AKAZE_getDescriptorType = + _cv_AKAZE_getDescriptorTypePtr.asFunction(); - ffi.Pointer cv_BFMatcher_create_1( - int normType, - bool crossCheck, - ffi.Pointer rval, + int cv_AKAZE_getDiffusivity( + AKAZE self, ) { - return _cv_BFMatcher_create_1( - normType, - crossCheck, - rval, + return _cv_AKAZE_getDiffusivity( + self, ); } - late final _cv_BFMatcher_create_1Ptr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(ffi.Int, ffi.Bool, - ffi.Pointer)>>('cv_BFMatcher_create_1'); - late final _cv_BFMatcher_create_1 = _cv_BFMatcher_create_1Ptr.asFunction< - ffi.Pointer Function(int, bool, ffi.Pointer)>(); + late final _cv_AKAZE_getDiffusivityPtr = + _lookup>( + 'cv_AKAZE_getDiffusivity'); + late final _cv_AKAZE_getDiffusivity = + _cv_AKAZE_getDiffusivityPtr.asFunction(); - ffi.Pointer cv_BFMatcher_knnMatch( - BFMatcher self, - Mat query, - Mat train, - int k, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_AKAZE_getMaxPoints( + AKAZE self, ) { - return _cv_BFMatcher_knnMatch( + return _cv_AKAZE_getMaxPoints( self, - query, - train, - k, - rval, - callback, ); } - late final _cv_BFMatcher_knnMatchPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - BFMatcher, - Mat, - Mat, - ffi.Int, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_BFMatcher_knnMatch'); - late final _cv_BFMatcher_knnMatch = _cv_BFMatcher_knnMatchPtr.asFunction< - ffi.Pointer Function(BFMatcher, Mat, Mat, int, - ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_AKAZE_getMaxPointsPtr = + _lookup>( + 'cv_AKAZE_getMaxPoints'); + late final _cv_AKAZE_getMaxPoints = + _cv_AKAZE_getMaxPointsPtr.asFunction(); - ffi.Pointer cv_BFMatcher_match( - BFMatcher self, - Mat query, - Mat train, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_AKAZE_getNOctaveLayers( + AKAZE self, ) { - return _cv_BFMatcher_match( + return _cv_AKAZE_getNOctaveLayers( self, - query, - train, - rval, - callback, ); } - late final _cv_BFMatcher_matchPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - BFMatcher, - Mat, - Mat, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_BFMatcher_match'); - late final _cv_BFMatcher_match = _cv_BFMatcher_matchPtr.asFunction< - ffi.Pointer Function( - BFMatcher, Mat, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_AKAZE_getNOctaveLayersPtr = + _lookup>( + 'cv_AKAZE_getNOctaveLayers'); + late final _cv_AKAZE_getNOctaveLayers = + 
_cv_AKAZE_getNOctaveLayersPtr.asFunction(); - void cv_BRISK_close( - BRISKPtr self, + int cv_AKAZE_getNOctaves( + AKAZE self, ) { - return _cv_BRISK_close( + return _cv_AKAZE_getNOctaves( self, ); } - late final _cv_BRISK_closePtr = - _lookup>( - 'cv_BRISK_close'); - late final _cv_BRISK_close = - _cv_BRISK_closePtr.asFunction(); + late final _cv_AKAZE_getNOctavesPtr = + _lookup>( + 'cv_AKAZE_getNOctaves'); + late final _cv_AKAZE_getNOctaves = + _cv_AKAZE_getNOctavesPtr.asFunction(); - ffi.Pointer cv_BRISK_create( - ffi.Pointer rval, + double cv_AKAZE_getThreshold( + AKAZE self, ) { - return _cv_BRISK_create( - rval, + return _cv_AKAZE_getThreshold( + self, ); } - late final _cv_BRISK_createPtr = _lookup< - ffi - .NativeFunction Function(ffi.Pointer)>>( - 'cv_BRISK_create'); - late final _cv_BRISK_create = _cv_BRISK_createPtr - .asFunction Function(ffi.Pointer)>(); + late final _cv_AKAZE_getThresholdPtr = + _lookup>( + 'cv_AKAZE_getThreshold'); + late final _cv_AKAZE_getThreshold = + _cv_AKAZE_getThresholdPtr.asFunction(); - ffi.Pointer cv_BRISK_detect( - BRISK self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + void cv_AKAZE_setDescriptorChannels( + AKAZE self, + int dch, ) { - return _cv_BRISK_detect( + return _cv_AKAZE_setDescriptorChannels( self, - src, - rval, - callback, + dch, ); } - late final _cv_BRISK_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(BRISK, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_BRISK_detect'); - late final _cv_BRISK_detect = _cv_BRISK_detectPtr.asFunction< - ffi.Pointer Function( - BRISK, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_AKAZE_setDescriptorChannelsPtr = + _lookup>( + 'cv_AKAZE_setDescriptorChannels'); + late final _cv_AKAZE_setDescriptorChannels = + _cv_AKAZE_setDescriptorChannelsPtr + .asFunction(); - ffi.Pointer cv_BRISK_detectAndCompute( - BRISK self, - Mat src, - Mat mask, - Mat desc, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + void cv_AKAZE_setDescriptorSize( + AKAZE self, + int dsize, ) { - return _cv_BRISK_detectAndCompute( + return _cv_AKAZE_setDescriptorSize( self, - src, - mask, - desc, - rval, - callback, + dsize, ); } - late final _cv_BRISK_detectAndComputePtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - BRISK, - Mat, - Mat, - Mat, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_BRISK_detectAndCompute'); - late final _cv_BRISK_detectAndCompute = - _cv_BRISK_detectAndComputePtr.asFunction< - ffi.Pointer Function(BRISK, Mat, Mat, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_AKAZE_setDescriptorSizePtr = + _lookup>( + 'cv_AKAZE_setDescriptorSize'); + late final _cv_AKAZE_setDescriptorSize = + _cv_AKAZE_setDescriptorSizePtr.asFunction(); - void cv_FastFeatureDetector_close( - FastFeatureDetectorPtr self, + void cv_AKAZE_setDescriptorType( + AKAZE self, + int dtype, ) { - return _cv_FastFeatureDetector_close( + return _cv_AKAZE_setDescriptorType( self, + dtype, ); } - late final _cv_FastFeatureDetector_closePtr = - _lookup>( - 'cv_FastFeatureDetector_close'); - late final _cv_FastFeatureDetector_close = _cv_FastFeatureDetector_closePtr - .asFunction(); + late final _cv_AKAZE_setDescriptorTypePtr = + _lookup>( + 'cv_AKAZE_setDescriptorType'); + late final _cv_AKAZE_setDescriptorType = + _cv_AKAZE_setDescriptorTypePtr.asFunction(); - ffi.Pointer cv_FastFeatureDetector_create( - ffi.Pointer rval, + void cv_AKAZE_setDiffusivity( + AKAZE self, + int diff, ) { - return _cv_FastFeatureDetector_create( - rval, + return _cv_AKAZE_setDiffusivity( + self, + diff, ); } 
- late final _cv_FastFeatureDetector_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer)>>( - 'cv_FastFeatureDetector_create'); - late final _cv_FastFeatureDetector_create = - _cv_FastFeatureDetector_createPtr.asFunction< - ffi.Pointer Function(ffi.Pointer)>(); + late final _cv_AKAZE_setDiffusivityPtr = + _lookup>( + 'cv_AKAZE_setDiffusivity'); + late final _cv_AKAZE_setDiffusivity = + _cv_AKAZE_setDiffusivityPtr.asFunction(); - ffi.Pointer cv_FastFeatureDetector_create_1( - int threshold, - bool nonmaxSuppression, - int type, - ffi.Pointer rval, + void cv_AKAZE_setMaxPoints( + AKAZE self, + int max_points, ) { - return _cv_FastFeatureDetector_create_1( - threshold, - nonmaxSuppression, - type, - rval, + return _cv_AKAZE_setMaxPoints( + self, + max_points, ); } - late final _cv_FastFeatureDetector_create_1Ptr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(ffi.Int, ffi.Bool, ffi.Int, - ffi.Pointer)>>( - 'cv_FastFeatureDetector_create_1'); - late final _cv_FastFeatureDetector_create_1 = + late final _cv_AKAZE_setMaxPointsPtr = + _lookup>( + 'cv_AKAZE_setMaxPoints'); + late final _cv_AKAZE_setMaxPoints = + _cv_AKAZE_setMaxPointsPtr.asFunction(); + + void cv_AKAZE_setNOctaveLayers( + AKAZE self, + int octaveLayers, + ) { + return _cv_AKAZE_setNOctaveLayers( + self, + octaveLayers, + ); + } + + late final _cv_AKAZE_setNOctaveLayersPtr = + _lookup>( + 'cv_AKAZE_setNOctaveLayers'); + late final _cv_AKAZE_setNOctaveLayers = + _cv_AKAZE_setNOctaveLayersPtr.asFunction(); + + void cv_AKAZE_setNOctaves( + AKAZE self, + int octaves, + ) { + return _cv_AKAZE_setNOctaves( + self, + octaves, + ); + } + + late final _cv_AKAZE_setNOctavesPtr = + _lookup>( + 'cv_AKAZE_setNOctaves'); + late final _cv_AKAZE_setNOctaves = + _cv_AKAZE_setNOctavesPtr.asFunction(); + + void cv_AKAZE_setThreshold( + AKAZE self, + double threshold, + ) { + return _cv_AKAZE_setThreshold( + self, + threshold, + ); + } + + late final _cv_AKAZE_setThresholdPtr = + _lookup>( + 'cv_AKAZE_setThreshold'); + late final _cv_AKAZE_setThreshold = + _cv_AKAZE_setThresholdPtr.asFunction(); + + void cv_AgastFeatureDetector_close( + AgastFeatureDetectorPtr self, + ) { + return _cv_AgastFeatureDetector_close( + self, + ); + } + + late final _cv_AgastFeatureDetector_closePtr = + _lookup>( + 'cv_AgastFeatureDetector_close'); + late final _cv_AgastFeatureDetector_close = _cv_AgastFeatureDetector_closePtr + .asFunction(); + + ffi.Pointer cv_AgastFeatureDetector_create( + ffi.Pointer rval, + ) { + return _cv_AgastFeatureDetector_create( + rval, + ); + } + + late final _cv_AgastFeatureDetector_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>( + 'cv_AgastFeatureDetector_create'); + late final _cv_AgastFeatureDetector_create = + _cv_AgastFeatureDetector_createPtr.asFunction< + ffi.Pointer Function(ffi.Pointer)>(); + + ffi.Pointer cv_AgastFeatureDetector_create_1( + int threshold, + bool nonmaxSuppression, + int type, + ffi.Pointer rval, + ) { + return _cv_AgastFeatureDetector_create_1( + threshold, + nonmaxSuppression, + type, + rval, + ); + } + + late final _cv_AgastFeatureDetector_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Bool, ffi.Int, + ffi.Pointer)>>( + 'cv_AgastFeatureDetector_create_1'); + late final _cv_AgastFeatureDetector_create_1 = + _cv_AgastFeatureDetector_create_1Ptr.asFunction< + ffi.Pointer Function( + int, bool, int, ffi.Pointer)>(); + + ffi.Pointer cv_AgastFeatureDetector_detect( + AgastFeatureDetector self, + Mat src, + 
ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_AgastFeatureDetector_detect( + self, + src, + rval, + mask, + callback, + ); + } + + late final _cv_AgastFeatureDetector_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + AgastFeatureDetector, + Mat, + ffi.Pointer, + Mat, + imp$1.CvCallback_0)>>('cv_AgastFeatureDetector_detect'); + late final _cv_AgastFeatureDetector_detect = + _cv_AgastFeatureDetector_detectPtr.asFunction< + ffi.Pointer Function(AgastFeatureDetector, Mat, + ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_AgastFeatureDetector_detectAndCompute( + AgastFeatureDetector self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_AgastFeatureDetector_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_AgastFeatureDetector_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + AgastFeatureDetector, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_AgastFeatureDetector_detectAndCompute'); + late final _cv_AgastFeatureDetector_detectAndCompute = + _cv_AgastFeatureDetector_detectAndComputePtr.asFunction< + ffi.Pointer Function(AgastFeatureDetector, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_AgastFeatureDetector_empty( + AgastFeatureDetector self, + ) { + return _cv_AgastFeatureDetector_empty( + self, + ); + } + + late final _cv_AgastFeatureDetector_emptyPtr = + _lookup>( + 'cv_AgastFeatureDetector_empty'); + late final _cv_AgastFeatureDetector_empty = _cv_AgastFeatureDetector_emptyPtr + .asFunction(); + + ffi.Pointer cv_AgastFeatureDetector_getDefaultName( + AgastFeatureDetector self, + ) { + return _cv_AgastFeatureDetector_getDefaultName( + self, + ); + } + + late final _cv_AgastFeatureDetector_getDefaultNamePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + AgastFeatureDetector)>>('cv_AgastFeatureDetector_getDefaultName'); + late final _cv_AgastFeatureDetector_getDefaultName = + _cv_AgastFeatureDetector_getDefaultNamePtr + .asFunction Function(AgastFeatureDetector)>(); + + bool cv_AgastFeatureDetector_getNonmaxSuppression( + AgastFeatureDetector self, + ) { + return _cv_AgastFeatureDetector_getNonmaxSuppression( + self, + ); + } + + late final _cv_AgastFeatureDetector_getNonmaxSuppressionPtr = + _lookup>( + 'cv_AgastFeatureDetector_getNonmaxSuppression'); + late final _cv_AgastFeatureDetector_getNonmaxSuppression = + _cv_AgastFeatureDetector_getNonmaxSuppressionPtr + .asFunction(); + + int cv_AgastFeatureDetector_getThreshold( + AgastFeatureDetector self, + ) { + return _cv_AgastFeatureDetector_getThreshold( + self, + ); + } + + late final _cv_AgastFeatureDetector_getThresholdPtr = + _lookup>( + 'cv_AgastFeatureDetector_getThreshold'); + late final _cv_AgastFeatureDetector_getThreshold = + _cv_AgastFeatureDetector_getThresholdPtr + .asFunction(); + + int cv_AgastFeatureDetector_getType( + AgastFeatureDetector self, + ) { + return _cv_AgastFeatureDetector_getType( + self, + ); + } + + late final _cv_AgastFeatureDetector_getTypePtr = + _lookup>( + 'cv_AgastFeatureDetector_getType'); + late final _cv_AgastFeatureDetector_getType = + _cv_AgastFeatureDetector_getTypePtr + .asFunction(); + + void cv_AgastFeatureDetector_setNonmaxSuppression( + AgastFeatureDetector self, + bool f, + ) { + return _cv_AgastFeatureDetector_setNonmaxSuppression( + self, + f, + ); + } + + late final 
_cv_AgastFeatureDetector_setNonmaxSuppressionPtr = _lookup< + ffi + .NativeFunction>( + 'cv_AgastFeatureDetector_setNonmaxSuppression'); + late final _cv_AgastFeatureDetector_setNonmaxSuppression = + _cv_AgastFeatureDetector_setNonmaxSuppressionPtr + .asFunction(); + + void cv_AgastFeatureDetector_setThreshold( + AgastFeatureDetector self, + int threshold, + ) { + return _cv_AgastFeatureDetector_setThreshold( + self, + threshold, + ); + } + + late final _cv_AgastFeatureDetector_setThresholdPtr = _lookup< + ffi.NativeFunction>( + 'cv_AgastFeatureDetector_setThreshold'); + late final _cv_AgastFeatureDetector_setThreshold = + _cv_AgastFeatureDetector_setThresholdPtr + .asFunction(); + + void cv_AgastFeatureDetector_setType( + AgastFeatureDetector self, + int type, + ) { + return _cv_AgastFeatureDetector_setType( + self, + type, + ); + } + + late final _cv_AgastFeatureDetector_setTypePtr = _lookup< + ffi.NativeFunction>( + 'cv_AgastFeatureDetector_setType'); + late final _cv_AgastFeatureDetector_setType = + _cv_AgastFeatureDetector_setTypePtr + .asFunction(); + + void cv_BFMatcher_close( + BFMatcherPtr self, + ) { + return _cv_BFMatcher_close( + self, + ); + } + + late final _cv_BFMatcher_closePtr = + _lookup>( + 'cv_BFMatcher_close'); + late final _cv_BFMatcher_close = + _cv_BFMatcher_closePtr.asFunction(); + + ffi.Pointer cv_BFMatcher_create( + ffi.Pointer rval, + ) { + return _cv_BFMatcher_create( + rval, + ); + } + + late final _cv_BFMatcher_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>('cv_BFMatcher_create'); + late final _cv_BFMatcher_create = _cv_BFMatcher_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_BFMatcher_create_1( + int normType, + bool crossCheck, + ffi.Pointer rval, + ) { + return _cv_BFMatcher_create_1( + normType, + crossCheck, + rval, + ); + } + + late final _cv_BFMatcher_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Bool, + ffi.Pointer)>>('cv_BFMatcher_create_1'); + late final _cv_BFMatcher_create_1 = _cv_BFMatcher_create_1Ptr.asFunction< + ffi.Pointer Function(int, bool, ffi.Pointer)>(); + + ffi.Pointer cv_BFMatcher_knnMatch( + BFMatcher self, + Mat query, + Mat train, + int k, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_BFMatcher_knnMatch( + self, + query, + train, + k, + rval, + callback, + ); + } + + late final _cv_BFMatcher_knnMatchPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + BFMatcher, + Mat, + Mat, + ffi.Int, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_BFMatcher_knnMatch'); + late final _cv_BFMatcher_knnMatch = _cv_BFMatcher_knnMatchPtr.asFunction< + ffi.Pointer Function(BFMatcher, Mat, Mat, int, + ffi.Pointer, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_BFMatcher_match( + BFMatcher self, + Mat query, + Mat train, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_BFMatcher_match( + self, + query, + train, + rval, + callback, + ); + } + + late final _cv_BFMatcher_matchPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + BFMatcher, + Mat, + Mat, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_BFMatcher_match'); + late final _cv_BFMatcher_match = _cv_BFMatcher_matchPtr.asFunction< + ffi.Pointer Function( + BFMatcher, Mat, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + + void cv_BRISK_close( + BRISKPtr self, + ) { + return _cv_BRISK_close( + self, + ); + } + + late final _cv_BRISK_closePtr = + _lookup>( + 'cv_BRISK_close'); + late final _cv_BRISK_close = + _cv_BRISK_closePtr.asFunction(); + + ffi.Pointer 
cv_BRISK_create( + ffi.Pointer rval, + ) { + return _cv_BRISK_create( + rval, + ); + } + + late final _cv_BRISK_createPtr = _lookup< + ffi + .NativeFunction Function(ffi.Pointer)>>( + 'cv_BRISK_create'); + late final _cv_BRISK_create = _cv_BRISK_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_BRISK_create_1( + VecF32 radiusList, + VecI32 numberList, + double dMax, + double dMin, + VecI32 indexChange, + ffi.Pointer rval, + ) { + return _cv_BRISK_create_1( + radiusList, + numberList, + dMax, + dMin, + indexChange, + rval, + ); + } + + late final _cv_BRISK_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(VecF32, VecI32, ffi.Float, ffi.Float, + VecI32, ffi.Pointer)>>('cv_BRISK_create_1'); + late final _cv_BRISK_create_1 = _cv_BRISK_create_1Ptr.asFunction< + ffi.Pointer Function( + VecF32, VecI32, double, double, VecI32, ffi.Pointer)>(); + + ffi.Pointer cv_BRISK_create_2( + int thresh, + int octaves, + VecF32 radiusList, + VecI32 numberList, + double dMax, + double dMin, + VecI32 indexChange, + ffi.Pointer rval, + ) { + return _cv_BRISK_create_2( + thresh, + octaves, + radiusList, + numberList, + dMax, + dMin, + indexChange, + rval, + ); + } + + late final _cv_BRISK_create_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Int, + VecF32, + VecI32, + ffi.Float, + ffi.Float, + VecI32, + ffi.Pointer)>>('cv_BRISK_create_2'); + late final _cv_BRISK_create_2 = _cv_BRISK_create_2Ptr.asFunction< + ffi.Pointer Function(int, int, VecF32, VecI32, double, double, + VecI32, ffi.Pointer)>(); + + ffi.Pointer cv_BRISK_create_3( + int thresh, + int octaves, + double patternScale, + ffi.Pointer rval, + ) { + return _cv_BRISK_create_3( + thresh, + octaves, + patternScale, + rval, + ); + } + + late final _cv_BRISK_create_3Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Int, ffi.Float, + ffi.Pointer)>>('cv_BRISK_create_3'); + late final _cv_BRISK_create_3 = _cv_BRISK_create_3Ptr.asFunction< + ffi.Pointer Function(int, int, double, ffi.Pointer)>(); + + ffi.Pointer cv_BRISK_detect( + BRISK self, + Mat src, + ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_BRISK_detect( + self, + src, + rval, + mask, + callback, + ); + } + + late final _cv_BRISK_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(BRISK, Mat, ffi.Pointer, + Mat, imp$1.CvCallback_0)>>('cv_BRISK_detect'); + late final _cv_BRISK_detect = _cv_BRISK_detectPtr.asFunction< + ffi.Pointer Function( + BRISK, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_BRISK_detectAndCompute( + BRISK self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_BRISK_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_BRISK_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + BRISK, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_BRISK_detectAndCompute'); + late final _cv_BRISK_detectAndCompute = + _cv_BRISK_detectAndComputePtr.asFunction< + ffi.Pointer Function(BRISK, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_BRISK_empty( + BRISK self, + ) { + return _cv_BRISK_empty( + self, + ); + } + + late final _cv_BRISK_emptyPtr = + _lookup>('cv_BRISK_empty'); + late final _cv_BRISK_empty = + _cv_BRISK_emptyPtr.asFunction(); + + ffi.Pointer cv_BRISK_getDefaultName( + BRISK self, + ) { + return 
_cv_BRISK_getDefaultName( + self, + ); + } + + late final _cv_BRISK_getDefaultNamePtr = + _lookup Function(BRISK)>>( + 'cv_BRISK_getDefaultName'); + late final _cv_BRISK_getDefaultName = _cv_BRISK_getDefaultNamePtr + .asFunction Function(BRISK)>(); + + int cv_BRISK_getOctaves( + BRISK self, + ) { + return _cv_BRISK_getOctaves( + self, + ); + } + + late final _cv_BRISK_getOctavesPtr = + _lookup>( + 'cv_BRISK_getOctaves'); + late final _cv_BRISK_getOctaves = + _cv_BRISK_getOctavesPtr.asFunction(); + + double cv_BRISK_getPatternScale( + BRISK self, + ) { + return _cv_BRISK_getPatternScale( + self, + ); + } + + late final _cv_BRISK_getPatternScalePtr = + _lookup>( + 'cv_BRISK_getPatternScale'); + late final _cv_BRISK_getPatternScale = + _cv_BRISK_getPatternScalePtr.asFunction(); + + int cv_BRISK_getThreshold( + BRISK self, + ) { + return _cv_BRISK_getThreshold( + self, + ); + } + + late final _cv_BRISK_getThresholdPtr = + _lookup>( + 'cv_BRISK_getThreshold'); + late final _cv_BRISK_getThreshold = + _cv_BRISK_getThresholdPtr.asFunction(); + + void cv_BRISK_setOctaves( + BRISK self, + int octaves, + ) { + return _cv_BRISK_setOctaves( + self, + octaves, + ); + } + + late final _cv_BRISK_setOctavesPtr = + _lookup>( + 'cv_BRISK_setOctaves'); + late final _cv_BRISK_setOctaves = + _cv_BRISK_setOctavesPtr.asFunction(); + + void cv_BRISK_setPatternScale( + BRISK self, + double patternScale, + ) { + return _cv_BRISK_setPatternScale( + self, + patternScale, + ); + } + + late final _cv_BRISK_setPatternScalePtr = + _lookup>( + 'cv_BRISK_setPatternScale'); + late final _cv_BRISK_setPatternScale = + _cv_BRISK_setPatternScalePtr.asFunction(); + + void cv_BRISK_setThreshold( + BRISK self, + int threshold, + ) { + return _cv_BRISK_setThreshold( + self, + threshold, + ); + } + + late final _cv_BRISK_setThresholdPtr = + _lookup>( + 'cv_BRISK_setThreshold'); + late final _cv_BRISK_setThreshold = + _cv_BRISK_setThresholdPtr.asFunction(); + + void cv_FastFeatureDetector_close( + FastFeatureDetectorPtr self, + ) { + return _cv_FastFeatureDetector_close( + self, + ); + } + + late final _cv_FastFeatureDetector_closePtr = + _lookup>( + 'cv_FastFeatureDetector_close'); + late final _cv_FastFeatureDetector_close = _cv_FastFeatureDetector_closePtr + .asFunction(); + + ffi.Pointer cv_FastFeatureDetector_create( + ffi.Pointer rval, + ) { + return _cv_FastFeatureDetector_create( + rval, + ); + } + + late final _cv_FastFeatureDetector_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>( + 'cv_FastFeatureDetector_create'); + late final _cv_FastFeatureDetector_create = + _cv_FastFeatureDetector_createPtr.asFunction< + ffi.Pointer Function(ffi.Pointer)>(); + + ffi.Pointer cv_FastFeatureDetector_create_1( + int threshold, + bool nonmaxSuppression, + int type, + ffi.Pointer rval, + ) { + return _cv_FastFeatureDetector_create_1( + threshold, + nonmaxSuppression, + type, + rval, + ); + } + + late final _cv_FastFeatureDetector_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Int, ffi.Bool, ffi.Int, + ffi.Pointer)>>( + 'cv_FastFeatureDetector_create_1'); + late final _cv_FastFeatureDetector_create_1 = _cv_FastFeatureDetector_create_1Ptr.asFunction< ffi.Pointer Function( - int, bool, int, ffi.Pointer)>(); + int, bool, int, ffi.Pointer)>(); + + ffi.Pointer cv_FastFeatureDetector_detect( + FastFeatureDetector self, + Mat src, + ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_FastFeatureDetector_detect( + self, + src, + rval, + mask, + callback, + ); + } 
+ + late final _cv_FastFeatureDetector_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FastFeatureDetector, + Mat, + ffi.Pointer, + Mat, + imp$1.CvCallback_0)>>('cv_FastFeatureDetector_detect'); + late final _cv_FastFeatureDetector_detect = + _cv_FastFeatureDetector_detectPtr.asFunction< + ffi.Pointer Function(FastFeatureDetector, Mat, + ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_FastFeatureDetector_detectAndCompute( + FastFeatureDetector self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_FastFeatureDetector_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_FastFeatureDetector_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FastFeatureDetector, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_FastFeatureDetector_detectAndCompute'); + late final _cv_FastFeatureDetector_detectAndCompute = + _cv_FastFeatureDetector_detectAndComputePtr.asFunction< + ffi.Pointer Function(FastFeatureDetector, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_FastFeatureDetector_empty( + FastFeatureDetector self, + ) { + return _cv_FastFeatureDetector_empty( + self, + ); + } + + late final _cv_FastFeatureDetector_emptyPtr = + _lookup>( + 'cv_FastFeatureDetector_empty'); + late final _cv_FastFeatureDetector_empty = _cv_FastFeatureDetector_emptyPtr + .asFunction(); + + ffi.Pointer cv_FastFeatureDetector_getDefaultName( + FastFeatureDetector self, + ) { + return _cv_FastFeatureDetector_getDefaultName( + self, + ); + } + + late final _cv_FastFeatureDetector_getDefaultNamePtr = _lookup< + ffi + .NativeFunction Function(FastFeatureDetector)>>( + 'cv_FastFeatureDetector_getDefaultName'); + late final _cv_FastFeatureDetector_getDefaultName = + _cv_FastFeatureDetector_getDefaultNamePtr + .asFunction Function(FastFeatureDetector)>(); + + bool cv_FastFeatureDetector_getNonmaxSuppression( + FastFeatureDetector self, + ) { + return _cv_FastFeatureDetector_getNonmaxSuppression( + self, + ); + } + + late final _cv_FastFeatureDetector_getNonmaxSuppressionPtr = + _lookup>( + 'cv_FastFeatureDetector_getNonmaxSuppression'); + late final _cv_FastFeatureDetector_getNonmaxSuppression = + _cv_FastFeatureDetector_getNonmaxSuppressionPtr + .asFunction(); + + int cv_FastFeatureDetector_getThreshold( + FastFeatureDetector self, + ) { + return _cv_FastFeatureDetector_getThreshold( + self, + ); + } + + late final _cv_FastFeatureDetector_getThresholdPtr = + _lookup>( + 'cv_FastFeatureDetector_getThreshold'); + late final _cv_FastFeatureDetector_getThreshold = + _cv_FastFeatureDetector_getThresholdPtr + .asFunction(); + + int cv_FastFeatureDetector_getType( + FastFeatureDetector self, + ) { + return _cv_FastFeatureDetector_getType( + self, + ); + } + + late final _cv_FastFeatureDetector_getTypePtr = + _lookup>( + 'cv_FastFeatureDetector_getType'); + late final _cv_FastFeatureDetector_getType = + _cv_FastFeatureDetector_getTypePtr + .asFunction(); + + void cv_FastFeatureDetector_setNonmaxSuppression( + FastFeatureDetector self, + bool f, + ) { + return _cv_FastFeatureDetector_setNonmaxSuppression( + self, + f, + ); + } + + late final _cv_FastFeatureDetector_setNonmaxSuppressionPtr = _lookup< + ffi.NativeFunction>( + 'cv_FastFeatureDetector_setNonmaxSuppression'); + late final _cv_FastFeatureDetector_setNonmaxSuppression = + _cv_FastFeatureDetector_setNonmaxSuppressionPtr 
+ .asFunction(); + + void cv_FastFeatureDetector_setThreshold( + FastFeatureDetector self, + int threshold, + ) { + return _cv_FastFeatureDetector_setThreshold( + self, + threshold, + ); + } + + late final _cv_FastFeatureDetector_setThresholdPtr = _lookup< + ffi.NativeFunction>( + 'cv_FastFeatureDetector_setThreshold'); + late final _cv_FastFeatureDetector_setThreshold = + _cv_FastFeatureDetector_setThresholdPtr + .asFunction(); + + void cv_FastFeatureDetector_setType( + FastFeatureDetector self, + int type, + ) { + return _cv_FastFeatureDetector_setType( + self, + type, + ); + } + + late final _cv_FastFeatureDetector_setTypePtr = _lookup< + ffi.NativeFunction>( + 'cv_FastFeatureDetector_setType'); + late final _cv_FastFeatureDetector_setType = + _cv_FastFeatureDetector_setTypePtr + .asFunction(); + + void cv_FlannBasedMatcher_close( + FlannBasedMatcherPtr self, + ) { + return _cv_FlannBasedMatcher_close( + self, + ); + } + + late final _cv_FlannBasedMatcher_closePtr = + _lookup>( + 'cv_FlannBasedMatcher_close'); + late final _cv_FlannBasedMatcher_close = _cv_FlannBasedMatcher_closePtr + .asFunction(); + + ffi.Pointer cv_FlannBasedMatcher_create( + ffi.Pointer rval, + ) { + return _cv_FlannBasedMatcher_create( + rval, + ); + } + + late final _cv_FlannBasedMatcher_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>('cv_FlannBasedMatcher_create'); + late final _cv_FlannBasedMatcher_create = + _cv_FlannBasedMatcher_createPtr.asFunction< + ffi.Pointer Function(ffi.Pointer)>(); + + ffi.Pointer cv_FlannBasedMatcher_create_1( + ffi.Pointer rval, + FlannIndexParams indexParams, + FlannIndexParams searchParams, + ) { + return _cv_FlannBasedMatcher_create_1( + rval, + indexParams, + searchParams, + ); + } + + late final _cv_FlannBasedMatcher_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer, + FlannIndexParams, + FlannIndexParams)>>('cv_FlannBasedMatcher_create_1'); + late final _cv_FlannBasedMatcher_create_1 = + _cv_FlannBasedMatcher_create_1Ptr.asFunction< + ffi.Pointer Function(ffi.Pointer, + FlannIndexParams, FlannIndexParams)>(); + + ffi.Pointer cv_FlannBasedMatcher_knnMatch( + FlannBasedMatcher self, + Mat query, + Mat train, + int k, + ffi.Pointer rval, + imp$1.CvCallback_0 callback, + ) { + return _cv_FlannBasedMatcher_knnMatch( + self, + query, + train, + k, + rval, + callback, + ); + } + + late final _cv_FlannBasedMatcher_knnMatchPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + FlannBasedMatcher, + Mat, + Mat, + ffi.Int, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_FlannBasedMatcher_knnMatch'); + late final _cv_FlannBasedMatcher_knnMatch = + _cv_FlannBasedMatcher_knnMatchPtr.asFunction< + ffi.Pointer Function(FlannBasedMatcher, Mat, Mat, int, + ffi.Pointer, imp$1.CvCallback_0)>(); + + void cv_GFTTDetector_close( + GFTTDetectorPtr self, + ) { + return _cv_GFTTDetector_close( + self, + ); + } + + late final _cv_GFTTDetector_closePtr = + _lookup>( + 'cv_GFTTDetector_close'); + late final _cv_GFTTDetector_close = + _cv_GFTTDetector_closePtr.asFunction(); + + ffi.Pointer cv_GFTTDetector_create( + ffi.Pointer rval, + ) { + return _cv_GFTTDetector_create( + rval, + ); + } + + late final _cv_GFTTDetector_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Pointer)>>('cv_GFTTDetector_create'); + late final _cv_GFTTDetector_create = _cv_GFTTDetector_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_GFTTDetector_create_1( + int maxCorners, + double qualityLevel, + double minDistance, + 
int blockSize, + int gradiantSize, + bool useHarrisDetector, + double k, + ffi.Pointer rval, + ) { + return _cv_GFTTDetector_create_1( + maxCorners, + qualityLevel, + minDistance, + blockSize, + gradiantSize, + useHarrisDetector, + k, + rval, + ); + } + + late final _cv_GFTTDetector_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + ffi.Int, + ffi.Bool, + ffi.Double, + ffi.Pointer)>>('cv_GFTTDetector_create_1'); + late final _cv_GFTTDetector_create_1 = + _cv_GFTTDetector_create_1Ptr.asFunction< + ffi.Pointer Function(int, double, double, int, int, bool, + double, ffi.Pointer)>(); + + ffi.Pointer cv_GFTTDetector_create_2( + int maxCorners, + double qualityLevel, + double minDistance, + int blockSize, + bool useHarrisDetector, + double k, + ffi.Pointer rval, + ) { + return _cv_GFTTDetector_create_2( + maxCorners, + qualityLevel, + minDistance, + blockSize, + useHarrisDetector, + k, + rval, + ); + } + + late final _cv_GFTTDetector_create_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + ffi.Bool, + ffi.Double, + ffi.Pointer)>>('cv_GFTTDetector_create_2'); + late final _cv_GFTTDetector_create_2 = + _cv_GFTTDetector_create_2Ptr.asFunction< + ffi.Pointer Function(int, double, double, int, bool, double, + ffi.Pointer)>(); + + ffi.Pointer cv_GFTTDetector_detect( + GFTTDetector self, + Mat src, + ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_GFTTDetector_detect( + self, + src, + rval, + mask, + callback, + ); + } + + late final _cv_GFTTDetector_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + GFTTDetector, + Mat, + ffi.Pointer, + Mat, + imp$1.CvCallback_0)>>('cv_GFTTDetector_detect'); + late final _cv_GFTTDetector_detect = _cv_GFTTDetector_detectPtr.asFunction< + ffi.Pointer Function(GFTTDetector, Mat, + ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_GFTTDetector_detectAndCompute( + GFTTDetector self, + Mat src, + Mat mask, + Mat descriptors, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_GFTTDetector_detectAndCompute( + self, + src, + mask, + descriptors, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_GFTTDetector_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + GFTTDetector, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_GFTTDetector_detectAndCompute'); + late final _cv_GFTTDetector_detectAndCompute = + _cv_GFTTDetector_detectAndComputePtr.asFunction< + ffi.Pointer Function(GFTTDetector, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_GFTTDetector_empty( + GFTTDetector self, + ) { + return _cv_GFTTDetector_empty( + self, + ); + } + + late final _cv_GFTTDetector_emptyPtr = + _lookup>( + 'cv_GFTTDetector_empty'); + late final _cv_GFTTDetector_empty = + _cv_GFTTDetector_emptyPtr.asFunction(); + + int cv_GFTTDetector_getBlockSize( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getBlockSize( + self, + ); + } + + late final _cv_GFTTDetector_getBlockSizePtr = + _lookup>( + 'cv_GFTTDetector_getBlockSize'); + late final _cv_GFTTDetector_getBlockSize = + _cv_GFTTDetector_getBlockSizePtr.asFunction(); + + ffi.Pointer cv_GFTTDetector_getDefaultName( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getDefaultName( + self, + ); + } + + late final _cv_GFTTDetector_getDefaultNamePtr = + _lookup Function(GFTTDetector)>>( + 
'cv_GFTTDetector_getDefaultName'); + late final _cv_GFTTDetector_getDefaultName = + _cv_GFTTDetector_getDefaultNamePtr + .asFunction Function(GFTTDetector)>(); + + int cv_GFTTDetector_getGradientSize( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getGradientSize( + self, + ); + } + + late final _cv_GFTTDetector_getGradientSizePtr = + _lookup>( + 'cv_GFTTDetector_getGradientSize'); + late final _cv_GFTTDetector_getGradientSize = + _cv_GFTTDetector_getGradientSizePtr + .asFunction(); + + bool cv_GFTTDetector_getHarrisDetector( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getHarrisDetector( + self, + ); + } + + late final _cv_GFTTDetector_getHarrisDetectorPtr = + _lookup>( + 'cv_GFTTDetector_getHarrisDetector'); + late final _cv_GFTTDetector_getHarrisDetector = + _cv_GFTTDetector_getHarrisDetectorPtr + .asFunction(); + + double cv_GFTTDetector_getK( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getK( + self, + ); + } + + late final _cv_GFTTDetector_getKPtr = + _lookup>( + 'cv_GFTTDetector_getK'); + late final _cv_GFTTDetector_getK = + _cv_GFTTDetector_getKPtr.asFunction(); + + int cv_GFTTDetector_getMaxFeatures( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getMaxFeatures( + self, + ); + } + + late final _cv_GFTTDetector_getMaxFeaturesPtr = + _lookup>( + 'cv_GFTTDetector_getMaxFeatures'); + late final _cv_GFTTDetector_getMaxFeatures = + _cv_GFTTDetector_getMaxFeaturesPtr + .asFunction(); + + double cv_GFTTDetector_getMinDistance( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getMinDistance( + self, + ); + } + + late final _cv_GFTTDetector_getMinDistancePtr = + _lookup>( + 'cv_GFTTDetector_getMinDistance'); + late final _cv_GFTTDetector_getMinDistance = + _cv_GFTTDetector_getMinDistancePtr + .asFunction(); + + double cv_GFTTDetector_getQualityLevel( + GFTTDetector self, + ) { + return _cv_GFTTDetector_getQualityLevel( + self, + ); + } + + late final _cv_GFTTDetector_getQualityLevelPtr = + _lookup>( + 'cv_GFTTDetector_getQualityLevel'); + late final _cv_GFTTDetector_getQualityLevel = + _cv_GFTTDetector_getQualityLevelPtr + .asFunction(); + + void cv_GFTTDetector_setBlockSize( + GFTTDetector self, + int blockSize, + ) { + return _cv_GFTTDetector_setBlockSize( + self, + blockSize, + ); + } + + late final _cv_GFTTDetector_setBlockSizePtr = + _lookup>( + 'cv_GFTTDetector_setBlockSize'); + late final _cv_GFTTDetector_setBlockSize = _cv_GFTTDetector_setBlockSizePtr + .asFunction(); + + void cv_GFTTDetector_setGradientSize( + GFTTDetector self, + int gradientSize_, + ) { + return _cv_GFTTDetector_setGradientSize( + self, + gradientSize_, + ); + } + + late final _cv_GFTTDetector_setGradientSizePtr = + _lookup>( + 'cv_GFTTDetector_setGradientSize'); + late final _cv_GFTTDetector_setGradientSize = + _cv_GFTTDetector_setGradientSizePtr + .asFunction(); + + void cv_GFTTDetector_setHarrisDetector( + GFTTDetector self, + bool val, + ) { + return _cv_GFTTDetector_setHarrisDetector( + self, + val, + ); + } + + late final _cv_GFTTDetector_setHarrisDetectorPtr = + _lookup>( + 'cv_GFTTDetector_setHarrisDetector'); + late final _cv_GFTTDetector_setHarrisDetector = + _cv_GFTTDetector_setHarrisDetectorPtr + .asFunction(); + + void cv_GFTTDetector_setK( + GFTTDetector self, + double k, + ) { + return _cv_GFTTDetector_setK( + self, + k, + ); + } + + late final _cv_GFTTDetector_setKPtr = + _lookup>( + 'cv_GFTTDetector_setK'); + late final _cv_GFTTDetector_setK = _cv_GFTTDetector_setKPtr + .asFunction(); + + void cv_GFTTDetector_setMaxFeatures( + GFTTDetector self, + int maxFeatures, 
+ ) { + return _cv_GFTTDetector_setMaxFeatures( + self, + maxFeatures, + ); + } + + late final _cv_GFTTDetector_setMaxFeaturesPtr = + _lookup>( + 'cv_GFTTDetector_setMaxFeatures'); + late final _cv_GFTTDetector_setMaxFeatures = + _cv_GFTTDetector_setMaxFeaturesPtr + .asFunction(); + + void cv_GFTTDetector_setMinDistance( + GFTTDetector self, + double minDistance, + ) { + return _cv_GFTTDetector_setMinDistance( + self, + minDistance, + ); + } + + late final _cv_GFTTDetector_setMinDistancePtr = + _lookup>( + 'cv_GFTTDetector_setMinDistance'); + late final _cv_GFTTDetector_setMinDistance = + _cv_GFTTDetector_setMinDistancePtr + .asFunction(); + + void cv_GFTTDetector_setQualityLevel( + GFTTDetector self, + double qlevel, + ) { + return _cv_GFTTDetector_setQualityLevel( + self, + qlevel, + ); + } + + late final _cv_GFTTDetector_setQualityLevelPtr = + _lookup>( + 'cv_GFTTDetector_setQualityLevel'); + late final _cv_GFTTDetector_setQualityLevel = + _cv_GFTTDetector_setQualityLevelPtr + .asFunction(); + + void cv_KAZE_close( + KAZEPtr self, + ) { + return _cv_KAZE_close( + self, + ); + } + + late final _cv_KAZE_closePtr = + _lookup>('cv_KAZE_close'); + late final _cv_KAZE_close = + _cv_KAZE_closePtr.asFunction(); + + ffi.Pointer cv_KAZE_create( + ffi.Pointer rval, + ) { + return _cv_KAZE_create( + rval, + ); + } + + late final _cv_KAZE_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Pointer)>>('cv_KAZE_create'); + late final _cv_KAZE_create = _cv_KAZE_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_KAZE_create_1( + bool extended, + bool upright, + double threshold, + int nOctaves, + int nOctaveLayers, + int diffusivity, + ffi.Pointer rval, + ) { + return _cv_KAZE_create_1( + extended, + upright, + threshold, + nOctaves, + nOctaveLayers, + diffusivity, + rval, + ); + } + + late final _cv_KAZE_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Bool, ffi.Bool, ffi.Float, ffi.Int, + ffi.Int, ffi.Int, ffi.Pointer)>>('cv_KAZE_create_1'); + late final _cv_KAZE_create_1 = _cv_KAZE_create_1Ptr.asFunction< + ffi.Pointer Function( + bool, bool, double, int, int, int, ffi.Pointer)>(); + + ffi.Pointer cv_KAZE_detect( + KAZE self, + Mat src, + ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_KAZE_detect( + self, + src, + rval, + mask, + callback, + ); + } + + late final _cv_KAZE_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(KAZE, Mat, ffi.Pointer, + Mat, imp$1.CvCallback_0)>>('cv_KAZE_detect'); + late final _cv_KAZE_detect = _cv_KAZE_detectPtr.asFunction< + ffi.Pointer Function( + KAZE, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_KAZE_detectAndCompute( + KAZE self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_KAZE_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_KAZE_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + KAZE, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_KAZE_detectAndCompute'); + late final _cv_KAZE_detectAndCompute = + _cv_KAZE_detectAndComputePtr.asFunction< + ffi.Pointer Function(KAZE, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_KAZE_empty( + KAZE self, + ) { + return _cv_KAZE_empty( + self, + ); + } + + late final _cv_KAZE_emptyPtr = + _lookup>('cv_KAZE_empty'); + late final _cv_KAZE_empty = + 
_cv_KAZE_emptyPtr.asFunction(); + + ffi.Pointer cv_KAZE_getDefaultName( + KAZE self, + ) { + return _cv_KAZE_getDefaultName( + self, + ); + } + + late final _cv_KAZE_getDefaultNamePtr = + _lookup Function(KAZE)>>( + 'cv_KAZE_getDefaultName'); + late final _cv_KAZE_getDefaultName = _cv_KAZE_getDefaultNamePtr + .asFunction Function(KAZE)>(); + + int cv_KAZE_getDiffusivity( + KAZE self, + ) { + return _cv_KAZE_getDiffusivity( + self, + ); + } + + late final _cv_KAZE_getDiffusivityPtr = + _lookup>( + 'cv_KAZE_getDiffusivity'); + late final _cv_KAZE_getDiffusivity = + _cv_KAZE_getDiffusivityPtr.asFunction(); + + bool cv_KAZE_getExtended( + KAZE self, + ) { + return _cv_KAZE_getExtended( + self, + ); + } + + late final _cv_KAZE_getExtendedPtr = + _lookup>( + 'cv_KAZE_getExtended'); + late final _cv_KAZE_getExtended = + _cv_KAZE_getExtendedPtr.asFunction(); + + int cv_KAZE_getNOctaveLayers( + KAZE self, + ) { + return _cv_KAZE_getNOctaveLayers( + self, + ); + } + + late final _cv_KAZE_getNOctaveLayersPtr = + _lookup>( + 'cv_KAZE_getNOctaveLayers'); + late final _cv_KAZE_getNOctaveLayers = + _cv_KAZE_getNOctaveLayersPtr.asFunction(); + + int cv_KAZE_getNOctaves( + KAZE self, + ) { + return _cv_KAZE_getNOctaves( + self, + ); + } + + late final _cv_KAZE_getNOctavesPtr = + _lookup>( + 'cv_KAZE_getNOctaves'); + late final _cv_KAZE_getNOctaves = + _cv_KAZE_getNOctavesPtr.asFunction(); + + double cv_KAZE_getThreshold( + KAZE self, + ) { + return _cv_KAZE_getThreshold( + self, + ); + } + + late final _cv_KAZE_getThresholdPtr = + _lookup>( + 'cv_KAZE_getThreshold'); + late final _cv_KAZE_getThreshold = + _cv_KAZE_getThresholdPtr.asFunction(); + + bool cv_KAZE_getUpright( + KAZE self, + ) { + return _cv_KAZE_getUpright( + self, + ); + } + + late final _cv_KAZE_getUprightPtr = + _lookup>( + 'cv_KAZE_getUpright'); + late final _cv_KAZE_getUpright = + _cv_KAZE_getUprightPtr.asFunction(); + + void cv_KAZE_setDiffusivity( + KAZE self, + int diff, + ) { + return _cv_KAZE_setDiffusivity( + self, + diff, + ); + } + + late final _cv_KAZE_setDiffusivityPtr = + _lookup>( + 'cv_KAZE_setDiffusivity'); + late final _cv_KAZE_setDiffusivity = + _cv_KAZE_setDiffusivityPtr.asFunction(); + + void cv_KAZE_setExtended( + KAZE self, + bool extended, + ) { + return _cv_KAZE_setExtended( + self, + extended, + ); + } + + late final _cv_KAZE_setExtendedPtr = + _lookup>( + 'cv_KAZE_setExtended'); + late final _cv_KAZE_setExtended = + _cv_KAZE_setExtendedPtr.asFunction(); + + void cv_KAZE_setNOctaveLayers( + KAZE self, + int octaveLayers, + ) { + return _cv_KAZE_setNOctaveLayers( + self, + octaveLayers, + ); + } + + late final _cv_KAZE_setNOctaveLayersPtr = + _lookup>( + 'cv_KAZE_setNOctaveLayers'); + late final _cv_KAZE_setNOctaveLayers = + _cv_KAZE_setNOctaveLayersPtr.asFunction(); + + void cv_KAZE_setNOctaves( + KAZE self, + int octaves, + ) { + return _cv_KAZE_setNOctaves( + self, + octaves, + ); + } + + late final _cv_KAZE_setNOctavesPtr = + _lookup>( + 'cv_KAZE_setNOctaves'); + late final _cv_KAZE_setNOctaves = + _cv_KAZE_setNOctavesPtr.asFunction(); + + void cv_KAZE_setThreshold( + KAZE self, + double threshold, + ) { + return _cv_KAZE_setThreshold( + self, + threshold, + ); + } + + late final _cv_KAZE_setThresholdPtr = + _lookup>( + 'cv_KAZE_setThreshold'); + late final _cv_KAZE_setThreshold = + _cv_KAZE_setThresholdPtr.asFunction(); + + void cv_KAZE_setUpright( + KAZE self, + bool upright, + ) { + return _cv_KAZE_setUpright( + self, + upright, + ); + } + + late final _cv_KAZE_setUprightPtr = + _lookup>( + 
'cv_KAZE_setUpright'); + late final _cv_KAZE_setUpright = + _cv_KAZE_setUprightPtr.asFunction(); + + void cv_MSER_close( + MSERPtr self, + ) { + return _cv_MSER_close( + self, + ); + } + + late final _cv_MSER_closePtr = + _lookup>('cv_MSER_close'); + late final _cv_MSER_close = + _cv_MSER_closePtr.asFunction(); + + ffi.Pointer cv_MSER_create( + ffi.Pointer rval, + ) { + return _cv_MSER_create( + rval, + ); + } + + late final _cv_MSER_createPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ffi.Pointer)>>('cv_MSER_create'); + late final _cv_MSER_create = _cv_MSER_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_MSER_create_1( + int delta, + int min_area, + int max_area, + double max_variation, + double min_diversity, + int max_evolution, + double area_threshold, + double min_margin, + int edge_blur_size, + ffi.Pointer rval, + ) { + return _cv_MSER_create_1( + delta, + min_area, + max_area, + max_variation, + min_diversity, + max_evolution, + area_threshold, + min_margin, + edge_blur_size, + rval, + ); + } + + late final _cv_MSER_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Int, + ffi.Pointer)>>('cv_MSER_create_1'); + late final _cv_MSER_create_1 = _cv_MSER_create_1Ptr.asFunction< + ffi.Pointer Function(int, int, int, double, double, int, double, + double, int, ffi.Pointer)>(); + + ffi.Pointer cv_MSER_detect( + MSER self, + Mat src, + ffi.Pointer rval, + Mat mask, + imp$1.CvCallback_0 callback, + ) { + return _cv_MSER_detect( + self, + src, + rval, + mask, + callback, + ); + } + + late final _cv_MSER_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(MSER, Mat, ffi.Pointer, + Mat, imp$1.CvCallback_0)>>('cv_MSER_detect'); + late final _cv_MSER_detect = _cv_MSER_detectPtr.asFunction< + ffi.Pointer Function( + MSER, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_MSER_detectAndCompute( + MSER self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_MSER_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_MSER_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + MSER, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_MSER_detectAndCompute'); + late final _cv_MSER_detectAndCompute = + _cv_MSER_detectAndComputePtr.asFunction< + ffi.Pointer Function(MSER, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_MSER_detectRegions( + MSER self, + Mat image, + ffi.Pointer rval, + ffi.Pointer bboxes, + imp$1.CvCallback_0 callback, + ) { + return _cv_MSER_detectRegions( + self, + image, + rval, + bboxes, + callback, + ); + } + + late final _cv_MSER_detectRegionsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + MSER, + Mat, + ffi.Pointer, + ffi.Pointer, + imp$1.CvCallback_0)>>('cv_MSER_detectRegions'); + late final _cv_MSER_detectRegions = _cv_MSER_detectRegionsPtr.asFunction< + ffi.Pointer Function(MSER, Mat, ffi.Pointer, + ffi.Pointer, imp$1.CvCallback_0)>(); + + bool cv_MSER_empty( + MSER self, + ) { + return _cv_MSER_empty( + self, + ); + } + + late final _cv_MSER_emptyPtr = + _lookup>('cv_MSER_empty'); + late final _cv_MSER_empty = + _cv_MSER_emptyPtr.asFunction(); + + double cv_MSER_getAreaThreshold( + MSER self, + ) { + return _cv_MSER_getAreaThreshold( + self, + ); + } + + 
late final _cv_MSER_getAreaThresholdPtr = + _lookup>( + 'cv_MSER_getAreaThreshold'); + late final _cv_MSER_getAreaThreshold = + _cv_MSER_getAreaThresholdPtr.asFunction(); + + ffi.Pointer cv_MSER_getDefaultName( + MSER self, + ) { + return _cv_MSER_getDefaultName( + self, + ); + } + + late final _cv_MSER_getDefaultNamePtr = + _lookup Function(MSER)>>( + 'cv_MSER_getDefaultName'); + late final _cv_MSER_getDefaultName = _cv_MSER_getDefaultNamePtr + .asFunction Function(MSER)>(); + + int cv_MSER_getDelta( + MSER self, + ) { + return _cv_MSER_getDelta( + self, + ); + } + + late final _cv_MSER_getDeltaPtr = + _lookup>('cv_MSER_getDelta'); + late final _cv_MSER_getDelta = + _cv_MSER_getDeltaPtr.asFunction(); + + int cv_MSER_getEdgeBlurSize( + MSER self, + ) { + return _cv_MSER_getEdgeBlurSize( + self, + ); + } + + late final _cv_MSER_getEdgeBlurSizePtr = + _lookup>( + 'cv_MSER_getEdgeBlurSize'); + late final _cv_MSER_getEdgeBlurSize = + _cv_MSER_getEdgeBlurSizePtr.asFunction(); + + int cv_MSER_getMaxArea( + MSER self, + ) { + return _cv_MSER_getMaxArea( + self, + ); + } + + late final _cv_MSER_getMaxAreaPtr = + _lookup>('cv_MSER_getMaxArea'); + late final _cv_MSER_getMaxArea = + _cv_MSER_getMaxAreaPtr.asFunction(); + + int cv_MSER_getMaxEvolution( + MSER self, + ) { + return _cv_MSER_getMaxEvolution( + self, + ); + } + + late final _cv_MSER_getMaxEvolutionPtr = + _lookup>( + 'cv_MSER_getMaxEvolution'); + late final _cv_MSER_getMaxEvolution = + _cv_MSER_getMaxEvolutionPtr.asFunction(); + + double cv_MSER_getMaxVariation( + MSER self, + ) { + return _cv_MSER_getMaxVariation( + self, + ); + } + + late final _cv_MSER_getMaxVariationPtr = + _lookup>( + 'cv_MSER_getMaxVariation'); + late final _cv_MSER_getMaxVariation = + _cv_MSER_getMaxVariationPtr.asFunction(); + + int cv_MSER_getMinArea( + MSER self, + ) { + return _cv_MSER_getMinArea( + self, + ); + } + + late final _cv_MSER_getMinAreaPtr = + _lookup>('cv_MSER_getMinArea'); + late final _cv_MSER_getMinArea = + _cv_MSER_getMinAreaPtr.asFunction(); + + double cv_MSER_getMinDiversity( + MSER self, + ) { + return _cv_MSER_getMinDiversity( + self, + ); + } + + late final _cv_MSER_getMinDiversityPtr = + _lookup>( + 'cv_MSER_getMinDiversity'); + late final _cv_MSER_getMinDiversity = + _cv_MSER_getMinDiversityPtr.asFunction(); + + double cv_MSER_getMinMargin( + MSER self, + ) { + return _cv_MSER_getMinMargin( + self, + ); + } + + late final _cv_MSER_getMinMarginPtr = + _lookup>( + 'cv_MSER_getMinMargin'); + late final _cv_MSER_getMinMargin = + _cv_MSER_getMinMarginPtr.asFunction(); + + bool cv_MSER_getPass2Only( + MSER self, + ) { + return _cv_MSER_getPass2Only( + self, + ); + } + + late final _cv_MSER_getPass2OnlyPtr = + _lookup>( + 'cv_MSER_getPass2Only'); + late final _cv_MSER_getPass2Only = + _cv_MSER_getPass2OnlyPtr.asFunction(); + + void cv_MSER_setAreaThreshold( + MSER self, + double areaThreshold, + ) { + return _cv_MSER_setAreaThreshold( + self, + areaThreshold, + ); + } + + late final _cv_MSER_setAreaThresholdPtr = + _lookup>( + 'cv_MSER_setAreaThreshold'); + late final _cv_MSER_setAreaThreshold = + _cv_MSER_setAreaThresholdPtr.asFunction(); + + void cv_MSER_setDelta( + MSER self, + int delta, + ) { + return _cv_MSER_setDelta( + self, + delta, + ); + } + + late final _cv_MSER_setDeltaPtr = + _lookup>( + 'cv_MSER_setDelta'); + late final _cv_MSER_setDelta = + _cv_MSER_setDeltaPtr.asFunction(); + + void cv_MSER_setEdgeBlurSize( + MSER self, + int edge_blur_size, + ) { + return _cv_MSER_setEdgeBlurSize( + self, + edge_blur_size, + ); + } + + late 
final _cv_MSER_setEdgeBlurSizePtr = + _lookup>( + 'cv_MSER_setEdgeBlurSize'); + late final _cv_MSER_setEdgeBlurSize = + _cv_MSER_setEdgeBlurSizePtr.asFunction(); + + void cv_MSER_setMaxArea( + MSER self, + int maxArea, + ) { + return _cv_MSER_setMaxArea( + self, + maxArea, + ); + } + + late final _cv_MSER_setMaxAreaPtr = + _lookup>( + 'cv_MSER_setMaxArea'); + late final _cv_MSER_setMaxArea = + _cv_MSER_setMaxAreaPtr.asFunction(); + + void cv_MSER_setMaxEvolution( + MSER self, + int maxEvolution, + ) { + return _cv_MSER_setMaxEvolution( + self, + maxEvolution, + ); + } + + late final _cv_MSER_setMaxEvolutionPtr = + _lookup>( + 'cv_MSER_setMaxEvolution'); + late final _cv_MSER_setMaxEvolution = + _cv_MSER_setMaxEvolutionPtr.asFunction(); + + void cv_MSER_setMaxVariation( + MSER self, + double maxVariation, + ) { + return _cv_MSER_setMaxVariation( + self, + maxVariation, + ); + } + + late final _cv_MSER_setMaxVariationPtr = + _lookup>( + 'cv_MSER_setMaxVariation'); + late final _cv_MSER_setMaxVariation = + _cv_MSER_setMaxVariationPtr.asFunction(); + + void cv_MSER_setMinArea( + MSER self, + int minArea, + ) { + return _cv_MSER_setMinArea( + self, + minArea, + ); + } + + late final _cv_MSER_setMinAreaPtr = + _lookup>( + 'cv_MSER_setMinArea'); + late final _cv_MSER_setMinArea = + _cv_MSER_setMinAreaPtr.asFunction(); + + void cv_MSER_setMinDiversity( + MSER self, + double minDiversity, + ) { + return _cv_MSER_setMinDiversity( + self, + minDiversity, + ); + } + + late final _cv_MSER_setMinDiversityPtr = + _lookup>( + 'cv_MSER_setMinDiversity'); + late final _cv_MSER_setMinDiversity = + _cv_MSER_setMinDiversityPtr.asFunction(); + + void cv_MSER_setMinMargin( + MSER self, + double min_margin, + ) { + return _cv_MSER_setMinMargin( + self, + min_margin, + ); + } + + late final _cv_MSER_setMinMarginPtr = + _lookup>( + 'cv_MSER_setMinMargin'); + late final _cv_MSER_setMinMargin = + _cv_MSER_setMinMarginPtr.asFunction(); + + void cv_MSER_setPass2Only( + MSER self, + bool f, + ) { + return _cv_MSER_setPass2Only( + self, + f, + ); + } + + late final _cv_MSER_setPass2OnlyPtr = + _lookup>( + 'cv_MSER_setPass2Only'); + late final _cv_MSER_setPass2Only = + _cv_MSER_setPass2OnlyPtr.asFunction(); + + void cv_ORB_close( + ORBPtr self, + ) { + return _cv_ORB_close( + self, + ); + } + + late final _cv_ORB_closePtr = + _lookup>('cv_ORB_close'); + late final _cv_ORB_close = + _cv_ORB_closePtr.asFunction(); + + ffi.Pointer cv_ORB_create( + ffi.Pointer rval, + ) { + return _cv_ORB_create( + rval, + ); + } + + late final _cv_ORB_createPtr = _lookup< + ffi.NativeFunction Function(ffi.Pointer)>>( + 'cv_ORB_create'); + late final _cv_ORB_create = _cv_ORB_createPtr + .asFunction Function(ffi.Pointer)>(); + + ffi.Pointer cv_ORB_create_1( + int nfeatures, + double scaleFactor, + int nlevels, + int edgeThreshold, + int firstLevel, + int WTA_K, + int scoreType, + int patchSize, + int fastThreshold, + ffi.Pointer rval, + ) { + return _cv_ORB_create_1( + nfeatures, + scaleFactor, + nlevels, + edgeThreshold, + firstLevel, + WTA_K, + scoreType, + patchSize, + fastThreshold, + rval, + ); + } + + late final _cv_ORB_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Float, + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Int, + ffi.Pointer)>>('cv_ORB_create_1'); + late final _cv_ORB_create_1 = _cv_ORB_create_1Ptr.asFunction< + ffi.Pointer Function( + int, double, int, int, int, int, int, int, int, ffi.Pointer)>(); - ffi.Pointer cv_FastFeatureDetector_detect( - 
FastFeatureDetector self, + ffi.Pointer cv_ORB_detect( + ORB self, Mat src, ffi.Pointer rval, + Mat mask, imp$1.CvCallback_0 callback, ) { - return _cv_FastFeatureDetector_detect( + return _cv_ORB_detect( self, src, rval, + mask, callback, ); } - late final _cv_FastFeatureDetector_detectPtr = _lookup< + late final _cv_ORB_detectPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function(ORB, Mat, ffi.Pointer, + Mat, imp$1.CvCallback_0)>>('cv_ORB_detect'); + late final _cv_ORB_detect = _cv_ORB_detectPtr.asFunction< + ffi.Pointer Function( + ORB, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_ORB_detectAndCompute( + ORB self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer out_keypoints, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_ORB_detectAndCompute( + self, + src, + mask, + desc, + out_keypoints, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_ORB_detectAndComputePtr = _lookup< ffi.NativeFunction< ffi.Pointer Function( - FastFeatureDetector, + ORB, + Mat, + Mat, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_FastFeatureDetector_detect'); - late final _cv_FastFeatureDetector_detect = - _cv_FastFeatureDetector_detectPtr.asFunction< - ffi.Pointer Function(FastFeatureDetector, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + ffi.Bool, + imp$1.CvCallback_0)>>('cv_ORB_detectAndCompute'); + late final _cv_ORB_detectAndCompute = _cv_ORB_detectAndComputePtr.asFunction< + ffi.Pointer Function(ORB, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); - void cv_FlannBasedMatcher_close( - FlannBasedMatcherPtr self, + bool cv_ORB_empty( + ORB self, ) { - return _cv_FlannBasedMatcher_close( + return _cv_ORB_empty( self, ); } - late final _cv_FlannBasedMatcher_closePtr = - _lookup>( - 'cv_FlannBasedMatcher_close'); - late final _cv_FlannBasedMatcher_close = _cv_FlannBasedMatcher_closePtr - .asFunction(); + late final _cv_ORB_emptyPtr = + _lookup>('cv_ORB_empty'); + late final _cv_ORB_empty = _cv_ORB_emptyPtr.asFunction(); - ffi.Pointer cv_FlannBasedMatcher_create( - ffi.Pointer rval, + ffi.Pointer cv_ORB_getDefaultName( + ORB self, ) { - return _cv_FlannBasedMatcher_create( - rval, + return _cv_ORB_getDefaultName( + self, ); } - late final _cv_FlannBasedMatcher_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer)>>('cv_FlannBasedMatcher_create'); - late final _cv_FlannBasedMatcher_create = - _cv_FlannBasedMatcher_createPtr.asFunction< - ffi.Pointer Function(ffi.Pointer)>(); + late final _cv_ORB_getDefaultNamePtr = + _lookup Function(ORB)>>( + 'cv_ORB_getDefaultName'); + late final _cv_ORB_getDefaultName = _cv_ORB_getDefaultNamePtr + .asFunction Function(ORB)>(); - ffi.Pointer cv_FlannBasedMatcher_create_1( - ffi.Pointer rval, - FlannIndexParams indexParams, - FlannIndexParams searchParams, + int cv_ORB_getEdgeThreshold( + ORB self, ) { - return _cv_FlannBasedMatcher_create_1( - rval, - indexParams, - searchParams, + return _cv_ORB_getEdgeThreshold( + self, ); } - late final _cv_FlannBasedMatcher_create_1Ptr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer, - FlannIndexParams, - FlannIndexParams)>>('cv_FlannBasedMatcher_create_1'); - late final _cv_FlannBasedMatcher_create_1 = - _cv_FlannBasedMatcher_create_1Ptr.asFunction< - ffi.Pointer Function(ffi.Pointer, - FlannIndexParams, FlannIndexParams)>(); + late final _cv_ORB_getEdgeThresholdPtr = + _lookup>( + 'cv_ORB_getEdgeThreshold'); + late final _cv_ORB_getEdgeThreshold = + _cv_ORB_getEdgeThresholdPtr.asFunction(); - ffi.Pointer 
cv_FlannBasedMatcher_knnMatch( - FlannBasedMatcher self, - Mat query, - Mat train, - int k, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_ORB_getFastThreshold( + ORB self, ) { - return _cv_FlannBasedMatcher_knnMatch( + return _cv_ORB_getFastThreshold( self, - query, - train, - k, - rval, - callback, ); } - late final _cv_FlannBasedMatcher_knnMatchPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - FlannBasedMatcher, - Mat, - Mat, - ffi.Int, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_FlannBasedMatcher_knnMatch'); - late final _cv_FlannBasedMatcher_knnMatch = - _cv_FlannBasedMatcher_knnMatchPtr.asFunction< - ffi.Pointer Function(FlannBasedMatcher, Mat, Mat, int, - ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_getFastThresholdPtr = + _lookup>( + 'cv_ORB_getFastThreshold'); + late final _cv_ORB_getFastThreshold = + _cv_ORB_getFastThresholdPtr.asFunction(); - void cv_GFTTDetector_close( - GFTTDetectorPtr self, + int cv_ORB_getFirstLevel( + ORB self, ) { - return _cv_GFTTDetector_close( + return _cv_ORB_getFirstLevel( self, ); } - late final _cv_GFTTDetector_closePtr = - _lookup>( - 'cv_GFTTDetector_close'); - late final _cv_GFTTDetector_close = - _cv_GFTTDetector_closePtr.asFunction(); + late final _cv_ORB_getFirstLevelPtr = + _lookup>( + 'cv_ORB_getFirstLevel'); + late final _cv_ORB_getFirstLevel = + _cv_ORB_getFirstLevelPtr.asFunction(); - ffi.Pointer cv_GFTTDetector_create( - ffi.Pointer rval, + int cv_ORB_getMaxFeatures( + ORB self, ) { - return _cv_GFTTDetector_create( - rval, + return _cv_ORB_getMaxFeatures( + self, ); } - late final _cv_GFTTDetector_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Pointer)>>('cv_GFTTDetector_create'); - late final _cv_GFTTDetector_create = _cv_GFTTDetector_createPtr - .asFunction Function(ffi.Pointer)>(); + late final _cv_ORB_getMaxFeaturesPtr = + _lookup>( + 'cv_ORB_getMaxFeatures'); + late final _cv_ORB_getMaxFeatures = + _cv_ORB_getMaxFeaturesPtr.asFunction(); - ffi.Pointer cv_GFTTDetector_detect( - GFTTDetector self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_ORB_getNLevels( + ORB self, ) { - return _cv_GFTTDetector_detect( + return _cv_ORB_getNLevels( self, - src, - rval, - callback, ); } - late final _cv_GFTTDetector_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - GFTTDetector, - Mat, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_GFTTDetector_detect'); - late final _cv_GFTTDetector_detect = _cv_GFTTDetector_detectPtr.asFunction< - ffi.Pointer Function( - GFTTDetector, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_getNLevelsPtr = + _lookup>('cv_ORB_getNLevels'); + late final _cv_ORB_getNLevels = + _cv_ORB_getNLevelsPtr.asFunction(); - void cv_KAZE_close( - KAZEPtr self, + int cv_ORB_getPatchSize( + ORB self, ) { - return _cv_KAZE_close( + return _cv_ORB_getPatchSize( self, ); } - late final _cv_KAZE_closePtr = - _lookup>('cv_KAZE_close'); - late final _cv_KAZE_close = - _cv_KAZE_closePtr.asFunction(); + late final _cv_ORB_getPatchSizePtr = + _lookup>('cv_ORB_getPatchSize'); + late final _cv_ORB_getPatchSize = + _cv_ORB_getPatchSizePtr.asFunction(); - ffi.Pointer cv_KAZE_create( - ffi.Pointer rval, + double cv_ORB_getScaleFactor( + ORB self, ) { - return _cv_KAZE_create( - rval, + return _cv_ORB_getScaleFactor( + self, ); } - late final _cv_KAZE_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(ffi.Pointer)>>('cv_KAZE_create'); - late final _cv_KAZE_create = _cv_KAZE_createPtr - .asFunction 
Function(ffi.Pointer)>(); + late final _cv_ORB_getScaleFactorPtr = + _lookup>( + 'cv_ORB_getScaleFactor'); + late final _cv_ORB_getScaleFactor = + _cv_ORB_getScaleFactorPtr.asFunction(); - ffi.Pointer cv_KAZE_detect( - KAZE self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_ORB_getScoreType( + ORB self, ) { - return _cv_KAZE_detect( + return _cv_ORB_getScoreType( self, - src, - rval, - callback, ); } - late final _cv_KAZE_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(KAZE, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_KAZE_detect'); - late final _cv_KAZE_detect = _cv_KAZE_detectPtr.asFunction< - ffi.Pointer Function( - KAZE, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_getScoreTypePtr = + _lookup>('cv_ORB_getScoreType'); + late final _cv_ORB_getScoreType = + _cv_ORB_getScoreTypePtr.asFunction(); - ffi.Pointer cv_KAZE_detectAndCompute( - KAZE self, - Mat src, - Mat mask, - Mat desc, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int cv_ORB_getWTA_K( + ORB self, ) { - return _cv_KAZE_detectAndCompute( + return _cv_ORB_getWTA_K( self, - src, - mask, - desc, - rval, - callback, ); } - late final _cv_KAZE_detectAndComputePtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - KAZE, - Mat, - Mat, - Mat, - ffi.Pointer, - imp$1.CvCallback_0)>>('cv_KAZE_detectAndCompute'); - late final _cv_KAZE_detectAndCompute = - _cv_KAZE_detectAndComputePtr.asFunction< - ffi.Pointer Function(KAZE, Mat, Mat, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_getWTA_KPtr = + _lookup>('cv_ORB_getWTA_K'); + late final _cv_ORB_getWTA_K = + _cv_ORB_getWTA_KPtr.asFunction(); - void cv_MSER_close( - MSERPtr self, + void cv_ORB_setEdgeThreshold( + ORB self, + int edgeThreshold, ) { - return _cv_MSER_close( + return _cv_ORB_setEdgeThreshold( self, + edgeThreshold, ); } - late final _cv_MSER_closePtr = - _lookup>('cv_MSER_close'); - late final _cv_MSER_close = - _cv_MSER_closePtr.asFunction(); + late final _cv_ORB_setEdgeThresholdPtr = + _lookup>( + 'cv_ORB_setEdgeThreshold'); + late final _cv_ORB_setEdgeThreshold = + _cv_ORB_setEdgeThresholdPtr.asFunction(); - ffi.Pointer cv_MSER_create( - ffi.Pointer rval, + void cv_ORB_setFastThreshold( + ORB self, + int fastThreshold, ) { - return _cv_MSER_create( - rval, + return _cv_ORB_setFastThreshold( + self, + fastThreshold, ); } - late final _cv_MSER_createPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(ffi.Pointer)>>('cv_MSER_create'); - late final _cv_MSER_create = _cv_MSER_createPtr - .asFunction Function(ffi.Pointer)>(); + late final _cv_ORB_setFastThresholdPtr = + _lookup>( + 'cv_ORB_setFastThreshold'); + late final _cv_ORB_setFastThreshold = + _cv_ORB_setFastThresholdPtr.asFunction(); - ffi.Pointer cv_MSER_detect( - MSER self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + void cv_ORB_setFirstLevel( + ORB self, + int firstLevel, ) { - return _cv_MSER_detect( + return _cv_ORB_setFirstLevel( self, - src, - rval, - callback, + firstLevel, ); } - late final _cv_MSER_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(MSER, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_MSER_detect'); - late final _cv_MSER_detect = _cv_MSER_detectPtr.asFunction< - ffi.Pointer Function( - MSER, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_setFirstLevelPtr = + _lookup>( + 'cv_ORB_setFirstLevel'); + late final _cv_ORB_setFirstLevel = + _cv_ORB_setFirstLevelPtr.asFunction(); - void cv_ORB_close( - ORBPtr self, + void cv_ORB_setMaxFeatures( + ORB 
self, + int maxFeatures, ) { - return _cv_ORB_close( + return _cv_ORB_setMaxFeatures( self, + maxFeatures, ); } - late final _cv_ORB_closePtr = - _lookup>('cv_ORB_close'); - late final _cv_ORB_close = - _cv_ORB_closePtr.asFunction(); + late final _cv_ORB_setMaxFeaturesPtr = + _lookup>( + 'cv_ORB_setMaxFeatures'); + late final _cv_ORB_setMaxFeatures = + _cv_ORB_setMaxFeaturesPtr.asFunction(); - ffi.Pointer cv_ORB_create( - ffi.Pointer rval, + void cv_ORB_setNLevels( + ORB self, + int nlevels, ) { - return _cv_ORB_create( - rval, + return _cv_ORB_setNLevels( + self, + nlevels, ); } - late final _cv_ORB_createPtr = _lookup< - ffi.NativeFunction Function(ffi.Pointer)>>( - 'cv_ORB_create'); - late final _cv_ORB_create = _cv_ORB_createPtr - .asFunction Function(ffi.Pointer)>(); + late final _cv_ORB_setNLevelsPtr = + _lookup>( + 'cv_ORB_setNLevels'); + late final _cv_ORB_setNLevels = + _cv_ORB_setNLevelsPtr.asFunction(); - ffi.Pointer cv_ORB_create_1( - int nfeatures, - double scaleFactor, - int nlevels, - int edgeThreshold, - int firstLevel, - int WTA_K, - int scoreType, + void cv_ORB_setPatchSize( + ORB self, int patchSize, - int fastThreshold, - ffi.Pointer rval, ) { - return _cv_ORB_create_1( - nfeatures, - scaleFactor, - nlevels, - edgeThreshold, - firstLevel, - WTA_K, - scoreType, + return _cv_ORB_setPatchSize( + self, patchSize, - fastThreshold, - rval, ); } - late final _cv_ORB_create_1Ptr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ffi.Int, - ffi.Float, - ffi.Int, - ffi.Int, - ffi.Int, - ffi.Int, - ffi.Int, - ffi.Int, - ffi.Int, - ffi.Pointer)>>('cv_ORB_create_1'); - late final _cv_ORB_create_1 = _cv_ORB_create_1Ptr.asFunction< - ffi.Pointer Function( - int, double, int, int, int, int, int, int, int, ffi.Pointer)>(); + late final _cv_ORB_setPatchSizePtr = + _lookup>( + 'cv_ORB_setPatchSize'); + late final _cv_ORB_setPatchSize = + _cv_ORB_setPatchSizePtr.asFunction(); + + void cv_ORB_setScaleFactor( + ORB self, + double scaleFactor, + ) { + return _cv_ORB_setScaleFactor( + self, + scaleFactor, + ); + } + + late final _cv_ORB_setScaleFactorPtr = + _lookup>( + 'cv_ORB_setScaleFactor'); + late final _cv_ORB_setScaleFactor = + _cv_ORB_setScaleFactorPtr.asFunction(); - ffi.Pointer cv_ORB_detect( + void cv_ORB_setScoreType( ORB self, - Mat src, - ffi.Pointer rval, - imp$1.CvCallback_0 callback, + int scoreType, ) { - return _cv_ORB_detect( + return _cv_ORB_setScoreType( self, - src, - rval, - callback, + scoreType, ); } - late final _cv_ORB_detectPtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function(ORB, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_ORB_detect'); - late final _cv_ORB_detect = _cv_ORB_detectPtr.asFunction< - ffi.Pointer Function( - ORB, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + late final _cv_ORB_setScoreTypePtr = + _lookup>( + 'cv_ORB_setScoreType'); + late final _cv_ORB_setScoreType = + _cv_ORB_setScoreTypePtr.asFunction(); - ffi.Pointer cv_ORB_detectAndCompute( + void cv_ORB_setWTA_K( ORB self, - Mat src, - Mat mask, - ffi.Pointer out_keypoints, - Mat desc, - bool useProvidedKeypoints, - imp$1.CvCallback_0 callback, + int wta_k, ) { - return _cv_ORB_detectAndCompute( + return _cv_ORB_setWTA_K( self, - src, - mask, - out_keypoints, - desc, - useProvidedKeypoints, - callback, + wta_k, ); } - late final _cv_ORB_detectAndComputePtr = _lookup< - ffi.NativeFunction< - ffi.Pointer Function( - ORB, - Mat, - Mat, - ffi.Pointer, - Mat, - ffi.Bool, - imp$1.CvCallback_0)>>('cv_ORB_detectAndCompute'); - late final _cv_ORB_detectAndCompute = 
_cv_ORB_detectAndComputePtr.asFunction< - ffi.Pointer Function(ORB, Mat, Mat, ffi.Pointer, - Mat, bool, imp$1.CvCallback_0)>(); + late final _cv_ORB_setWTA_KPtr = + _lookup>( + 'cv_ORB_setWTA_K'); + late final _cv_ORB_setWTA_K = + _cv_ORB_setWTA_KPtr.asFunction(); void cv_SIFT_close( SIFTPtr self, @@ -863,16 +3060,89 @@ class CvNativeFeatures2d { late final _cv_SIFT_create = _cv_SIFT_createPtr .asFunction Function(ffi.Pointer)>(); + ffi.Pointer cv_SIFT_create_1( + int nfeatures, + int nOctaveLayers, + double contrastThreshold, + double edgeThreshold, + double sigma, + int descriptorType, + bool enable_precise_upscale, + ffi.Pointer rval, + ) { + return _cv_SIFT_create_1( + nfeatures, + nOctaveLayers, + contrastThreshold, + edgeThreshold, + sigma, + descriptorType, + enable_precise_upscale, + rval, + ); + } + + late final _cv_SIFT_create_1Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Double, + ffi.Int, + ffi.Bool, + ffi.Pointer)>>('cv_SIFT_create_1'); + late final _cv_SIFT_create_1 = _cv_SIFT_create_1Ptr.asFunction< + ffi.Pointer Function( + int, int, double, double, double, int, bool, ffi.Pointer)>(); + + ffi.Pointer cv_SIFT_create_2( + int nfeatures, + int nOctaveLayers, + double contrastThreshold, + double edgeThreshold, + double sigma, + bool enable_precise_upscale, + ffi.Pointer rval, + ) { + return _cv_SIFT_create_2( + nfeatures, + nOctaveLayers, + contrastThreshold, + edgeThreshold, + sigma, + enable_precise_upscale, + rval, + ); + } + + late final _cv_SIFT_create_2Ptr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + ffi.Int, + ffi.Int, + ffi.Double, + ffi.Double, + ffi.Double, + ffi.Bool, + ffi.Pointer)>>('cv_SIFT_create_2'); + late final _cv_SIFT_create_2 = _cv_SIFT_create_2Ptr.asFunction< + ffi.Pointer Function( + int, int, double, double, double, bool, ffi.Pointer)>(); + ffi.Pointer cv_SIFT_detect( SIFT self, Mat src, ffi.Pointer rval, + Mat mask, imp$1.CvCallback_0 callback, ) { return _cv_SIFT_detect( self, src, rval, + mask, callback, ); } @@ -880,10 +3150,10 @@ class CvNativeFeatures2d { late final _cv_SIFT_detectPtr = _lookup< ffi.NativeFunction< ffi.Pointer Function(SIFT, Mat, ffi.Pointer, - imp$1.CvCallback_0)>>('cv_SIFT_detect'); + Mat, imp$1.CvCallback_0)>>('cv_SIFT_detect'); late final _cv_SIFT_detect = _cv_SIFT_detectPtr.asFunction< ffi.Pointer Function( - SIFT, Mat, ffi.Pointer, imp$1.CvCallback_0)>(); + SIFT, Mat, ffi.Pointer, Mat, imp$1.CvCallback_0)>(); ffi.Pointer cv_SIFT_detectAndCompute( SIFT self, @@ -891,6 +3161,7 @@ class CvNativeFeatures2d { Mat mask, Mat desc, ffi.Pointer rval, + bool useProvidedKeypoints, imp$1.CvCallback_0 callback, ) { return _cv_SIFT_detectAndCompute( @@ -899,6 +3170,7 @@ class CvNativeFeatures2d { mask, desc, rval, + useProvidedKeypoints, callback, ); } @@ -911,11 +3183,189 @@ class CvNativeFeatures2d { Mat, Mat, ffi.Pointer, + ffi.Bool, imp$1.CvCallback_0)>>('cv_SIFT_detectAndCompute'); late final _cv_SIFT_detectAndCompute = _cv_SIFT_detectAndComputePtr.asFunction< ffi.Pointer Function(SIFT, Mat, Mat, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_SIFT_empty( + SIFT self, + ) { + return _cv_SIFT_empty( + self, + ); + } + + late final _cv_SIFT_emptyPtr = + _lookup>('cv_SIFT_empty'); + late final _cv_SIFT_empty = + _cv_SIFT_emptyPtr.asFunction(); + + double cv_SIFT_getContrastThreshold( + SIFT self, + ) { + return _cv_SIFT_getContrastThreshold( + self, + ); + } + + late final 
_cv_SIFT_getContrastThresholdPtr = + _lookup>( + 'cv_SIFT_getContrastThreshold'); + late final _cv_SIFT_getContrastThreshold = + _cv_SIFT_getContrastThresholdPtr.asFunction(); + + ffi.Pointer cv_SIFT_getDefaultName( + SIFT self, + ) { + return _cv_SIFT_getDefaultName( + self, + ); + } + + late final _cv_SIFT_getDefaultNamePtr = + _lookup Function(SIFT)>>( + 'cv_SIFT_getDefaultName'); + late final _cv_SIFT_getDefaultName = _cv_SIFT_getDefaultNamePtr + .asFunction Function(SIFT)>(); + + double cv_SIFT_getEdgeThreshold( + SIFT self, + ) { + return _cv_SIFT_getEdgeThreshold( + self, + ); + } + + late final _cv_SIFT_getEdgeThresholdPtr = + _lookup>( + 'cv_SIFT_getEdgeThreshold'); + late final _cv_SIFT_getEdgeThreshold = + _cv_SIFT_getEdgeThresholdPtr.asFunction(); + + int cv_SIFT_getNFeatures( + SIFT self, + ) { + return _cv_SIFT_getNFeatures( + self, + ); + } + + late final _cv_SIFT_getNFeaturesPtr = + _lookup>( + 'cv_SIFT_getNFeatures'); + late final _cv_SIFT_getNFeatures = + _cv_SIFT_getNFeaturesPtr.asFunction(); + + int cv_SIFT_getNOctaveLayers( + SIFT self, + ) { + return _cv_SIFT_getNOctaveLayers( + self, + ); + } + + late final _cv_SIFT_getNOctaveLayersPtr = + _lookup>( + 'cv_SIFT_getNOctaveLayers'); + late final _cv_SIFT_getNOctaveLayers = + _cv_SIFT_getNOctaveLayersPtr.asFunction(); + + double cv_SIFT_getSigma( + SIFT self, + ) { + return _cv_SIFT_getSigma( + self, + ); + } + + late final _cv_SIFT_getSigmaPtr = + _lookup>( + 'cv_SIFT_getSigma'); + late final _cv_SIFT_getSigma = + _cv_SIFT_getSigmaPtr.asFunction(); + + void cv_SIFT_setContrastThreshold( + SIFT self, + double contrastThreshold, + ) { + return _cv_SIFT_setContrastThreshold( + self, + contrastThreshold, + ); + } + + late final _cv_SIFT_setContrastThresholdPtr = + _lookup>( + 'cv_SIFT_setContrastThreshold'); + late final _cv_SIFT_setContrastThreshold = _cv_SIFT_setContrastThresholdPtr + .asFunction(); + + void cv_SIFT_setEdgeThreshold( + SIFT self, + double edgeThreshold, + ) { + return _cv_SIFT_setEdgeThreshold( + self, + edgeThreshold, + ); + } + + late final _cv_SIFT_setEdgeThresholdPtr = + _lookup>( + 'cv_SIFT_setEdgeThreshold'); + late final _cv_SIFT_setEdgeThreshold = + _cv_SIFT_setEdgeThresholdPtr.asFunction(); + + void cv_SIFT_setNFeatures( + SIFT self, + int maxFeatures, + ) { + return _cv_SIFT_setNFeatures( + self, + maxFeatures, + ); + } + + late final _cv_SIFT_setNFeaturesPtr = + _lookup>( + 'cv_SIFT_setNFeatures'); + late final _cv_SIFT_setNFeatures = + _cv_SIFT_setNFeaturesPtr.asFunction(); + + void cv_SIFT_setNOctaveLayers( + SIFT self, + int nOctaveLayers, + ) { + return _cv_SIFT_setNOctaveLayers( + self, + nOctaveLayers, + ); + } + + late final _cv_SIFT_setNOctaveLayersPtr = + _lookup>( + 'cv_SIFT_setNOctaveLayers'); + late final _cv_SIFT_setNOctaveLayers = + _cv_SIFT_setNOctaveLayersPtr.asFunction(); + + void cv_SIFT_setSigma( + SIFT self, + double sigma, + ) { + return _cv_SIFT_setSigma( + self, + sigma, + ); + } + + late final _cv_SIFT_setSigmaPtr = + _lookup>( + 'cv_SIFT_setSigma'); + late final _cv_SIFT_setSigma = + _cv_SIFT_setSigmaPtr.asFunction(); ffi.Pointer cv_SimpleBlobDetectorParams_create( ffi.Pointer rval, @@ -989,12 +3439,14 @@ class CvNativeFeatures2d { SimpleBlobDetector self, Mat src, ffi.Pointer rval, + Mat mask, imp$1.CvCallback_0 callback, ) { return _cv_SimpleBlobDetector_detect( self, src, rval, + mask, callback, ); } @@ -1005,11 +3457,127 @@ class CvNativeFeatures2d { SimpleBlobDetector, Mat, ffi.Pointer, + Mat, imp$1.CvCallback_0)>>('cv_SimpleBlobDetector_detect'); late final 
_cv_SimpleBlobDetector_detect = _cv_SimpleBlobDetector_detectPtr.asFunction< ffi.Pointer Function(SimpleBlobDetector, Mat, - ffi.Pointer, imp$1.CvCallback_0)>(); + ffi.Pointer, Mat, imp$1.CvCallback_0)>(); + + ffi.Pointer cv_SimpleBlobDetector_detectAndCompute( + SimpleBlobDetector self, + Mat src, + Mat mask, + Mat desc, + ffi.Pointer rval, + bool useProvidedKeypoints, + imp$1.CvCallback_0 callback, + ) { + return _cv_SimpleBlobDetector_detectAndCompute( + self, + src, + mask, + desc, + rval, + useProvidedKeypoints, + callback, + ); + } + + late final _cv_SimpleBlobDetector_detectAndComputePtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + SimpleBlobDetector, + Mat, + Mat, + Mat, + ffi.Pointer, + ffi.Bool, + imp$1.CvCallback_0)>>('cv_SimpleBlobDetector_detectAndCompute'); + late final _cv_SimpleBlobDetector_detectAndCompute = + _cv_SimpleBlobDetector_detectAndComputePtr.asFunction< + ffi.Pointer Function(SimpleBlobDetector, Mat, Mat, Mat, + ffi.Pointer, bool, imp$1.CvCallback_0)>(); + + bool cv_SimpleBlobDetector_empty( + SimpleBlobDetector self, + ) { + return _cv_SimpleBlobDetector_empty( + self, + ); + } + + late final _cv_SimpleBlobDetector_emptyPtr = + _lookup>( + 'cv_SimpleBlobDetector_empty'); + late final _cv_SimpleBlobDetector_empty = _cv_SimpleBlobDetector_emptyPtr + .asFunction(); + + ffi.Pointer cv_SimpleBlobDetector_getBlobContours( + SimpleBlobDetector self, + ) { + return _cv_SimpleBlobDetector_getBlobContours( + self, + ); + } + + late final _cv_SimpleBlobDetector_getBlobContoursPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + SimpleBlobDetector)>>('cv_SimpleBlobDetector_getBlobContours'); + late final _cv_SimpleBlobDetector_getBlobContours = + _cv_SimpleBlobDetector_getBlobContoursPtr + .asFunction Function(SimpleBlobDetector)>(); + + ffi.Pointer cv_SimpleBlobDetector_getDefaultName( + SimpleBlobDetector self, + ) { + return _cv_SimpleBlobDetector_getDefaultName( + self, + ); + } + + late final _cv_SimpleBlobDetector_getDefaultNamePtr = _lookup< + ffi + .NativeFunction Function(SimpleBlobDetector)>>( + 'cv_SimpleBlobDetector_getDefaultName'); + late final _cv_SimpleBlobDetector_getDefaultName = + _cv_SimpleBlobDetector_getDefaultNamePtr + .asFunction Function(SimpleBlobDetector)>(); + + ffi.Pointer cv_SimpleBlobDetector_getParams( + SimpleBlobDetector self, + ) { + return _cv_SimpleBlobDetector_getParams( + self, + ); + } + + late final _cv_SimpleBlobDetector_getParamsPtr = _lookup< + ffi.NativeFunction< + ffi.Pointer Function( + SimpleBlobDetector)>>('cv_SimpleBlobDetector_getParams'); + late final _cv_SimpleBlobDetector_getParams = + _cv_SimpleBlobDetector_getParamsPtr.asFunction< + ffi.Pointer Function(SimpleBlobDetector)>(); + + void cv_SimpleBlobDetector_setParams( + SimpleBlobDetector self, + SimpleBlobDetectorParams params, + ) { + return _cv_SimpleBlobDetector_setParams( + self, + params, + ); + } + + late final _cv_SimpleBlobDetector_setParamsPtr = _lookup< + ffi.NativeFunction< + ffi.Void Function(SimpleBlobDetector, + SimpleBlobDetectorParams)>>('cv_SimpleBlobDetector_setParams'); + late final _cv_SimpleBlobDetector_setParams = + _cv_SimpleBlobDetector_setParamsPtr.asFunction< + void Function(SimpleBlobDetector, SimpleBlobDetectorParams)>(); ffi.Pointer cv_drawKeyPoints( Mat src, @@ -1642,8 +4210,11 @@ final class SimpleBlobDetectorParams extends ffi.Struct { typedef SimpleBlobDetectorPtr = ffi.Pointer; typedef VecChar = imp$1.VecChar; typedef VecDMatch = imp$1.VecDMatch; +typedef VecF32 = imp$1.VecF32; typedef VecF64 = imp$1.VecF64; 
typedef VecI32 = imp$1.VecI32; typedef VecKeyPoint = imp$1.VecKeyPoint; +typedef VecRect = imp$1.VecRect; typedef VecVecChar = imp$1.VecVecChar; typedef VecVecDMatch = imp$1.VecVecDMatch; +typedef VecVecPoint = imp$1.VecVecPoint; diff --git a/packages/dartcv/lib/src/g/features2d.yaml b/packages/dartcv/lib/src/g/features2d.yaml index 498680b2..33bd1001 100644 --- a/packages/dartcv/lib/src/g/features2d.yaml +++ b/packages/dartcv/lib/src/g/features2d.yaml @@ -14,16 +14,74 @@ files: name: cv_AKAZE_close c:@F@cv_AKAZE_create: name: cv_AKAZE_create + c:@F@cv_AKAZE_create_1: + name: cv_AKAZE_create_1 c:@F@cv_AKAZE_detect: name: cv_AKAZE_detect c:@F@cv_AKAZE_detectAndCompute: name: cv_AKAZE_detectAndCompute + c:@F@cv_AKAZE_empty: + name: cv_AKAZE_empty + c:@F@cv_AKAZE_getDefaultName: + name: cv_AKAZE_getDefaultName + c:@F@cv_AKAZE_getDescriptorChannels: + name: cv_AKAZE_getDescriptorChannels + c:@F@cv_AKAZE_getDescriptorSize: + name: cv_AKAZE_getDescriptorSize + c:@F@cv_AKAZE_getDescriptorType: + name: cv_AKAZE_getDescriptorType + c:@F@cv_AKAZE_getDiffusivity: + name: cv_AKAZE_getDiffusivity + c:@F@cv_AKAZE_getMaxPoints: + name: cv_AKAZE_getMaxPoints + c:@F@cv_AKAZE_getNOctaveLayers: + name: cv_AKAZE_getNOctaveLayers + c:@F@cv_AKAZE_getNOctaves: + name: cv_AKAZE_getNOctaves + c:@F@cv_AKAZE_getThreshold: + name: cv_AKAZE_getThreshold + c:@F@cv_AKAZE_setDescriptorChannels: + name: cv_AKAZE_setDescriptorChannels + c:@F@cv_AKAZE_setDescriptorSize: + name: cv_AKAZE_setDescriptorSize + c:@F@cv_AKAZE_setDescriptorType: + name: cv_AKAZE_setDescriptorType + c:@F@cv_AKAZE_setDiffusivity: + name: cv_AKAZE_setDiffusivity + c:@F@cv_AKAZE_setMaxPoints: + name: cv_AKAZE_setMaxPoints + c:@F@cv_AKAZE_setNOctaveLayers: + name: cv_AKAZE_setNOctaveLayers + c:@F@cv_AKAZE_setNOctaves: + name: cv_AKAZE_setNOctaves + c:@F@cv_AKAZE_setThreshold: + name: cv_AKAZE_setThreshold c:@F@cv_AgastFeatureDetector_close: name: cv_AgastFeatureDetector_close c:@F@cv_AgastFeatureDetector_create: name: cv_AgastFeatureDetector_create + c:@F@cv_AgastFeatureDetector_create_1: + name: cv_AgastFeatureDetector_create_1 c:@F@cv_AgastFeatureDetector_detect: name: cv_AgastFeatureDetector_detect + c:@F@cv_AgastFeatureDetector_detectAndCompute: + name: cv_AgastFeatureDetector_detectAndCompute + c:@F@cv_AgastFeatureDetector_empty: + name: cv_AgastFeatureDetector_empty + c:@F@cv_AgastFeatureDetector_getDefaultName: + name: cv_AgastFeatureDetector_getDefaultName + c:@F@cv_AgastFeatureDetector_getNonmaxSuppression: + name: cv_AgastFeatureDetector_getNonmaxSuppression + c:@F@cv_AgastFeatureDetector_getThreshold: + name: cv_AgastFeatureDetector_getThreshold + c:@F@cv_AgastFeatureDetector_getType: + name: cv_AgastFeatureDetector_getType + c:@F@cv_AgastFeatureDetector_setNonmaxSuppression: + name: cv_AgastFeatureDetector_setNonmaxSuppression + c:@F@cv_AgastFeatureDetector_setThreshold: + name: cv_AgastFeatureDetector_setThreshold + c:@F@cv_AgastFeatureDetector_setType: + name: cv_AgastFeatureDetector_setType c:@F@cv_BFMatcher_close: name: cv_BFMatcher_close c:@F@cv_BFMatcher_create: @@ -38,10 +96,32 @@ files: name: cv_BRISK_close c:@F@cv_BRISK_create: name: cv_BRISK_create + c:@F@cv_BRISK_create_1: + name: cv_BRISK_create_1 + c:@F@cv_BRISK_create_2: + name: cv_BRISK_create_2 + c:@F@cv_BRISK_create_3: + name: cv_BRISK_create_3 c:@F@cv_BRISK_detect: name: cv_BRISK_detect c:@F@cv_BRISK_detectAndCompute: name: cv_BRISK_detectAndCompute + c:@F@cv_BRISK_empty: + name: cv_BRISK_empty + c:@F@cv_BRISK_getDefaultName: + name: cv_BRISK_getDefaultName + 
c:@F@cv_BRISK_getOctaves: + name: cv_BRISK_getOctaves + c:@F@cv_BRISK_getPatternScale: + name: cv_BRISK_getPatternScale + c:@F@cv_BRISK_getThreshold: + name: cv_BRISK_getThreshold + c:@F@cv_BRISK_setOctaves: + name: cv_BRISK_setOctaves + c:@F@cv_BRISK_setPatternScale: + name: cv_BRISK_setPatternScale + c:@F@cv_BRISK_setThreshold: + name: cv_BRISK_setThreshold c:@F@cv_FastFeatureDetector_close: name: cv_FastFeatureDetector_close c:@F@cv_FastFeatureDetector_create: @@ -50,6 +130,24 @@ files: name: cv_FastFeatureDetector_create_1 c:@F@cv_FastFeatureDetector_detect: name: cv_FastFeatureDetector_detect + c:@F@cv_FastFeatureDetector_detectAndCompute: + name: cv_FastFeatureDetector_detectAndCompute + c:@F@cv_FastFeatureDetector_empty: + name: cv_FastFeatureDetector_empty + c:@F@cv_FastFeatureDetector_getDefaultName: + name: cv_FastFeatureDetector_getDefaultName + c:@F@cv_FastFeatureDetector_getNonmaxSuppression: + name: cv_FastFeatureDetector_getNonmaxSuppression + c:@F@cv_FastFeatureDetector_getThreshold: + name: cv_FastFeatureDetector_getThreshold + c:@F@cv_FastFeatureDetector_getType: + name: cv_FastFeatureDetector_getType + c:@F@cv_FastFeatureDetector_setNonmaxSuppression: + name: cv_FastFeatureDetector_setNonmaxSuppression + c:@F@cv_FastFeatureDetector_setThreshold: + name: cv_FastFeatureDetector_setThreshold + c:@F@cv_FastFeatureDetector_setType: + name: cv_FastFeatureDetector_setType c:@F@cv_FlannBasedMatcher_close: name: cv_FlannBasedMatcher_close c:@F@cv_FlannBasedMatcher_create: @@ -62,22 +160,140 @@ files: name: cv_GFTTDetector_close c:@F@cv_GFTTDetector_create: name: cv_GFTTDetector_create + c:@F@cv_GFTTDetector_create_1: + name: cv_GFTTDetector_create_1 + c:@F@cv_GFTTDetector_create_2: + name: cv_GFTTDetector_create_2 c:@F@cv_GFTTDetector_detect: name: cv_GFTTDetector_detect + c:@F@cv_GFTTDetector_detectAndCompute: + name: cv_GFTTDetector_detectAndCompute + c:@F@cv_GFTTDetector_empty: + name: cv_GFTTDetector_empty + c:@F@cv_GFTTDetector_getBlockSize: + name: cv_GFTTDetector_getBlockSize + c:@F@cv_GFTTDetector_getDefaultName: + name: cv_GFTTDetector_getDefaultName + c:@F@cv_GFTTDetector_getGradientSize: + name: cv_GFTTDetector_getGradientSize + c:@F@cv_GFTTDetector_getHarrisDetector: + name: cv_GFTTDetector_getHarrisDetector + c:@F@cv_GFTTDetector_getK: + name: cv_GFTTDetector_getK + c:@F@cv_GFTTDetector_getMaxFeatures: + name: cv_GFTTDetector_getMaxFeatures + c:@F@cv_GFTTDetector_getMinDistance: + name: cv_GFTTDetector_getMinDistance + c:@F@cv_GFTTDetector_getQualityLevel: + name: cv_GFTTDetector_getQualityLevel + c:@F@cv_GFTTDetector_setBlockSize: + name: cv_GFTTDetector_setBlockSize + c:@F@cv_GFTTDetector_setGradientSize: + name: cv_GFTTDetector_setGradientSize + c:@F@cv_GFTTDetector_setHarrisDetector: + name: cv_GFTTDetector_setHarrisDetector + c:@F@cv_GFTTDetector_setK: + name: cv_GFTTDetector_setK + c:@F@cv_GFTTDetector_setMaxFeatures: + name: cv_GFTTDetector_setMaxFeatures + c:@F@cv_GFTTDetector_setMinDistance: + name: cv_GFTTDetector_setMinDistance + c:@F@cv_GFTTDetector_setQualityLevel: + name: cv_GFTTDetector_setQualityLevel c:@F@cv_KAZE_close: name: cv_KAZE_close c:@F@cv_KAZE_create: name: cv_KAZE_create + c:@F@cv_KAZE_create_1: + name: cv_KAZE_create_1 c:@F@cv_KAZE_detect: name: cv_KAZE_detect c:@F@cv_KAZE_detectAndCompute: name: cv_KAZE_detectAndCompute + c:@F@cv_KAZE_empty: + name: cv_KAZE_empty + c:@F@cv_KAZE_getDefaultName: + name: cv_KAZE_getDefaultName + c:@F@cv_KAZE_getDiffusivity: + name: cv_KAZE_getDiffusivity + c:@F@cv_KAZE_getExtended: + name: 
cv_KAZE_getExtended + c:@F@cv_KAZE_getNOctaveLayers: + name: cv_KAZE_getNOctaveLayers + c:@F@cv_KAZE_getNOctaves: + name: cv_KAZE_getNOctaves + c:@F@cv_KAZE_getThreshold: + name: cv_KAZE_getThreshold + c:@F@cv_KAZE_getUpright: + name: cv_KAZE_getUpright + c:@F@cv_KAZE_setDiffusivity: + name: cv_KAZE_setDiffusivity + c:@F@cv_KAZE_setExtended: + name: cv_KAZE_setExtended + c:@F@cv_KAZE_setNOctaveLayers: + name: cv_KAZE_setNOctaveLayers + c:@F@cv_KAZE_setNOctaves: + name: cv_KAZE_setNOctaves + c:@F@cv_KAZE_setThreshold: + name: cv_KAZE_setThreshold + c:@F@cv_KAZE_setUpright: + name: cv_KAZE_setUpright c:@F@cv_MSER_close: name: cv_MSER_close c:@F@cv_MSER_create: name: cv_MSER_create + c:@F@cv_MSER_create_1: + name: cv_MSER_create_1 c:@F@cv_MSER_detect: name: cv_MSER_detect + c:@F@cv_MSER_detectAndCompute: + name: cv_MSER_detectAndCompute + c:@F@cv_MSER_detectRegions: + name: cv_MSER_detectRegions + c:@F@cv_MSER_empty: + name: cv_MSER_empty + c:@F@cv_MSER_getAreaThreshold: + name: cv_MSER_getAreaThreshold + c:@F@cv_MSER_getDefaultName: + name: cv_MSER_getDefaultName + c:@F@cv_MSER_getDelta: + name: cv_MSER_getDelta + c:@F@cv_MSER_getEdgeBlurSize: + name: cv_MSER_getEdgeBlurSize + c:@F@cv_MSER_getMaxArea: + name: cv_MSER_getMaxArea + c:@F@cv_MSER_getMaxEvolution: + name: cv_MSER_getMaxEvolution + c:@F@cv_MSER_getMaxVariation: + name: cv_MSER_getMaxVariation + c:@F@cv_MSER_getMinArea: + name: cv_MSER_getMinArea + c:@F@cv_MSER_getMinDiversity: + name: cv_MSER_getMinDiversity + c:@F@cv_MSER_getMinMargin: + name: cv_MSER_getMinMargin + c:@F@cv_MSER_getPass2Only: + name: cv_MSER_getPass2Only + c:@F@cv_MSER_setAreaThreshold: + name: cv_MSER_setAreaThreshold + c:@F@cv_MSER_setDelta: + name: cv_MSER_setDelta + c:@F@cv_MSER_setEdgeBlurSize: + name: cv_MSER_setEdgeBlurSize + c:@F@cv_MSER_setMaxArea: + name: cv_MSER_setMaxArea + c:@F@cv_MSER_setMaxEvolution: + name: cv_MSER_setMaxEvolution + c:@F@cv_MSER_setMaxVariation: + name: cv_MSER_setMaxVariation + c:@F@cv_MSER_setMinArea: + name: cv_MSER_setMinArea + c:@F@cv_MSER_setMinDiversity: + name: cv_MSER_setMinDiversity + c:@F@cv_MSER_setMinMargin: + name: cv_MSER_setMinMargin + c:@F@cv_MSER_setPass2Only: + name: cv_MSER_setPass2Only c:@F@cv_ORB_close: name: cv_ORB_close c:@F@cv_ORB_create: @@ -88,14 +304,82 @@ files: name: cv_ORB_detect c:@F@cv_ORB_detectAndCompute: name: cv_ORB_detectAndCompute + c:@F@cv_ORB_empty: + name: cv_ORB_empty + c:@F@cv_ORB_getDefaultName: + name: cv_ORB_getDefaultName + c:@F@cv_ORB_getEdgeThreshold: + name: cv_ORB_getEdgeThreshold + c:@F@cv_ORB_getFastThreshold: + name: cv_ORB_getFastThreshold + c:@F@cv_ORB_getFirstLevel: + name: cv_ORB_getFirstLevel + c:@F@cv_ORB_getMaxFeatures: + name: cv_ORB_getMaxFeatures + c:@F@cv_ORB_getNLevels: + name: cv_ORB_getNLevels + c:@F@cv_ORB_getPatchSize: + name: cv_ORB_getPatchSize + c:@F@cv_ORB_getScaleFactor: + name: cv_ORB_getScaleFactor + c:@F@cv_ORB_getScoreType: + name: cv_ORB_getScoreType + c:@F@cv_ORB_getWTA_K: + name: cv_ORB_getWTA_K + c:@F@cv_ORB_setEdgeThreshold: + name: cv_ORB_setEdgeThreshold + c:@F@cv_ORB_setFastThreshold: + name: cv_ORB_setFastThreshold + c:@F@cv_ORB_setFirstLevel: + name: cv_ORB_setFirstLevel + c:@F@cv_ORB_setMaxFeatures: + name: cv_ORB_setMaxFeatures + c:@F@cv_ORB_setNLevels: + name: cv_ORB_setNLevels + c:@F@cv_ORB_setPatchSize: + name: cv_ORB_setPatchSize + c:@F@cv_ORB_setScaleFactor: + name: cv_ORB_setScaleFactor + c:@F@cv_ORB_setScoreType: + name: cv_ORB_setScoreType + c:@F@cv_ORB_setWTA_K: + name: cv_ORB_setWTA_K c:@F@cv_SIFT_close: name: cv_SIFT_close 
c:@F@cv_SIFT_create: name: cv_SIFT_create + c:@F@cv_SIFT_create_1: + name: cv_SIFT_create_1 + c:@F@cv_SIFT_create_2: + name: cv_SIFT_create_2 c:@F@cv_SIFT_detect: name: cv_SIFT_detect c:@F@cv_SIFT_detectAndCompute: name: cv_SIFT_detectAndCompute + c:@F@cv_SIFT_empty: + name: cv_SIFT_empty + c:@F@cv_SIFT_getContrastThreshold: + name: cv_SIFT_getContrastThreshold + c:@F@cv_SIFT_getDefaultName: + name: cv_SIFT_getDefaultName + c:@F@cv_SIFT_getEdgeThreshold: + name: cv_SIFT_getEdgeThreshold + c:@F@cv_SIFT_getNFeatures: + name: cv_SIFT_getNFeatures + c:@F@cv_SIFT_getNOctaveLayers: + name: cv_SIFT_getNOctaveLayers + c:@F@cv_SIFT_getSigma: + name: cv_SIFT_getSigma + c:@F@cv_SIFT_setContrastThreshold: + name: cv_SIFT_setContrastThreshold + c:@F@cv_SIFT_setEdgeThreshold: + name: cv_SIFT_setEdgeThreshold + c:@F@cv_SIFT_setNFeatures: + name: cv_SIFT_setNFeatures + c:@F@cv_SIFT_setNOctaveLayers: + name: cv_SIFT_setNOctaveLayers + c:@F@cv_SIFT_setSigma: + name: cv_SIFT_setSigma c:@F@cv_SimpleBlobDetectorParams_create: name: cv_SimpleBlobDetectorParams_create c:@F@cv_SimpleBlobDetector_close: @@ -106,6 +390,18 @@ files: name: cv_SimpleBlobDetector_create_1 c:@F@cv_SimpleBlobDetector_detect: name: cv_SimpleBlobDetector_detect + c:@F@cv_SimpleBlobDetector_detectAndCompute: + name: cv_SimpleBlobDetector_detectAndCompute + c:@F@cv_SimpleBlobDetector_empty: + name: cv_SimpleBlobDetector_empty + c:@F@cv_SimpleBlobDetector_getBlobContours: + name: cv_SimpleBlobDetector_getBlobContours + c:@F@cv_SimpleBlobDetector_getDefaultName: + name: cv_SimpleBlobDetector_getDefaultName + c:@F@cv_SimpleBlobDetector_getParams: + name: cv_SimpleBlobDetector_getParams + c:@F@cv_SimpleBlobDetector_setParams: + name: cv_SimpleBlobDetector_setParams c:@F@cv_drawKeyPoints: name: cv_drawKeyPoints c:@F@cv_drawMatches: @@ -200,13 +496,19 @@ files: name: VecChar c:types.h@T@VecDMatch: name: VecDMatch + c:types.h@T@VecF32: + name: VecF32 c:types.h@T@VecF64: name: VecF64 c:types.h@T@VecI32: name: VecI32 c:types.h@T@VecKeyPoint: name: VecKeyPoint + c:types.h@T@VecRect: + name: VecRect c:types.h@T@VecVecChar: name: VecVecChar c:types.h@T@VecVecDMatch: name: VecVecDMatch + c:types.h@T@VecVecPoint: + name: VecVecPoint diff --git a/packages/dartcv/lib/src/g/gapi.g.dart b/packages/dartcv/lib/src/g/gapi.g.dart index b6be85a5..3b2461c3 100644 --- a/packages/dartcv/lib/src/g/gapi.g.dart +++ b/packages/dartcv/lib/src/g/gapi.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/highgui.g.dart b/packages/dartcv/lib/src/g/highgui.g.dart index d8c60a7d..39a3f538 100644 --- a/packages/dartcv/lib/src/g/highgui.g.dart +++ b/packages/dartcv/lib/src/g/highgui.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/imgcodecs.g.dart b/packages/dartcv/lib/src/g/imgcodecs.g.dart index c111c955..dd0c1e0d 100644 --- a/packages/dartcv/lib/src/g/imgcodecs.g.dart +++ b/packages/dartcv/lib/src/g/imgcodecs.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c 
wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/imgproc.g.dart b/packages/dartcv/lib/src/g/imgproc.g.dart index ccec59ca..e007fd1b 100644 --- a/packages/dartcv/lib/src/g/imgproc.g.dart +++ b/packages/dartcv/lib/src/g/imgproc.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/objdetect.g.dart b/packages/dartcv/lib/src/g/objdetect.g.dart index 98bc8c09..41e33839 100644 --- a/packages/dartcv/lib/src/g/objdetect.g.dart +++ b/packages/dartcv/lib/src/g/objdetect.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/photo.g.dart b/packages/dartcv/lib/src/g/photo.g.dart index 650d3823..a0780397 100644 --- a/packages/dartcv/lib/src/g/photo.g.dart +++ b/packages/dartcv/lib/src/g/photo.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/stitching.g.dart b/packages/dartcv/lib/src/g/stitching.g.dart index b5976150..98170346 100644 --- a/packages/dartcv/lib/src/g/stitching.g.dart +++ b/packages/dartcv/lib/src/g/stitching.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/types.g.dart b/packages/dartcv/lib/src/g/types.g.dart index 4c027957..4b4b556e 100644 --- a/packages/dartcv/lib/src/g/types.g.dart +++ b/packages/dartcv/lib/src/g/types.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 @@ -422,6 +421,12 @@ final class TermCriteria extends ffi.Struct { external double epsilon; } +final class UMat extends ffi.Struct { + external ffi.Pointer ptr; +} + +typedef UMatPtr = ffi.Pointer; + final class UsacParams extends ffi.Struct { @ffi.Double() external double confidence; diff --git a/packages/dartcv/lib/src/g/types.yaml b/packages/dartcv/lib/src/g/types.yaml index 4fbf0247..7a1a370a 100644 --- a/packages/dartcv/lib/src/g/types.yaml +++ b/packages/dartcv/lib/src/g/types.yaml @@ -74,6 +74,8 @@ files: name: Scalar c:@S@TermCriteria: name: TermCriteria + c:@S@UMat: + name: UMat c:@S@UsacParams: name: UsacParams c:@S@Vec2b: @@ -232,6 +234,8 @@ files: name: MatPtr c:types.h@T@RNGPtr: name: RNGPtr + c:types.h@T@UMatPtr: + name: UMatPtr c:types.h@T@VecCharPtr: name: VecCharPtr 
c:types.h@T@VecDMatchPtr: diff --git a/packages/dartcv/lib/src/g/video.g.dart b/packages/dartcv/lib/src/g/video.g.dart index a865ec20..40d9c060 100644 --- a/packages/dartcv/lib/src/g/video.g.dart +++ b/packages/dartcv/lib/src/g/video.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/lib/src/g/videoio.g.dart b/packages/dartcv/lib/src/g/videoio.g.dart index 525a1268..0659e895 100644 --- a/packages/dartcv/lib/src/g/videoio.g.dart +++ b/packages/dartcv/lib/src/g/videoio.g.dart @@ -1,6 +1,5 @@ // coverage:ignore-file // opencv_dart - OpenCV bindings for Dart language -// some c wrappers were from gocv: https://github.com/hybridgroup/gocv // License: Apache-2.0 https://github.com/hybridgroup/gocv/blob/release/LICENSE.txt // Author: Rainyl // License: Apache-2.0 diff --git a/packages/dartcv/pubspec.yaml b/packages/dartcv/pubspec.yaml index e9e521ce..6191facb 100644 --- a/packages/dartcv/pubspec.yaml +++ b/packages/dartcv/pubspec.yaml @@ -1,8 +1,7 @@ name: dartcv4 description: > - OpenCV bindings for Dart language. dartcv is for pure dart only, - for Flutter, use opencv_core or opencv_dart. -version: 1.1.7 + OpenCV image processing library bindings for Dart language. +version: 1.1.8 repository: https://github.com/rainyl/opencv_dart homepage: https://github.com/rainyl/opencv_dart/tree/main/packages/dartcv @@ -10,7 +9,7 @@ environment: sdk: ">=3.3.0 <4.0.0" dependencies: - ffi: ^2.1.3 + ffi: ^2.1.4 yaml: ^3.1.3 dev_dependencies: diff --git a/packages/dartcv/src b/packages/dartcv/src index d929abd0..062d47e6 160000 --- a/packages/dartcv/src +++ b/packages/dartcv/src @@ -1 +1 @@ -Subproject commit d929abd0d1db541bac56fc4629b54e4939913c63 +Subproject commit 062d47e6ce03ef69ffc202e351e65df7bc40c15e diff --git a/packages/dartcv/test/calib3d/calib3d_test.dart b/packages/dartcv/test/calib3d/calib3d_test.dart index df00d0bf..76eb32fd 100644 --- a/packages/dartcv/test/calib3d/calib3d_test.dart +++ b/packages/dartcv/test/calib3d/calib3d_test.dart @@ -163,21 +163,30 @@ void main() async { }); test('cv.correctMatches', () async { - final mat = cv.Mat.from2DList([ - [133.28685454, 201.58760058, 126.28483141], - [218.85759196, 214.70814757, 21.50269382], - [151.29091759, 190.73174272, 38.21554576], - ], cv.MatType.CV_32FC1); - final points3 = cv.Mat.from2DList([ - [190.85422998, 55.05627181], - [240.19097166, 206.9696157], - [186.65860323, 123.41471593], - ], cv.MatType.CV_32FC1).reshape(2, 1); - final points2 = cv.Mat.from2DList([ - [115.73466756, 63.21138927], - [84.3030183, 225.45245352], - [34.09884804, 98.12981797], - ], cv.MatType.CV_32FC1).reshape(2, 1); + final mat = cv.Mat.from2DList( + [ + [133.28685454, 201.58760058, 126.28483141], + [218.85759196, 214.70814757, 21.50269382], + [151.29091759, 190.73174272, 38.21554576], + ], + cv.MatType.CV_32FC1, + ); + final points3 = cv.Mat.from2DList( + [ + [190.85422998, 55.05627181], + [240.19097166, 206.9696157], + [186.65860323, 123.41471593], + ], + cv.MatType.CV_32FC1, + ).reshape(2, 1); + final points2 = cv.Mat.from2DList( + [ + [115.73466756, 63.21138927], + [84.3030183, 225.45245352], + [34.09884804, 98.12981797], + ], + cv.MatType.CV_32FC1, + ).reshape(2, 1); { final (newPoints1, newPoints2) = cv.correctMatches(mat, points2, points3); @@ -193,11 +202,14 @@ void main() async { }); 
test('cv.decomposeEssentialMat', () async { - final E = cv.Mat.from2DList([ - [0.0, -1.0, 0.0], - [1.0, 0.0, -1.0], - [0.0, 1.0, 0.0], - ], cv.MatType.CV_64FC1); + final E = cv.Mat.from2DList( + [ + [0.0, -1.0, 0.0], + [1.0, 0.0, -1.0], + [0.0, 1.0, 0.0], + ], + cv.MatType.CV_64FC1, + ); { final (r1, r2, t) = cv.decomposeEssentialMat(E); expect(r1.isEmpty, false); @@ -214,16 +226,22 @@ void main() async { }); test('cv.decomposeHomographyMat', () async { - final H = cv.Mat.from2DList([ - [1.0, 0.2, 100.0], - [0.1, 1.0, 200.0], - [0.001, 0.002, 1.0], - ], cv.MatType.CV_64FC1); - final K = cv.Mat.from2DList([ - [1000, 0, 320], - [0, 1000, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final H = cv.Mat.from2DList( + [ + [1.0, 0.2, 100.0], + [0.1, 1.0, 200.0], + [0.001, 0.002, 1.0], + ], + cv.MatType.CV_64FC1, + ); + final K = cv.Mat.from2DList( + [ + [1000, 0, 320], + [0, 1000, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); { final (numSolutions, rotations, translations, normals) = cv.decomposeHomographyMat(H, K); @@ -243,11 +261,14 @@ void main() async { }); test('cv.decomposeProjectionMatrix', () async { - final P = cv.Mat.from2DList([ - [500, 0, 320, 0], - [0, 500, 240, 0], - [0, 0, 1, 0], - ], cv.MatType.CV_64FC1); + final P = cv.Mat.from2DList( + [ + [500, 0, 320, 0], + [0, 500, 240, 0], + [0, 0, 1, 0], + ], + cv.MatType.CV_64FC1, + ); { final (cameraMatrix, rotMatrix, transVec) = cv.decomposeProjectionMatrix(P); expect(cameraMatrix.isEmpty, false); @@ -265,22 +286,31 @@ void main() async { test('cv.drawFrameAxes', () async { final image = cv.imread("test/images/lenna.png"); - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); final distCoeffs = cv.Mat.zeros(4, 1, cv.MatType.CV_64FC1); - final rvec = cv.Mat.from2DList([ - [0.1], - [0.2], - [0.3], - ], cv.MatType.CV_64FC1); - final tvec = cv.Mat.from2DList([ - [0.0], - [0.0], - [5.0], - ], cv.MatType.CV_64FC1); + final rvec = cv.Mat.from2DList( + [ + [0.1], + [0.2], + [0.3], + ], + cv.MatType.CV_64FC1, + ); + final tvec = cv.Mat.from2DList( + [ + [0.0], + [0.0], + [5.0], + ], + cv.MatType.CV_64FC1, + ); { cv.drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, 3.0); // cv.imwrite("a.png", image); @@ -326,19 +356,25 @@ void main() async { }); test('cv.estimateAffine3D', () async { - final src = cv.Mat.from2DList([ - [10, 10, 10], - [10, 10, 20], - [10, 20, 10], - [10, 20, 20], - ], cv.MatType.CV_32FC1); + final src = cv.Mat.from2DList( + [ + [10, 10, 10], + [10, 10, 20], + [10, 20, 10], + [10, 20, 20], + ], + cv.MatType.CV_32FC1, + ); - final dst = cv.Mat.from2DList([ - [-20, 20, 20], - [-20, 20, 40], - [20, 40, 20], - [20, 40, 40], - ], cv.MatType.CV_32FC1); + final dst = cv.Mat.from2DList( + [ + [-20, 20, 20], + [-20, 20, 40], + [20, 40, 20], + [20, 40, 40], + ], + cv.MatType.CV_32FC1, + ); { final (rval, rt, inliers) = cv.estimateAffine3D(src, dst); @@ -356,34 +392,40 @@ void main() async { }); test('cv.estimateTranslation3D', () async { - final src = cv.Mat.from3DList([ - [ - [0.0, 0.0, 0.0], - ], - [ - [1.0, 0.0, 0.0], - ], + final src = cv.Mat.from3DList( [ - [0.0, 1.0, 0.0], + [ + [0.0, 0.0, 0.0], + ], + [ + [1.0, 0.0, 0.0], + ], + [ + [0.0, 1.0, 0.0], + ], + [ + [0.0, 0.0, 1.0], + ], ], + cv.MatType.CV_32FC3, + ); + final dst = cv.Mat.from3DList( [ - [0.0, 0.0, 1.0], + [ + [1.0, 1.0, 1.0], + ], + [ + [2.0, 1.0, 1.0], + ], + [ + [1.0, 
2.0, 1.0], + ], + [ + [1.0, 1.0, 2.0], + ], ], - ], cv.MatType.CV_32FC3); - final dst = cv.Mat.from3DList([ - [ - [1.0, 1.0, 1.0], - ], - [ - [2.0, 1.0, 1.0], - ], - [ - [1.0, 2.0, 1.0], - ], - [ - [1.0, 1.0, 2.0], - ], - ], cv.MatType.CV_32FC3); + cv.MatType.CV_32FC3, + ); { final (ret, translation, inliers) = cv.estimateTranslation3D(src, dst); expect(ret, 1); @@ -417,30 +459,39 @@ void main() async { }); test('cv.filterHomographyDecompByVisibleRefpoints', () async { - final H = cv.Mat.from2DList([ - [1.0, 0.2, 100.0], - [0.1, 1.0, 200.0], - [0.001, 0.002, 1.0], - ], cv.MatType.CV_64FC1); - final K = cv.Mat.from2DList([ - [1000, 0, 320], - [0, 1000, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); - final refPoints = cv.Mat.from3DList([ - [ - [100, 100], - ], + final H = cv.Mat.from2DList( [ - [200, 100], + [1.0, 0.2, 100.0], + [0.1, 1.0, 200.0], + [0.001, 0.002, 1.0], ], + cv.MatType.CV_64FC1, + ); + final K = cv.Mat.from2DList( [ - [200, 200], + [1000, 0, 320], + [0, 1000, 240], + [0, 0, 1], ], + cv.MatType.CV_64FC1, + ); + final refPoints = cv.Mat.from3DList( [ - [100, 200], + [ + [100, 100], + ], + [ + [200, 100], + ], + [ + [200, 200], + ], + [ + [100, 200], + ], ], - ], cv.MatType.CV_32FC2); + cv.MatType.CV_32FC2, + ); { final (numSolutions, rotations, translations, normals) = cv.decomposeHomographyMat(H, K); @@ -485,17 +536,17 @@ void main() async { final (ret, corners) = cv.findChessboardCorners(image, (4, 6)); expect(ret, true); - final _corners = cv.Mat.fromVec(corners); + final corners_ = cv.Mat.fromVec(corners); { - final refinedCorners = cv.find4QuadCornerSubpix(image, _corners, (5, 5)); + final refinedCorners = cv.find4QuadCornerSubpix(image, corners_, (5, 5)); expect(refinedCorners, true); - expect(_corners.isEmpty, false); + expect(corners_.isEmpty, false); } { - final refinedCorners = await cv.find4QuadCornerSubpixAsync(image, _corners, (5, 5)); + final refinedCorners = await cv.find4QuadCornerSubpixAsync(image, corners_, (5, 5)); expect(refinedCorners, true); - expect(_corners.isEmpty, false); + expect(corners_.isEmpty, false); } }); @@ -603,25 +654,34 @@ void main() async { }); test('cv.findEssentialMat', () async { - final points1 = cv.Mat.from2DList([ - [150, 200], - [130, 210], - [120, 230], - [110, 250], - [100, 270], - ], cv.MatType.CV_32FC1); - final points2 = cv.Mat.from2DList([ - [152, 202], - [132, 212], - [122, 232], - [112, 252], - [102, 272], - ], cv.MatType.CV_32FC1); - final K = cv.Mat.from2DList([ - [1000, 0, 320], - [0, 1000, 240], - [0, 0, 1], - ], cv.MatType.CV_32FC1); + final points1 = cv.Mat.from2DList( + [ + [150, 200], + [130, 210], + [120, 230], + [110, 250], + [100, 270], + ], + cv.MatType.CV_32FC1, + ); + final points2 = cv.Mat.from2DList( + [ + [152, 202], + [132, 212], + [122, 232], + [112, 252], + [102, 272], + ], + cv.MatType.CV_32FC1, + ); + final K = cv.Mat.from2DList( + [ + [1000, 0, 320], + [0, 1000, 240], + [0, 0, 1], + ], + cv.MatType.CV_32FC1, + ); { final E = cv.findEssentialMatCameraMatrix(points1, points2, K); expect(E.isEmpty, false); @@ -687,59 +747,65 @@ void main() async { }); test('cv.findHomographyUsac', () async { - final points1 = cv.Mat.from3DList([ - [ - [150, 200], - ], - [ - [130, 210], - ], - [ - [120, 230], - ], - [ - [110, 250], - ], + final points1 = cv.Mat.from3DList( [ - [200, 100], + [ + [150, 200], + ], + [ + [130, 210], + ], + [ + [120, 230], + ], + [ + [110, 250], + ], + [ + [200, 100], + ], + [ + [210, 120], + ], + [ + [230, 140], + ], + [ + [250, 160], + ], ], - [ - [210, 120], - ], - [ - [230, 140], - ], - [ 
- [250, 160], - ], - ], cv.MatType.CV_32FC2); + cv.MatType.CV_32FC2, + ); - final points2 = cv.Mat.from3DList([ + final points2 = cv.Mat.from3DList( [ - [152, 202], + [ + [152, 202], + ], + [ + [132, 212], + ], + [ + [122, 232], + ], + [ + [112, 252], + ], + [ + [202, 102], + ], + [ + [212, 122], + ], + [ + [232, 142], + ], + [ + [252, 162], + ], ], - [ - [132, 212], - ], - [ - [122, 232], - ], - [ - [112, 252], - ], - [ - [202, 102], - ], - [ - [212, 122], - ], - [ - [232, 142], - ], - [ - [252, 162], - ], - ], cv.MatType.CV_32FC2); + cv.MatType.CV_32FC2, + ); final mask = cv.Mat.empty(); { @@ -764,27 +830,33 @@ void main() async { }); test('cv.findFundamentalMat', () async { - final imgPt1 = cv.Mat.from2DList([ - [150, 200], - [130, 210], - [120, 230], - [110, 250], - [200, 100], - [210, 120], - [230, 140], - [250, 160], - ], cv.MatType.CV_32FC1); - - final imgPt2 = cv.Mat.from2DList([ - [152, 202], - [132, 212], - [122, 232], - [112, 252], - [202, 102], - [212, 122], - [232, 142], - [252, 162], - ], cv.MatType.CV_32FC1); + final imgPt1 = cv.Mat.from2DList( + [ + [150, 200], + [130, 210], + [120, 230], + [110, 250], + [200, 100], + [210, 120], + [230, 140], + [250, 160], + ], + cv.MatType.CV_32FC1, + ); + + final imgPt2 = cv.Mat.from2DList( + [ + [152, 202], + [132, 212], + [122, 232], + [112, 252], + [202, 102], + [212, 122], + [232, 142], + [252, 162], + ], + cv.MatType.CV_32FC1, + ); { final m = cv.findFundamentalMat(imgPt1, imgPt2, method: cv.FM_RANSAC); @@ -816,14 +888,20 @@ void main() async { }); test('cv.getDefaultNewCameraMatrix', () async { - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_32FC1); - final distCoeffs = cv.Mat.from2DList([ - [-0.1, 0.1, 0, 0], - ], cv.MatType.CV_32FC1); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_32FC1, + ); + final distCoeffs = cv.Mat.from2DList( + [ + [-0.1, 0.1, 0, 0], + ], + cv.MatType.CV_32FC1, + ); { final newCameraMatrix = cv.getDefaultNewCameraMatrix( cameraMatrix, @@ -983,39 +1061,51 @@ void main() async { }); test('cv.recoverPoseCameraMatrix', () async { - final essential = cv.Mat.from2DList([ - [1.503247056657373e-16, -7.074103796034695e-16, -7.781514175638166e-16], - [6.720398606232961e-16, -6.189840821530359e-17, -0.7071067811865476], - [7.781514175638166e-16, 0.7071067811865475, -2.033804841359975e-16], - ], cv.MatType.CV_64FC1); - - final p1 = cv.Mat.from2DList([ - [1017.0883, 848.23529], - [1637, 848.23529], - [1637, 1648.7059], - [1017.0883, 1648.7059], - [2282.2144, 772], - [3034.9644, 772], - [3034.9644, 1744], - [2282.2144, 1744], - ], cv.MatType.CV_64FC1); - - final p2 = cv.Mat.from2DList([ - [414.88824, 848.23529], - [1034.8, 848.23529], - [1034.8, 1648.7059], - [414.88824, 1648.7059], - [1550.9714, 772], - [2303.7214, 772], - [2303.7214, 1744], - [1550.9714, 1744], - ], cv.MatType.CV_64FC1); - - final k = cv.Mat.from2DList([ - [3011, 0, 1637], - [0, 3024, 1204], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final essential = cv.Mat.from2DList( + [ + [1.503247056657373e-16, -7.074103796034695e-16, -7.781514175638166e-16], + [6.720398606232961e-16, -6.189840821530359e-17, -0.7071067811865476], + [7.781514175638166e-16, 0.7071067811865475, -2.033804841359975e-16], + ], + cv.MatType.CV_64FC1, + ); + + final p1 = cv.Mat.from2DList( + [ + [1017.0883, 848.23529], + [1637, 848.23529], + [1637, 1648.7059], + [1017.0883, 1648.7059], + [2282.2144, 772], + [3034.9644, 772], + [3034.9644, 1744], + [2282.2144, 
1744], + ], + cv.MatType.CV_64FC1, + ); + + final p2 = cv.Mat.from2DList( + [ + [414.88824, 848.23529], + [1034.8, 848.23529], + [1034.8, 1648.7059], + [414.88824, 1648.7059], + [1550.9714, 772], + [2303.7214, 772], + [2303.7214, 1744], + [1550.9714, 1744], + ], + cv.MatType.CV_64FC1, + ); + + final k = cv.Mat.from2DList( + [ + [3011, 0, 1637], + [0, 3024, 1204], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); { final (rval, r, t, _) = cv.recoverPoseCameraMatrix(essential, p1, p2, k); @@ -1033,33 +1123,42 @@ void main() async { }); test('cv.recoverPose', () async { - final points1 = cv.Mat.from2DList([ - [150, 200], - [130, 210], - [120, 230], - [110, 250], - [200, 100], - [210, 120], - [230, 140], - [250, 160], - ], cv.MatType.CV_64FC1); - - final points2 = cv.Mat.from2DList([ - [152, 202], - [132, 212], - [122, 232], - [112, 252], - [202, 102], - [212, 122], - [232, 142], - [252, 162], - ], cv.MatType.CV_64FC1); - - final K = cv.Mat.from2DList([ - [1000, 0, 320], - [0, 1000, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final points1 = cv.Mat.from2DList( + [ + [150, 200], + [130, 210], + [120, 230], + [110, 250], + [200, 100], + [210, 120], + [230, 140], + [250, 160], + ], + cv.MatType.CV_64FC1, + ); + + final points2 = cv.Mat.from2DList( + [ + [152, 202], + [132, 212], + [122, 232], + [112, 252], + [202, 102], + [212, 122], + [232, 142], + [252, 162], + ], + cv.MatType.CV_64FC1, + ); + + final K = cv.Mat.from2DList( + [ + [1000, 0, 320], + [0, 1000, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); { final E = cv.findEssentialMatCameraMatrix(points1, points2, K, method: cv.FM_RANSAC); @@ -1079,11 +1178,14 @@ void main() async { }); test('cv.RQDecomp3x3', () async { - final K = cv.Mat.from2DList([ - [1000, 0, 320], - [0, 1000, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final K = cv.Mat.from2DList( + [ + [1000, 0, 320], + [0, 1000, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); { final (rval, R, Q) = cv.RQDecomp3x3(K); expect(rval, cv.Vec3d(0, 0, 0)); @@ -1143,43 +1245,61 @@ void main() async { }); test('cv.sampsonDistance', () { - final points1 = cv.Mat.from2DList([ - [150, 200, 1], - [130, 210, 1], - [120, 230, 1], - [110, 250, 1], - ], cv.MatType.CV_64FC1); - final points2 = cv.Mat.from2DList([ - [152, 202, 1], - [132, 212, 1], - [122, 232, 1], - [112, 252, 1], - ], cv.MatType.CV_64FC1); - final F = cv.Mat.from2DList([ - [1.292e-6, 3.303e-5, -0.004], - [-3.299e-5, 1.120e-6, 0.017], - [0.004, -0.017, 1], - ], cv.MatType.CV_64FC1); + final points1 = cv.Mat.from2DList( + [ + [150, 200, 1], + [130, 210, 1], + [120, 230, 1], + [110, 250, 1], + ], + cv.MatType.CV_64FC1, + ); + final points2 = cv.Mat.from2DList( + [ + [152, 202, 1], + [132, 212, 1], + [122, 232, 1], + [112, 252, 1], + ], + cv.MatType.CV_64FC1, + ); + final F = cv.Mat.from2DList( + [ + [1.292e-6, 3.303e-5, -0.004], + [-3.299e-5, 1.120e-6, 0.017], + [0.004, -0.017, 1], + ], + cv.MatType.CV_64FC1, + ); final sampsonDistances = cv.sampsonDistance(points1, points2, F); expect(sampsonDistances, closeTo(4034.6767, 1e-3)); }); test('cv.solveP3P', () async { - final objectPoints = cv.Mat.from2DList([ - [0, 0, 0], - [1, 0, 0], - [0, 1, 0], - ], cv.MatType.CV_64FC1); - final imagePoints = cv.Mat.from2DList([ - [320, 240], - [400, 240], - [320, 320], - ], cv.MatType.CV_64FC1); - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final objectPoints = cv.Mat.from2DList( + [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + ], + cv.MatType.CV_64FC1, + ); + final 
imagePoints = cv.Mat.from2DList( + [ + [320, 240], + [400, 240], + [320, 320], + ], + cv.MatType.CV_64FC1, + ); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); final distCoeffs = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); { final (ret, rvecs, tvecs) = cv.solveP3P( @@ -1211,20 +1331,26 @@ void main() async { test('cv.solvePnP', () async { final rvec = cv.Mat.fromList(3, 1, cv.MatType.CV_32FC1, [0, 0, 0]); final tvec = cv.Mat.fromList(3, 1, cv.MatType.CV_32FC1, [0, 0, 0]); - final cameraMatrix = cv.Mat.from2DList([ - [1, 0, 0], - [0, 1, 0], - [0, 0, 1], - ], cv.MatType.CV_32FC1); + final cameraMatrix = cv.Mat.from2DList( + [ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1], + ], + cv.MatType.CV_32FC1, + ); final dist = cv.Mat.fromList(5, 1, cv.MatType.CV_32FC1, [0, 0, 0, 0, 0]); - final objPts = cv.Mat.from2DList([ - [0, 0, 1], - [1, 0, 1], - [0, 1, 1], - [1, 1, 1], - [1, 0, 2], - [0, 1, 2], - ], cv.MatType.CV_32FC1); + final objPts = cv.Mat.from2DList( + [ + [0, 0, 1], + [1, 0, 1], + [0, 1, 1], + [1, 1, 1], + [1, 0, 2], + [0, 1, 2], + ], + cv.MatType.CV_32FC1, + ); { final (imgPts, jacobian) = cv.projectPoints(objPts, rvec, tvec, cameraMatrix, dist); expect(imgPts.isEmpty, false); @@ -1257,27 +1383,36 @@ void main() async { }); test('cv.solvePnPGeneric', () async { - final objectPoints = cv.Mat.from2DList([ - [0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [1, 1, 0], - [0.5, 0.5, 1], - [0, 0.5, 1], - ], cv.MatType.CV_64FC1); - final imagePoints = cv.Mat.from2DList([ - [320, 240], - [400, 240], - [320, 320], - [400, 320], - [360, 270], - [300, 250], - ], cv.MatType.CV_64FC1); - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final objectPoints = cv.Mat.from2DList( + [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + [1, 1, 0], + [0.5, 0.5, 1], + [0, 0.5, 1], + ], + cv.MatType.CV_64FC1, + ); + final imagePoints = cv.Mat.from2DList( + [ + [320, 240], + [400, 240], + [320, 320], + [400, 320], + [360, 270], + [300, 250], + ], + cv.MatType.CV_64FC1, + ); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); final distCoeffs = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); { final (ret, rvecs, tvecs, err) = cv.solvePnPGeneric( @@ -1307,27 +1442,36 @@ void main() async { }); test('cv.solvePnPRansac', () async { - final objectPoints = cv.Mat.from2DList([ - [0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [1, 1, 0], - [0.5, 0.5, 1], - [0, 0.5, 1], - ], cv.MatType.CV_64FC1); - final imagePoints = cv.Mat.from2DList([ - [320, 240], - [400, 240], - [320, 320], - [400, 320], - [360, 270], - [300, 250], - ], cv.MatType.CV_64FC1); - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final objectPoints = cv.Mat.from2DList( + [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + [1, 1, 0], + [0.5, 0.5, 1], + [0, 0.5, 1], + ], + cv.MatType.CV_64FC1, + ); + final imagePoints = cv.Mat.from2DList( + [ + [320, 240], + [400, 240], + [320, 320], + [400, 320], + [360, 270], + [300, 250], + ], + cv.MatType.CV_64FC1, + ); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); final distCoeffs = cv.Mat.zeros(1, 4, cv.MatType.CV_64FC1); { final (ret, rvec, tvec, inliers) = cv.solvePnPRansac( @@ -1410,16 +1554,22 @@ void main() async { final projMat1 = cv.Mat.zeros(3, 4, cv.MatType.CV_64FC1); final projMat2 = cv.Mat.zeros(3, 4, 
cv.MatType.CV_64FC1); - final projPoints1 = cv.Mat.from3DList([ + final projPoints1 = cv.Mat.from3DList( [ - [1.0, 2.0], + [ + [1.0, 2.0], + ], ], - ], cv.MatType.CV_64FC2); - final projPoints2 = cv.Mat.from3DList([ + cv.MatType.CV_64FC2, + ); + final projPoints2 = cv.Mat.from3DList( [ - [3.0, 4.0], + [ + [3.0, 4.0], + ], ], - ], cv.MatType.CV_64FC2); + cv.MatType.CV_64FC2, + ); { final homogeneous = cv.triangulatePoints(projMat1, projMat2, projPoints1, projPoints2); expect(homogeneous.isEmpty, false); @@ -1489,20 +1639,26 @@ void main() async { }); test('cv.undistortImagePoints', () async { - final cameraMatrix = cv.Mat.from2DList([ - [800, 0, 320], - [0, 800, 240], - [0, 0, 1], - ], cv.MatType.CV_64FC1); + final cameraMatrix = cv.Mat.from2DList( + [ + [800, 0, 320], + [0, 800, 240], + [0, 0, 1], + ], + cv.MatType.CV_64FC1, + ); final distCoeffs = cv.Mat.fromList(1, 4, cv.MatType.CV_64FC1, [-0.2, 0.1, 0.0, 0.0]); - final distortedPoints = cv.Mat.from3DList([ + final distortedPoints = cv.Mat.from3DList( [ - [320, 240], - [400, 240], - [320, 320], - [400, 320], + [ + [320, 240], + [400, 240], + [320, 320], + [400, 320], + ], ], - ], cv.MatType.CV_64FC2); + cv.MatType.CV_64FC2, + ); { final undistorted = cv.undistortImagePoints(distortedPoints, cameraMatrix, distCoeffs); expect(undistorted.isEmpty, false); diff --git a/packages/dartcv/test/contrib/freetype_test.dart b/packages/dartcv/test/contrib/freetype_test.dart new file mode 100644 index 00000000..b5727d4d --- /dev/null +++ b/packages/dartcv/test/contrib/freetype_test.dart @@ -0,0 +1,57 @@ +import 'dart:io'; + +import 'package:dartcv4/dartcv.dart' as cv; +import 'package:test/test.dart'; + +void main() async { + final fontFile = File("test/font/hei.ttf"); + test('cv.FreeType2 creation', () async { + final ft = cv.FreeType2.create(); + ft.loadFontData("test/font/hei.ttf", 0); + ft.setSplitNumber(1); + + final buffer = await fontFile.readAsBytes(); + ft.loadFontBuffer(buffer, 0); + }); + + test('cv.FreeType2 creation async', () async { + final ft = cv.FreeType2.create(); + await ft.loadFontDataAsync("test/font/hei.ttf", 0); + ft.setSplitNumber(1); + + final buffer = await fontFile.readAsBytes(); + await ft.loadFontBufferAsync(buffer, 0); + }); + + test('cv.FreeType2 putText', () { + final ft = cv.FreeType2.create(filename: "test/font/hei.ttf"); + + final mat = cv.Mat.zeros(100, 300, cv.MatType.CV_8UC3); + ft.putText( + mat, + r"你好Hello123'\+", + cv.Point(10, 10), + 30, + cv.Scalar.all(255), + lineType: cv.LINE_4, + thickness: cv.FILLED, + ); + cv.imwrite("test/images_out/hei.png", mat); + }); + + test('cv.FreeType2 putText async', () async { + final ft = cv.FreeType2.create(filename: "test/font/hei.ttf"); + + final mat = cv.Mat.zeros(100, 300, cv.MatType.CV_8UC3); + await ft.putTextAsync( + mat, + r"你好Hello123'\+", + cv.Point(10, 10), + 30, + cv.Scalar.all(255), + lineType: cv.LINE_4, + thickness: cv.FILLED, + ); + await cv.imwriteAsync("test/images_out/hei.png", mat); + }); +} diff --git a/packages/dartcv/test/contrib/ximgproc_test.dart b/packages/dartcv/test/contrib/ximgproc_test.dart index 514dad50..c61b2da6 100644 --- a/packages/dartcv/test/contrib/ximgproc_test.dart +++ b/packages/dartcv/test/contrib/ximgproc_test.dart @@ -295,10 +295,13 @@ void main() async { const nSize = 21; - final elementRLE = await cv.ximgproc_rl.getStructuringElementAsync(cv.MORPH_RECT, ( - nSize * 2 + 1, - nSize * 2 + 1, - )); + final elementRLE = await cv.ximgproc_rl.getStructuringElementAsync( + cv.MORPH_RECT, + ( + nSize * 2 + 1, + nSize * 2 + 1, + ), + ); 
final dst = await cv.ximgproc_rl.erodeAsync(src2, elementRLE, bBoundaryOn: false); expect(dst.isEmpty, false); diff --git a/packages/dartcv/test/core/core_async_test.dart b/packages/dartcv/test/core/core_async_test.dart index 93e8ab17..7fd969a1 100644 --- a/packages/dartcv/test/core/core_async_test.dart +++ b/packages/dartcv/test/core/core_async_test.dart @@ -324,7 +324,7 @@ void main() async { test('cv.LUT async', () async { Future testOneLUT(cv.Mat src, cv.Mat lut) async { expect(lut.channels == src.channels || lut.channels == 1, true); - expect(lut.isContinus, true); + expect(lut.isContinuous, true); final sw = Stopwatch(); sw.start(); final dst = await cv.LUTAsync(src, lut); @@ -358,9 +358,9 @@ void main() async { // 0-1: 65536-1-0 2-3: 65536-1-1 3-4: 65536-1-2 final lutData = switch (lutDepth) { cv.MatType.CV_32F || cv.MatType.CV_64F => List.generate( - lutSize * lutType.channels, - (i) => (lutSize - (i ~/ channel) - 1).toDouble(), - ), + lutSize * lutType.channels, + (i) => (lutSize - (i ~/ channel) - 1).toDouble(), + ), _ => List.generate(lutSize * lutType.channels, (i) => lutSize - (i ~/ channel) - 1), }; final lutInverse = cv.Mat.fromList(1, lutSize, lutType, lutData); diff --git a/packages/dartcv/test/core/core_test.dart b/packages/dartcv/test/core/core_test.dart index b8badb1d..f7ef318b 100644 --- a/packages/dartcv/test/core/core_test.dart +++ b/packages/dartcv/test/core/core_test.dart @@ -1,18 +1,39 @@ // ignore_for_file: avoid_print +import 'dart:isolate'; + import 'package:dartcv4/dartcv.dart' as cv; import 'package:test/test.dart'; void main() async { test('setLogLevel', () { - cv.setLogLevel(cv.LOG_LEVEL_ERROR); + cv.setLogLevel(cv.LogLevel.ERROR); final level = cv.getLogLevel(); - expect(level, equals(cv.LOG_LEVEL_ERROR)); + expect(level, equals(cv.LogLevel.ERROR)); }); test('getLogLevel', () { - cv.setLogLevel(cv.LOG_LEVEL_WARNING); + cv.setLogLevel(cv.LogLevel.WARNING); final level = cv.getLogLevel(); - expect(level, equals(cv.LOG_LEVEL_WARNING)); + expect(level, equals(cv.LogLevel.WARNING)); + }); + + test('cv.replaceWriteLogMessage', () { + cv.setLogLevel(cv.LogLevel.WARNING); + cv.replaceWriteLogMessage(callback: cv.defaultLogCallback); + Isolate.run(() async { + cv.writeLogMessage(cv.LogLevel.WARNING, 'This is a test log message.'); + }); + // reset log callback + cv.replaceWriteLogMessage(callback: null); + }); + + test('cv.replaceWriteLogMessageEx', () { + cv.setLogLevel(cv.LogLevel.WARNING); + cv.replaceWriteLogMessageEx(callback: cv.defaultLogCallbackEx); + Isolate.run(() async { + cv.writeLogMessageEx(cv.LogLevel.WARNING, 'This is a test log message.', file: "core_test.dart"); + }); + cv.replaceWriteLogMessageEx(callback: null); }); test('openCvVersion', () async { @@ -586,7 +607,7 @@ void main() async { test('cv.LUT', () { void testOneLUT(cv.Mat src, cv.Mat lut) { expect(lut.channels == src.channels || lut.channels == 1, true); - expect(lut.isContinus, true); + expect(lut.isContinuous, true); final sw = Stopwatch(); sw.start(); final dst = cv.LUT(src, lut); @@ -621,9 +642,9 @@ void main() async { final lutData = switch (lutDepth) { cv.MatType.CV_32F || cv.MatType.CV_16F || cv.MatType.CV_64F => List.generate( - lutSize * lutType.channels, - (i) => (lutSize - (i ~/ channel) - 1).toDouble(), - ), + lutSize * lutType.channels, + (i) => (lutSize - (i ~/ channel) - 1).toDouble(), + ), _ => List.generate(lutSize * lutType.channels, (i) => lutSize - (i ~/ channel) - 1), }; final lutInverse = cv.Mat.fromList(1, lutSize, lutType, lutData); diff --git 
a/packages/dartcv/test/core/mat_async_test.dart b/packages/dartcv/test/core/mat_async_test.dart index 360b57f4..6f4c3cf8 100644 --- a/packages/dartcv/test/core/mat_async_test.dart +++ b/packages/dartcv/test/core/mat_async_test.dart @@ -11,7 +11,7 @@ void main() async { expect((mat1.width, mat1.height, mat1.channels), (100, 100, 3)); expect(mat1.type, cv.MatType.CV_8UC3); expect(mat1.total, equals(100 * 100)); - expect(mat1.isContinus, equals(true)); + expect(mat1.isContinuous, equals(true)); expect(mat1.step.$1, equals(100 * 3)); expect(mat1.elemSize, equals(3)); expect(mat1.at(0, 0, 0), 255); diff --git a/packages/dartcv/test/core/mat_test.dart b/packages/dartcv/test/core/mat_test.dart index 00c9997c..3ac145c0 100644 --- a/packages/dartcv/test/core/mat_test.dart +++ b/packages/dartcv/test/core/mat_test.dart @@ -16,7 +16,7 @@ void main() async { expect((mat1.width, mat1.height, mat1.channels), (100, 100, 3)); expect(mat1.type, cv.MatType.CV_8UC3); expect(mat1.total, equals(100 * 100)); - expect(mat1.isContinus, equals(true)); + expect(mat1.isContinuous, equals(true)); expect(mat1.step.$1, equals(100 * 3)); expect(mat1.elemSize, equals(3)); expect(mat1.elemSize1, 1); diff --git a/packages/dartcv/test/core/umat_test.dart b/packages/dartcv/test/core/umat_test.dart new file mode 100644 index 00000000..a75444c1 --- /dev/null +++ b/packages/dartcv/test/core/umat_test.dart @@ -0,0 +1,183 @@ +// ignore_for_file: avoid_print + +import 'package:dartcv4/dartcv.dart' as cv; +import 'package:test/test.dart'; + +void main() async { + test('cv.UMat.empty', () { + final mat = cv.UMat.empty(); + expect(mat.isEmpty, true); + }); + + test('cv.UMat.create', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC3); + expect(mat.isEmpty, equals(false)); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 3)); + expect(mat.type, cv.MatType.CV_8UC3); + expect(mat.total, equals(100 * 100)); + expect(mat.isContinuous, equals(true)); + expect(mat.step.$1, equals(100 * 3)); + expect(mat.elemSize, equals(3)); + expect(mat.elemSize1, 1); + expect(mat.dims, 2); + expect(mat.flags, isA()); + }); + + test('cv.UMat.zeros ones eye diag', () { + final mat2 = cv.UMat.zeros(rows: 3, cols: 3, type: cv.MatType.CV_8UC1); + expect((mat2.rows, mat2.cols, mat2.channels), (3, 3, 1)); + + final mat21 = cv.UMat.zerosND([3, 3, 100, 100], cv.MatType.CV_32FC4); + expect(mat21.size, [3, 3, 100, 100]); + expect(mat21.type, cv.MatType.CV_32FC4); + + final mat3 = cv.UMat.eye(rows: 3, cols: 3, type: cv.MatType.CV_8UC3); + expect((mat3.rows, mat3.cols, mat3.channels), (3, 3, 3)); + + final mat4 = cv.UMat.ones(100, 1, cv.MatType.CV_8UC3); + expect((mat4.rows, mat4.cols, mat4.channels), (100, 1, 3)); + + final mat41 = cv.UMat.onesND([3, 3, 100, 100], cv.MatType.CV_32FC4); + expect(mat41.size, [3, 3, 100, 100]); + expect(mat41.type, cv.MatType.CV_32FC4); + + final mat5 = cv.UMat.diag(mat4); + expect((mat5.rows, mat5.cols, mat5.channels), (100, 100, 3)); + }); + + test('cv.UMat.fromSizes', () { + final mat = cv.UMat.nd([3, 3, 100, 100], cv.MatType.CV_32FC4); + expect(mat.size, [3, 3, 100, 100]); + expect(mat.type, cv.MatType.CV_32FC4); + expect(mat.dims, 4); + + final mat1 = cv.UMat.fromUMat(mat); + expect(mat1.size, [3, 3, 100, 100]); + expect(mat1.type, cv.MatType.CV_32FC4); + expect(mat1.dims, 4); + }); + + test('cv.UMat.fromRange', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC3); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 3)); + + final mat1 = cv.UMat.fromRange(mat, 
rowStart: 10, rowEnd: 90, colStart: 10, colEnd: 90); + expect((mat1.rows, mat1.cols, mat1.channels), (80, 80, 3)); + expect(mat1.type, cv.MatType.CV_8UC3); + + final mat2 = cv.UMat.fromRect(mat, cv.Rect(10, 10, 80, 80)); + expect((mat2.rows, mat2.cols, mat2.channels), (80, 80, 3)); + expect(mat2.type, cv.MatType.CV_8UC3); + }); + + test('cv.UMat.getMat', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC3); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 3)); + expect(mat.type, cv.MatType.CV_8UC3); + + final mat1 = mat.getMat(cv.AccessFlag.ACCESS_RW); + expect((mat1.rows, mat1.cols, mat1.channels), (100, 100, 3)); + expect(mat1.type, cv.MatType.CV_8UC3); + + mat1.setTo(cv.Scalar.all(241)); + expect(mat1.at(0, 0), 241); + + mat1.dispose(); + }); + + test('cv.UMat.row col rowRange colRange', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC3); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 3)); + expect(mat.type, cv.MatType.CV_8UC3); + + final mat1 = mat.row(0); + expect((mat1.rows, mat1.cols, mat1.channels), (1, 100, 3)); + expect(mat1.type, cv.MatType.CV_8UC3); + + final mat2 = mat.col(0); + expect((mat2.rows, mat2.cols, mat2.channels), (100, 1, 3)); + expect(mat2.type, cv.MatType.CV_8UC3); + + final mat3 = mat.rowRange(0, 10); + expect((mat3.rows, mat3.cols, mat3.channels), (10, 100, 3)); + expect(mat3.type, cv.MatType.CV_8UC3); + + final mat4 = mat.colRange(0, 10); + expect((mat4.rows, mat4.cols, mat4.channels), (100, 10, 3)); + expect(mat4.type, cv.MatType.CV_8UC3); + }); + + test('cv.UMat.diag clone', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC1); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 1)); + expect(mat.type, cv.MatType.CV_8UC1); + + final mat1 = mat.diag(d: 0); + expect((mat1.rows, mat1.cols, mat1.channels), (100, 1, 1)); + expect(mat1.type, cv.MatType.CV_8UC1); + + final mat2 = mat.clone(); + expect((mat2.rows, mat2.cols, mat2.channels), (100, 100, 1)); + expect(mat2.type, cv.MatType.CV_8UC1); + }); + + test('cv.UMat copyTo convertTo setTo reshape t inv mul dot', () { + final mat = cv.UMat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC1); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 1)); + expect(mat.type, cv.MatType.CV_8UC1); + + final dst = cv.UMat.empty(); + mat.copyTo(dst); + mat.setTo(cv.Scalar.all(241)); + expect((dst.rows, dst.cols, dst.channels), (mat.rows, mat.cols, mat.channels)); + expect(dst.type, cv.MatType.CV_8UC1); + + final mat1 = mat.convertTo(cv.MatType.CV_32FC1); + expect((mat1.rows, mat1.cols, mat1.channels), (mat.rows, mat.cols, mat.channels)); + expect(mat1.type, cv.MatType.CV_32FC1); + + final mat2 = mat1.inv(); + expect((mat2.rows, mat2.cols, mat2.channels), (mat.rows, mat.cols, mat.channels)); + expect(mat2.type, cv.MatType.CV_32FC1); + + final mat3 = mat2.mul(mat1); + expect((mat3.rows, mat3.cols, mat3.channels), (mat.rows, mat.cols, mat.channels)); + expect(mat3.type, cv.MatType.CV_32FC1); + + final mat4 = mat3.reshape(10, rows: 10); + expect((mat4.rows, mat4.cols, mat4.channels), (10, 100, 10)); + expect(mat4.type, const cv.MatType.CV_32FC(10)); + + final mat5 = mat4.reshape(1, newSizes: [100, 100]); + expect(mat5.size, [100, 100]); + expect(mat5.type, const cv.MatType.CV_32FC(1)); + + final mat6 = mat5.t(); + expect((mat6.rows, mat6.cols, mat6.channels), (100, 100, 1)); + expect(mat6.type, const cv.MatType.CV_32FC(1)); + + expect(mat6.toString(), startsWith("UMat(addr=0x")); + + mat6.release(); + mat6.dispose(); 
+ + final res = mat3.dot(mat2); + expect(res, isA()); + }); + + test('cv.Mat.getUMat', () { + final mat = cv.Mat.create(rows: 100, cols: 100, type: cv.MatType.CV_8UC3); + expect((mat.rows, mat.cols, mat.channels), (100, 100, 3)); + expect(mat.type, cv.MatType.CV_8UC3); + + final umat = mat.getUMat(cv.AccessFlag.ACCESS_RW); + expect((umat.rows, umat.cols, umat.channels), (100, 100, 3)); + + umat.addref(); + umat.release(); + + umat.dispose(); + + mat.dispose(); + }); +} diff --git a/packages/dartcv/test/features2d/features2d_test.dart b/packages/dartcv/test/features2d/features2d_test.dart index 5ee27ad6..c9c0eac6 100644 --- a/packages/dartcv/test/features2d/features2d_test.dart +++ b/packages/dartcv/test/features2d/features2d_test.dart @@ -65,115 +65,310 @@ void main() async { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final ak = cv.AKAZE.empty(); - final kp = ak.detect(img); - expect(kp.length, greaterThan(512)); + for (final ak in [cv.AKAZE.empty(), cv.AKAZE.create()]) { + expect(ak.isEmpty, isA()); + final kp = ak.detect(img); + expect(kp.length, greaterThan(512)); + expect(ak.defaultName, 'Feature2D.AKAZE'); - final mask = cv.Mat.empty(); - final (kp2, desc) = ak.detectAndCompute(img, mask); - expect(kp2.length, greaterThan(512)); - expect(desc.isEmpty, false); + final mask = cv.Mat.empty(); + final (kp2, desc) = ak.detectAndCompute(img, mask); + expect(kp2.length, greaterThan(512)); + expect(desc.isEmpty, false); - ak.dispose(); + final type = ak.descriptorType; + expect(type, isA()); + ak.descriptorType = cv.AKAZEDescriptorType.DESCRIPTOR_KAZE; + expect(ak.descriptorType, cv.AKAZEDescriptorType.DESCRIPTOR_KAZE); + + ak.descriptorSize = 10; + expect(ak.descriptorSize, 10); + + ak.descriptorChannels = 1; + expect(ak.descriptorChannels, 1); + + ak.threshold = 0.1; + expect(ak.threshold, closeTo(0.1, 1.0e-5)); + + ak.nOctaves = 10; + expect(ak.nOctaves, 10); + + ak.nOctaveLayers = 10; + expect(ak.nOctaveLayers, 10); + + ak.diffusivity = cv.KAZEDiffusivityType.DIFF_CHARBONNIER; + expect(ak.diffusivity, cv.KAZEDiffusivityType.DIFF_CHARBONNIER); + + ak.maxPoints = 100; + expect(ak.maxPoints, 100); + + expect(ak.toString(), startsWith("AKAZE(addr=0x")); + + ak.dispose(); + } }); test('cv.AgastFeatureDetector', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final ad = cv.AgastFeatureDetector.empty(); - final kp = ad.detect(img); - expect(kp.length, greaterThan(2800)); - ad.dispose(); + for (final ad in [cv.AgastFeatureDetector.empty(), cv.AgastFeatureDetector.create()]) { + expect(ad.isEmpty, isA()); + expect(ad.defaultName, 'Feature2D.AgastFeatureDetector'); + + final kp = ad.detect(img); + expect(kp.length, greaterThan(2800)); + + expect(() => ad.detectAndCompute(cv.Mat.empty(), cv.Mat.empty()), throwsUnsupportedError); + + ad.threshold = 100; + expect(ad.threshold, closeTo(100, 1.0e-5)); + + ad.nonmaxSuppression = true; + expect(ad.nonmaxSuppression, true); + + ad.type = cv.AgastDetectorType.AGAST_5_8; + expect(ad.type, cv.AgastDetectorType.AGAST_5_8); + + expect(ad.toString(), startsWith("AgastFeatureDetector(addr=0x")); + + ad.dispose(); + } }); test('cv.BRISK', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final br = cv.BRISK.empty(); - final kp = br.detect(img); - expect(kp.length, greaterThan(512)); - - final mask = cv.Mat.empty(); - final (kp2, desc) = br.detectAndCompute(img, mask); - expect(kp2.length, 
greaterThan(512)); - expect(desc.isEmpty, false); - - br.dispose(); + for (final br in [ + cv.BRISK.empty(), + cv.BRISK.create(), + cv.BRISK.create1(radiusList: [1, 2, 3], numberList: [4, 5, 6]), + cv.BRISK.create2(thresh: 30, octaves: 3, radiusList: [1, 2, 3], numberList: [4, 5, 6]), + ]) { + expect(br.isEmpty, isA()); + expect(br.defaultName, 'Feature2D.BRISK'); + + final kp = br.detect(img); + expect(kp.length, greaterThan(512)); + + final mask = cv.Mat.empty(); + final (kp2, desc) = br.detectAndCompute(img, mask); + expect(kp2.length, greaterThan(512)); + expect(desc.isEmpty, false); + + br.threshold = 30; + expect(br.threshold, closeTo(30, 1.0e-5)); + + br.octaves = 3; + expect(br.octaves, 3); + + br.patternScale = 1.0; + expect(br.patternScale, closeTo(1.0, 1.0e-5)); + + expect(br.toString(), startsWith("BRISK(addr=0x")); + br.dispose(); + } }); test('cv.FastFeatureDetector', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final fd = cv.FastFeatureDetector.empty(); - final kp = fd.detect(img); - expect(kp.length, greaterThan(2690)); + for (final fd in [cv.FastFeatureDetector.empty(), cv.FastFeatureDetector.create()]) { + expect(fd.isEmpty, isA()); + expect(fd.defaultName, 'Feature2D.FastFeatureDetector'); + + final kp = fd.detect(img); + expect(kp.length, greaterThan(2690)); - final fd1 = cv.FastFeatureDetector.create(); - final kp1 = fd1.detect(img); - expect(kp1.length, greaterThan(2690)); + expect(() => fd.detectAndCompute(cv.Mat.empty(), cv.Mat.empty()), throwsUnsupportedError); - fd.dispose(); + fd.threshold = 10; + expect(fd.threshold, closeTo(10, 1.0e-5)); + + fd.nonmaxSuppression = true; + expect(fd.nonmaxSuppression, true); + + fd.type = cv.FastFeatureDetectorType.TYPE_5_8; + expect(fd.type, cv.FastFeatureDetectorType.TYPE_5_8); + + expect(fd.toString(), startsWith("FastFeatureDetector(addr=0x")); + + fd.dispose(); + } }); test('cv.GFTTDetector', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final gf = cv.GFTTDetector.empty(); - final kp = gf.detect(img); - expect(kp.length, greaterThan(512)); + for (final detector in [cv.GFTTDetector.empty(), cv.GFTTDetector.create(), cv.GFTTDetector.create1()]) { + expect(detector.isEmpty, isA()); + expect(detector.defaultName, 'Feature2D.GFTTDetector'); + + final kp = detector.detect(img); + expect(kp.length, greaterThan(512)); + + expect(() => detector.detectAndCompute(cv.Mat.empty(), cv.Mat.empty()), throwsUnsupportedError); + + detector.maxFeatures = 100; + expect(detector.maxFeatures, 100); + + detector.qualityLevel = 0.1; + expect(detector.qualityLevel, closeTo(0.1, 1.0e-5)); + + detector.minDistance = 10.0; + expect(detector.minDistance, closeTo(10.0, 1.0e-5)); + + detector.blockSize = 3; + expect(detector.blockSize, 3); - gf.dispose(); + detector.gradientSize = 10; + expect(detector.gradientSize, 10); + + detector.harrisDetector = true; + expect(detector.harrisDetector, true); + + detector.k = 0.04; + expect(detector.k, closeTo(0.04, 1.0e-5)); + + expect(detector.toString(), startsWith("GFTTDetector(addr=0x")); + + detector.dispose(); + } }); test('cv.KAZE', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final ka = cv.KAZE.empty(); - final kp = ka.detect(img); - expect(kp.length, greaterThan(0)); + for (final ka in [cv.KAZE.empty(), cv.KAZE.create()]) { + expect(ka.isEmpty, isA()); + expect(ka.defaultName, 'Feature2D.KAZE'); + + final mask = 
cv.Mat.empty(); + final (kp2, desc) = ka.detectAndCompute(img, mask); + expect(kp2.length, greaterThan(0)); + expect(desc.isEmpty, false); + + ka.extended = true; + expect(ka.extended, true); + + ka.upright = true; + expect(ka.upright, true); + + ka.threshold = 0.001; + expect(ka.threshold, closeTo(0.001, 1.0e-5)); + + ka.octaves = 3; + expect(ka.octaves, 3); - final mask = cv.Mat.empty(); - final (kp2, desc) = ka.detectAndCompute(img, mask); - expect(kp2.length, greaterThan(0)); - expect(desc.isEmpty, false); + ka.nOctaveLayers = 3; + expect(ka.nOctaveLayers, 3); - ka.dispose(); + ka.diffusivity = cv.KAZEDiffusivityType.DIFF_PM_G2; + expect(ka.diffusivity, cv.KAZEDiffusivityType.DIFF_PM_G2); + + expect(ka.toString(), startsWith("KAZE(addr=0x")); + + ka.dispose(); + } }); test('cv.MSER', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final gf = cv.MSER.empty(); - final kp = gf.detect(img); - expect(kp.length, greaterThan(0)); + for (final ms in [cv.MSER.empty(), cv.MSER.create()]) { + expect(ms.isEmpty, isA()); + expect(ms.defaultName, 'Feature2D.MSER'); + + final kp = ms.detect(img); + expect(kp.length, greaterThan(0)); + + ms.delta = 5; + expect(ms.delta, 5); + + ms.minArea = 100; + expect(ms.minArea, 100); + + ms.maxArea = 1000; + expect(ms.maxArea, 1000); + + ms.maxVariation = 0.01; + expect(ms.maxVariation, 0.01); + + ms.minDiversity = 0.01; + expect(ms.minDiversity, 0.01); + + ms.maxEvolution = 100; + expect(ms.maxEvolution, 100); + + ms.areaThreshold = 1.0; + expect(ms.areaThreshold, 1.0); - gf.dispose(); + ms.minMargin = 0.01; + expect(ms.minMargin, 0.01); + + ms.edgeBlurSize = 3; + expect(ms.edgeBlurSize, 3); + + ms.pass2Only = true; + expect(ms.pass2Only, true); + + expect(ms.toString(), startsWith("MSER(addr=0x")); + + ms.dispose(); + } }); test('cv.ORB', () { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_COLOR); expect(img.isEmpty, false); - final ka = cv.ORB.empty(); - final kp = ka.detect(img); + for (final orb in [cv.ORB.empty(), cv.ORB.create()]) { + expect(orb.isEmpty, isA()); + expect(orb.defaultName, 'Feature2D.ORB'); + + final kp = orb.detect(img); + expect(kp.length, 500); + + final mask = cv.Mat.empty(); + final (kp2, desc) = orb.detectAndCompute(img, mask); + expect(kp2.length, 500); + expect(desc.isEmpty, false); + + orb.maxFeatures = 100; + expect(orb.maxFeatures, 100); + + orb.scaleFactor = 1.2; + expect(orb.scaleFactor, 1.2); + + orb.nLevels = 8; + expect(orb.nLevels, 8); + + orb.edgeThreshold = 31; + expect(orb.edgeThreshold, 31); - expect(kp.length, 500); + orb.firstLevel = 0; + expect(orb.firstLevel, 0); - final orb = cv.ORB.create(); - final kp1 = orb.detect(img); - expect(kp1.length, 500); + orb.WTA_K = 2; + expect(orb.WTA_K, 2); - final mask = cv.Mat.empty(); - final (kp2, desc) = ka.detectAndCompute(img, mask); - expect(kp2.length, 500); - expect(desc.isEmpty, false); + orb.scoreType = cv.ORBScoreType.FAST_SCORE; + expect(orb.scoreType, cv.ORBScoreType.FAST_SCORE); - orb.dispose(); + orb.patchSize = 31; + expect(orb.patchSize, 31); + + orb.fastThreshold = 20; + expect(orb.fastThreshold, 20); + + expect(orb.toString(), startsWith('ORB(addr=0x')); + + orb.dispose(); + } }); test('cv.SimpleBlobDetector', () { @@ -308,6 +503,16 @@ void main() async { final kp1 = detector1.detect(img); expect(kp1.length, 0); + expect(detector1.defaultName, 'Feature2D.SimpleBlobDetector'); + expect(detector1.isEmpty, isA()); + + detector1.params = params; + expect(detector1.params, params); + + 
expect(detector1.getBlobContours(), isA()); + + expect(detector1.toString(), startsWith('SimpleBlobDetector(addr=0x')); + detector1.dispose(); params.dispose(); }); @@ -318,18 +523,17 @@ void main() async { final desc2 = cv.imread("test/images/sift_descriptor.png", flags: cv.IMREAD_GRAYSCALE); expect(desc2.isEmpty, false); - final matcher = cv.BFMatcher.empty(); - final dmatches = matcher.knnMatch(desc1, desc2, 2); - expect(dmatches.length, greaterThan(0)); + for (final matcher in [cv.BFMatcher.empty(), cv.BFMatcher.create()]) { + final dmatches = matcher.knnMatch(desc1, desc2, 2); + expect(dmatches.length, greaterThan(0)); - final matcher1 = cv.BFMatcher.create(); - final dmatches1 = matcher1.knnMatch(desc1, desc2, 2); - expect(dmatches1.length, greaterThan(0)); + final matches = matcher.match(desc1, desc2); + expect(matches.length, greaterThan(0)); - final matches = matcher.match(desc1, desc2); - expect(matches.length, greaterThan(0)); + expect(matcher.toString(), startsWith("BFMatcher(addr=0x")); - matcher.dispose(); + matcher.dispose(); + } }); test('cv.FlannBasedMatcher', () { @@ -364,6 +568,8 @@ void main() async { final dmatches = matcher.knnMatch(desc11, desc21, 2); expect(dmatches.length, greaterThan(0)); + expect(matcher.toString(), startsWith("FlannBasedMatcher(addr=0x")); + matcher.dispose(); } }); @@ -372,16 +578,47 @@ void main() async { final img = cv.imread("test/images/lenna.png", flags: cv.IMREAD_GRAYSCALE); expect(img.isEmpty, false); - final si = cv.SIFT.empty(); - final kp = si.detect(img); - expect(kp.length, greaterThan(0)); + for (final si in [ + cv.SIFT.empty(), + cv.SIFT.create( + nfeatures: 0, + nOctaveLayers: 3, + contrastThreshold: 0.04, + edgeThreshold: 10, + sigma: 1.6, + descriptorType: cv.MatType.CV_32F, + ), + ]) { + expect(si.defaultName, 'Feature2D.SIFT'); + expect(si.isEmpty, isA()); + + final kp = si.detect(img); + expect(kp.length, greaterThan(0)); + + final mask = cv.Mat.empty(); + final (kp2, desc) = si.detectAndCompute(img, mask); + expect(kp2.length, greaterThan(0)); + expect(desc.isEmpty, false); + + si.NFeatures = 100; + expect(si.NFeatures, 100); - final mask = cv.Mat.empty(); - final (kp2, desc) = si.detectAndCompute(img, mask); - expect(kp2.length, greaterThan(0)); - expect(desc.isEmpty, false); + si.nOctaveLayers = 10; + expect(si.nOctaveLayers, 10); - si.dispose(); + si.contrastThreshold = 0.05; + expect(si.contrastThreshold, 0.05); + + si.edgeThreshold = 15; + expect(si.edgeThreshold, 15); + + si.sigma = 2.0; + expect(si.sigma, 2.0); + + expect(si.toString(), startsWith("SIFT(addr=0x")); + + si.dispose(); + } }); test('cv.drawMatches', () { diff --git a/packages/dartcv/test/font/hei.ttf b/packages/dartcv/test/font/hei.ttf new file mode 100644 index 00000000..50ab69dd Binary files /dev/null and b/packages/dartcv/test/font/hei.ttf differ diff --git a/packages/dartcv/test/images_out/hei.png b/packages/dartcv/test/images_out/hei.png new file mode 100644 index 00000000..6af885dd Binary files /dev/null and b/packages/dartcv/test/images_out/hei.png differ diff --git a/packages/dartcv/test/videoio/videoio_test.dart b/packages/dartcv/test/videoio/videoio_test.dart index 11fc6e0d..57b3c954 100644 --- a/packages/dartcv/test/videoio/videoio_test.dart +++ b/packages/dartcv/test/videoio/videoio_test.dart @@ -19,17 +19,29 @@ void main() async { }); test('cv.VideoWriter.fromFile', () { - final writer = cv.VideoWriter.fromFile("test/images/small2.mp4", "mp4v", 60, ( - 400, - 300, - ), apiPreference: cv.CAP_ANY); + final writer = cv.VideoWriter.fromFile( + 
"test/images/small2.mp4", + "mp4v", + 60, + ( + 400, + 300, + ), + apiPreference: cv.CAP_ANY, + ); final frame = cv.Mat.ones(400, 300, cv.MatType.CV_8UC3); writer.write(frame); writer.release(); - final writer1 = cv.VideoWriter.fromFile("test/images/small2.mp4", "mp4v", 60, ( - 400, - 300, - ), apiPreference: cv.CAP_ANY); + final writer1 = cv.VideoWriter.fromFile( + "test/images/small2.mp4", + "mp4v", + 60, + ( + 400, + 300, + ), + apiPreference: cv.CAP_ANY, + ); expect(writer1.isOpened, true); }); diff --git a/packages/opencv_core/CHANGELOG.md b/packages/opencv_core/CHANGELOG.md index 1767bcc8..ba1d364b 100644 --- a/packages/opencv_core/CHANGELOG.md +++ b/packages/opencv_core/CHANGELOG.md @@ -1,3 +1,11 @@ +## 1.4.5 + +* bump dependencies +* add `UMat` +* [features2d] add more functions/constructions +* add custom loggers, support user-defined logger callback +* add `cv.freetype` module + ## 1.4.4 * add `Mat.reinterpret` diff --git a/packages/opencv_core/README.md b/packages/opencv_core/README.md index 47b6335f..deb6c315 100644 --- a/packages/opencv_core/README.md +++ b/packages/opencv_core/README.md @@ -7,6 +7,9 @@ OpenCV for Flutter, if `highgui` or `videoio` is required, use [opencv_dart](htt > OpenCV SDK (~100M) will be downloaded via `FetchContent` of cmake, you can > set `DARTCV_CACHE_DIR` environment variable to cache it and avoid downloading it again. > e.g., `export DARTCV_CACHE_DIR=$HOME/.cache/dartcv` +> +> - Q&A: [#212](https://github.com/rainyl/opencv_dart/issues/212) or open new issues. +> - If you are using flutter with [Native Assets](https://github.com/flutter/flutter/issues/129757) feature supported, consider using 2.x version, refer to [2.x](https://github.com/rainyl/opencv_dart/tree/2.x) ## Supported platforms @@ -48,11 +51,33 @@ hooks: - videoio ``` -- valid modules: `core`, `calib3d`, `contrib`, `dnn`, `features2d`, `flann`, `highgui`, `imgproc`, `imgcodecs`, `objdetect`, `photo`, `stitching`, `video`, `videoio` +- valid modules: `core`, `calib3d`, `contrib`, `dnn`, `features2d`, `flann`, `freetype`, `highgui`, `imgproc`, `imgcodecs`, `objdetect`, `photo`, `stitching`, `video`, `videoio` +- `freetype` module is disabled by default, if you need it, add it to `include_modules`. - Use `exclude_modules` to disable specific modules, or `include_modules` to enable specific modules. - If neither is specified, all modules except `highgui` will be enabled. - also refer to [example/pubspec.yaml](https://github.com/rainyl/opencv_dart/blob/main/packages/opencv_dart/example/pubspec.yaml) + +### Note for macOS and iOS users + +`freetype` is disabled by default, if you need them, add the following lines to `//Podfile`. + +```ruby +target 'Runner' do + use_frameworks! + use_modular_headers! + + flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__)) + + pod 'DartCvMacOS/freetype', '~> 4.12.0.2' # add this line if you need freetype on macos + pod 'DartCvIOS/freetype', '~> 4.12.0.2' # add this line if you need freetype on ios + + target 'RunnerTests' do + inherit! :search_paths + end +end +``` + ## Package Size ![opencv_dart_size_report](images/opencv_core_size_report.svg) diff --git a/packages/opencv_core/pubspec.yaml b/packages/opencv_core/pubspec.yaml index 984c0ee5..4a44d7b3 100644 --- a/packages/opencv_core/pubspec.yaml +++ b/packages/opencv_core/pubspec.yaml @@ -3,7 +3,7 @@ description: | OpenCV4 bindings for Flutter. 
version: 1.4.4 opencv_version: 4.12.0.0 -dartcv_version: 4.12.0.1 +dartcv_version: 4.12.0.2 repository: https://github.com/rainyl/opencv_dart homepage: https://github.com/rainyl/opencv_dart/tree/main/packages/opencv_core @@ -14,7 +14,7 @@ environment: dependencies: flutter: sdk: flutter - dartcv4: 1.1.7 + dartcv4: 1.1.8 # dartcv4: # path: ../dartcv diff --git a/packages/opencv_dart/CHANGELOG.md b/packages/opencv_dart/CHANGELOG.md index 6325f5c9..72e6ec2d 100644 --- a/packages/opencv_dart/CHANGELOG.md +++ b/packages/opencv_dart/CHANGELOG.md @@ -1,3 +1,11 @@ +## 1.4.5 + +* bump dependencies +* add `UMat` +* [features2d] add more functions/constructions +* add custom loggers, support user-defined logger callback +* add `cv.freetype` module + ## 1.4.4 * add `Mat.reinterpret` diff --git a/packages/opencv_dart/README.md b/packages/opencv_dart/README.md index d567c11a..46f9cb92 100644 --- a/packages/opencv_dart/README.md +++ b/packages/opencv_dart/README.md @@ -11,7 +11,7 @@ use [opencv_core](https://pub.dev/packages/opencv_core) > e.g., `export DARTCV_CACHE_DIR=$HOME/.cache/dartcv` > > - Q&A: [#212](https://github.com/rainyl/opencv_dart/issues/212) or open new issues. -> - ~~If you are using flutter with [Native Assets](https://github.com/flutter/flutter/issues/129757) feature supported, consider using v2.x version, see more in [native-assets branch](https://github.com/rainyl/opencv_dart/tree/native-assets)~~ Won't update until `Native Assets` being stable. +> - If you are using Flutter with the [Native Assets](https://github.com/flutter/flutter/issues/129757) feature, consider using the 2.x version; refer to the [2.x](https://github.com/rainyl/opencv_dart/tree/2.x) branch. ## Supported platforms @@ -46,18 +46,40 @@ hooks: - contrib - dnn - features2d - - core - include_modules: - core # core is always required thus will be ignored even configured here. + include_modules: + - core - imgproc - videoio ``` -- valid modules: `core`, `calib3d`, `contrib`, `dnn`, `features2d`, `flann`, `highgui`, `imgproc`, `imgcodecs`, `objdetect`, `photo`, `stitching`, `video`, `videoio` +- valid modules: `core`, `calib3d`, `contrib`, `dnn`, `features2d`, `flann`, `freetype`, `highgui`, `imgproc`, `imgcodecs`, `objdetect`, `photo`, `stitching`, `video`, `videoio` +- The `freetype` module is disabled by default; if you need it, add it to `include_modules`. - Use `exclude_modules` to disable specific modules, or `include_modules` to enable specific modules. - If neither is specified, all modules except `highgui` will be enabled. - also refer to [example/pubspec.yaml](https://github.com/rainyl/opencv_dart/blob/main/packages/opencv_dart/example/pubspec.yaml) +### Note for macOS and iOS users + +`freetype` is disabled by default; if you need it, add the following lines to your `ios/Podfile` or `macos/Podfile`. + +```ruby +target 'Runner' do + use_frameworks! + use_modular_headers! + + flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__)) + + pod 'DartCvMacOS/freetype', '~> 4.12.0.2' # add this line if you need freetype on macos + pod 'DartCvIOS/freetype', '~> 4.12.0.2' # add this line if you need freetype on ios + + target 'RunnerTests' do + inherit! :search_paths + end +end +``` + + ## Package Size ![opencv_dart_size_report](images/opencv_dart_size_report.svg) diff --git a/packages/opencv_dart/example/ios/Podfile b/packages/opencv_dart/example/ios/Podfile index 164df534..7d95acb4 100644 --- a/packages/opencv_dart/example/ios/Podfile +++ b/packages/opencv_dart/example/ios/Podfile @@ -32,6 +32,9 @@ target 'Runner' do use_modular_headers! 
flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__)) + + pod 'DartCvIOS/freetype', '~> 4.12.0.2' + target 'RunnerTests' do inherit! :search_paths end diff --git a/packages/opencv_dart/example/lib/main.dart b/packages/opencv_dart/example/lib/main.dart index 265f0755..60de7ece 100644 --- a/packages/opencv_dart/example/lib/main.dart +++ b/packages/opencv_dart/example/lib/main.dart @@ -18,6 +18,7 @@ class MyApp extends StatefulWidget { class _MyAppState extends State { var images = []; + final _ft = cv.FreeType2.create(); @override void initState() { @@ -77,11 +78,6 @@ class _MyAppState extends State { onPressed: () async { final data = await DefaultAssetBundle.of(context).load("images/lenna.png"); final bytes = data.buffer.asUint8List(); - // heavy computation - // final (gray, blur) = await heavyTask(bytes); - // setState(() { - // images = [bytes, gray, blur]; - // }); final (gray, blur) = await heavyTaskAsync(cv.imdecode(bytes, cv.IMREAD_COLOR)); setState(() { images = [bytes, cv.imencode(".png", gray).$2, cv.imencode(".png", blur).$2]; diff --git a/packages/opencv_dart/example/macos/Podfile b/packages/opencv_dart/example/macos/Podfile index b52666a1..d60398da 100644 --- a/packages/opencv_dart/example/macos/Podfile +++ b/packages/opencv_dart/example/macos/Podfile @@ -31,6 +31,9 @@ target 'Runner' do use_modular_headers! flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__)) + + pod 'DartCvMacOS/freetype', '~> 4.12.0.2' + target 'RunnerTests' do inherit! :search_paths end diff --git a/packages/opencv_dart/example/macos/Runner.xcodeproj/project.pbxproj b/packages/opencv_dart/example/macos/Runner.xcodeproj/project.pbxproj index 53fd8094..32e26b3f 100644 --- a/packages/opencv_dart/example/macos/Runner.xcodeproj/project.pbxproj +++ b/packages/opencv_dart/example/macos/Runner.xcodeproj/project.pbxproj @@ -557,7 +557,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.14; + MACOSX_DEPLOYMENT_TARGET = 10.15; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; @@ -639,7 +639,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.14; + MACOSX_DEPLOYMENT_TARGET = 10.15; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; @@ -689,7 +689,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.14; + MACOSX_DEPLOYMENT_TARGET = 10.15; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; diff --git a/packages/opencv_dart/pubspec.yaml b/packages/opencv_dart/pubspec.yaml index 61eb3285..2622188a 100644 --- a/packages/opencv_dart/pubspec.yaml +++ b/packages/opencv_dart/pubspec.yaml @@ -3,7 +3,7 @@ description: | OpenCV4 bindings for Flutter, using dart:ffi. version: 1.4.4 opencv_version: 4.12.0.0 -dartcv_version: 4.12.0.1 +dartcv_version: 4.12.0.2 repository: https://github.com/rainyl/opencv_dart homepage: https://github.com/rainyl/opencv_dart/tree/main/packages/opencv_dart @@ -14,7 +14,7 @@ environment: dependencies: flutter: sdk: flutter - dartcv4: 1.1.7 + dartcv4: 1.1.8 # dartcv4: # path: ../dartcv