/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_OBJDETECT_HPP
#define OPENCV_OBJDETECT_HPP

#include "opencv2/core.hpp"

/**
@defgroup objdetect Object Detection

Haar Feature-based Cascade Classifier for Object Detection
----------------------------------------------------------

The object detector described below has been initially proposed by Paul Viola @cite Viola01 and
improved by Rainer Lienhart @cite Lienhart02 .

First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
trained with a few hundred sample views of a particular object (i.e., a face or a car), called
positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary
images of the same size.

After a classifier is trained, it can be applied to a region of interest (of the same size as used
during the training) in an input image. The classifier outputs a "1" if the region is likely to show
the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can
move the search window across the image and check every location using the classifier. The
classifier is designed so that it can be easily "resized" in order to be able to find the objects of
interest at different sizes, which is more efficient than resizing the image itself. So, to find an
object of an unknown size in the image the scan procedure should be done several times at different
scales.

The word "cascade" in the classifier name means that the resultant classifier consists of several
simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some
stage the candidate is rejected or all the stages are passed. The word "boosted" means that the
classifiers at every stage of the cascade are complex themselves and they are built out of basic
classifiers using one of four different boosting techniques (weighted voting). Currently Discrete
Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are
decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic
classifiers, and are calculated as described below. The current algorithm uses the following
Haar-like features:

![image](pics/haarfeatures.png)

The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within
the region of interest and the scale (this scale is not the same as the scale used at the detection
stage, though these two scales are multiplied). For example, in the case of the third line feature
(2c) the response is calculated as the difference between the sum of image pixels under the
rectangle covering the whole feature (including the two white stripes and the black stripe in the
middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to
compensate for the differences in the size of areas. The sums of pixel values over rectangular
regions are calculated rapidly using integral images (see below and the integral description).

To see the object detector at work, have a look at the facedetect demo:
<https://github.com/opencv/opencv/tree/3.4/samples/cpp/dbt_face_detection.cpp>

The following reference is for the detection part only. There is a separate application called
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.

@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection
using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
<http://research.microsoft.com/en-us/um/people/viola/Pubs/Detect/violaJones_CVPR2001.pdf>

@{
    @defgroup objdetect_c C API
@}
*/
typedef struct CvHaarClassifierCascade CvHaarClassifierCascade;

namespace cv
{

//! @addtogroup objdetect
//! @{

///////////////////////////// Object Detection ////////////////////////////

//! class for grouping object candidates, detected by Cascade Classifier, HOG etc.
//! instance of the class is to be passed to cv::partition (see cxoperations.hpp)
class CV_EXPORTS SimilarRects
{
public:
    SimilarRects(double _eps) : eps(_eps) {}
    inline bool operator()(const Rect& r1, const Rect& r2) const
    {
        double delta = eps * ((std::min)(r1.width, r2.width) + (std::min)(r1.height, r2.height)) * 0.5;
        return std::abs(r1.x - r2.x) <= delta &&
               std::abs(r1.y - r2.y) <= delta &&
               std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
               std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
    }
    double eps;
};
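
/*
A minimal usage sketch: clustering near-duplicate candidate rectangles with cv::partition and
SimilarRects (the rectangles below are purely illustrative):

    std::vector<cv::Rect> candidates = { cv::Rect(10, 10, 50, 50), cv::Rect(12, 11, 49, 51), cv::Rect(200, 80, 40, 40) };
    std::vector<int> labels;
    // rectangles whose corresponding corners differ by no more than eps * (average side length) share a label
    int nclasses = cv::partition(candidates, labels, cv::SimilarRects(0.2));
*/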
/** @brief Groups the object candidate rectangles.

@param rectList Input/output vector of rectangles. Output vector includes retained and grouped
rectangles. (The Python list is not modified in place.)
@param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a
group of rectangles to retain it.
@param eps Relative difference between sides of the rectangles to merge them into a group.

The function is a wrapper for the generic function partition . It clusters all the input rectangles
using the rectangle equivalence criteria that combines rectangles with similar sizes and similar
locations. The similarity is defined by eps. When eps=0 , no clustering is done at all. If
\f$\texttt{eps}\rightarrow +\infty\f$ , all the rectangles are put in one cluster. Then, the small
clusters containing less than or equal to groupThreshold rectangles are rejected. In each other
cluster, the average rectangle is computed and put into the output rectangle list.
*/
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps = 0.2);
/** @overload */
CV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights,
                                  int groupThreshold, double eps = 0.2);
/** @overload */
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, int groupThreshold,
                                double eps, std::vector<int>* weights, std::vector<double>* levelWeights );
/** @overload */
CV_EXPORTS void groupRectangles(std::vector<Rect>& rectList, std::vector<int>& rejectLevels,
                                std::vector<double>& levelWeights, int groupThreshold, double eps = 0.2);
/** @overload */
CV_EXPORTS void groupRectangles_meanshift(std::vector<Rect>& rectList, std::vector<double>& foundWeights,
                                          std::vector<double>& foundScales,
                                          double detectThreshold = 0.0, Size winDetSize = Size(64, 128));
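
/*
A minimal usage sketch for groupRectangles (the input rectangles are illustrative). With
groupThreshold = 1 a cluster must contain at least two similar rectangles to survive, so the
isolated rectangle below is dropped and the two overlapping ones are replaced by their average:

    std::vector<cv::Rect> boxes = { cv::Rect(10, 10, 50, 50), cv::Rect(12, 12, 50, 50), cv::Rect(300, 40, 60, 60) };
    cv::groupRectangles(boxes, 1, 0.2);   // boxes now holds the single averaged rectangle
*/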
template<> CV_EXPORTS void DefaultDeleter<CvHaarClassifierCascade>::operator ()(CvHaarClassifierCascade* obj) const;

enum { CASCADE_DO_CANNY_PRUNING    = 1,
       CASCADE_SCALE_IMAGE         = 2,
       CASCADE_FIND_BIGGEST_OBJECT = 4,
       CASCADE_DO_ROUGH_SEARCH     = 8
     };

class CV_EXPORTS_W BaseCascadeClassifier : public Algorithm
{
public:
    virtual ~BaseCascadeClassifier();
    virtual bool empty() const CV_OVERRIDE = 0;
    virtual bool load( const String& filename ) = 0;
    virtual void detectMultiScale( InputArray image,
                                   CV_OUT std::vector<Rect>& objects,
                                   double scaleFactor,
                                   int minNeighbors, int flags,
                                   Size minSize, Size maxSize ) = 0;

    virtual void detectMultiScale( InputArray image,
                                   CV_OUT std::vector<Rect>& objects,
                                   CV_OUT std::vector<int>& numDetections,
                                   double scaleFactor,
                                   int minNeighbors, int flags,
                                   Size minSize, Size maxSize ) = 0;

    virtual void detectMultiScale( InputArray image,
                                   CV_OUT std::vector<Rect>& objects,
                                   CV_OUT std::vector<int>& rejectLevels,
                                   CV_OUT std::vector<double>& levelWeights,
                                   double scaleFactor,
                                   int minNeighbors, int flags,
                                   Size minSize, Size maxSize,
                                   bool outputRejectLevels ) = 0;

    virtual bool isOldFormatCascade() const = 0;
    virtual Size getOriginalWindowSize() const = 0;
    virtual int getFeatureType() const = 0;
    virtual void* getOldCascade() = 0;

    class CV_EXPORTS MaskGenerator
    {
    public:
        virtual ~MaskGenerator() {}
        virtual Mat generateMask(const Mat& src)=0;
        virtual void initializeMask(const Mat& /*src*/) { }
    };
    virtual void setMaskGenerator(const Ptr<MaskGenerator>& maskGenerator) = 0;
    virtual Ptr<MaskGenerator> getMaskGenerator() = 0;
};

/** @example samples/cpp/facedetect.cpp
This program demonstrates usage of the Cascade classifier class
\image html Cascade_Classifier_Tutorial_Result_Haar.jpg "Sample screenshot" width=321 height=254
*/
/** @brief Cascade classifier class for object detection.
 */
class CV_EXPORTS_W CascadeClassifier
{
public:
    CV_WRAP CascadeClassifier();
    /** @brief Loads a classifier from a file.

    @param filename Name of the file from which the classifier is loaded.
    */
    CV_WRAP CascadeClassifier(const String& filename);
    ~CascadeClassifier();
    /** @brief Checks whether the classifier has been loaded.
    */
    CV_WRAP bool empty() const;
    /** @brief Loads a classifier from a file.

    @param filename Name of the file from which the classifier is loaded. The file may contain an old
    HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
    traincascade application.
    */
    CV_WRAP bool load( const String& filename );
    /** @brief Reads a classifier from a FileStorage node.

    @note The file may contain a new cascade classifier (trained by the traincascade application) only.
    */
    CV_WRAP bool read( const FileNode& node );
    /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
    of rectangles.

    @param image Matrix of the type CV_8U containing an image where objects are detected.
    @param objects Vector of rectangles where each rectangle contains the detected object, the
    rectangles may be partially outside the original image.
    @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
    @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
    to retain it.
    @param flags Parameter with the same meaning for an old cascade as in the function
    cvHaarDetectObjects. It is not used for a new cascade.
    @param minSize Minimum possible object size. Objects smaller than that are ignored.
    @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.

    The function is parallelized with the TBB library.

    @note
       -   (Python) A face detection example using cascade classifiers can be found at
            opencv_source_code/samples/python/facedetect.py
    */
    CV_WRAP void detectMultiScale( InputArray image,
                          CV_OUT std::vector<Rect>& objects,
                          double scaleFactor = 1.1,
                          int minNeighbors = 3, int flags = 0,
                          Size minSize = Size(),
                          Size maxSize = Size() );
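
    /*
    A minimal usage sketch, assuming a trained frontal-face cascade file is available at the
    (illustrative) path below and `gray` holds an 8-bit grayscale input image:

        cv::CascadeClassifier face_cascade;
        if (!face_cascade.load("haarcascade_frontalface_alt.xml"))
            return;                              // failed to load the model
        std::vector<cv::Rect> faces;
        face_cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));
    */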
    /** @overload

    @param image Matrix of the type CV_8U containing an image where objects are detected.
    @param objects Vector of rectangles where each rectangle contains the detected object, the
    rectangles may be partially outside the original image.
    @param numDetections Vector of detection numbers for the corresponding objects. An object's number
    of detections is the number of neighboring positively classified rectangles that were joined
    together to form the object.
    @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
    @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
    to retain it.
    @param flags Parameter with the same meaning for an old cascade as in the function
    cvHaarDetectObjects. It is not used for a new cascade.
    @param minSize Minimum possible object size. Objects smaller than that are ignored.
    @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale.
    */
    CV_WRAP_AS(detectMultiScale2) void detectMultiScale( InputArray image,
                          CV_OUT std::vector<Rect>& objects,
                          CV_OUT std::vector<int>& numDetections,
                          double scaleFactor=1.1,
                          int minNeighbors=3, int flags=0,
                          Size minSize=Size(),
                          Size maxSize=Size() );

    /** @overload
    This function allows you to retrieve the final stage decision certainty of classification.
    For this, one needs to set `outputRejectLevels` to true and provide the `rejectLevels` and `levelWeights` parameters.
    For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
    This value can then be used to separate strong from weaker classifications.

    A code sample on how to use it efficiently can be found below:
    @code
    Mat img;
    vector<double> weights;
    vector<int> levels;
    vector<Rect> detections;
    CascadeClassifier model("/path/to/your/model.xml");
    model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
    cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
    @endcode
    */
    CV_WRAP_AS(detectMultiScale3) void detectMultiScale( InputArray image,
                                  CV_OUT std::vector<Rect>& objects,
                                  CV_OUT std::vector<int>& rejectLevels,
                                  CV_OUT std::vector<double>& levelWeights,
                                  double scaleFactor = 1.1,
                                  int minNeighbors = 3, int flags = 0,
                                  Size minSize = Size(),
                                  Size maxSize = Size(),
                                  bool outputRejectLevels = false );

    CV_WRAP bool isOldFormatCascade() const;
    CV_WRAP Size getOriginalWindowSize() const;
    CV_WRAP int getFeatureType() const;
    void* getOldCascade();

    CV_WRAP static bool convert(const String& oldcascade, const String& newcascade);

    void setMaskGenerator(const Ptr<BaseCascadeClassifier::MaskGenerator>& maskGenerator);
    Ptr<BaseCascadeClassifier::MaskGenerator> getMaskGenerator();

    Ptr<BaseCascadeClassifier> cc;
};

CV_EXPORTS Ptr<BaseCascadeClassifier::MaskGenerator> createFaceDetectionMaskGenerator();
//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////

//! struct for detection region of interest (ROI)
struct DetectionROI
{
   //! scale(size) of the bounding box
   double scale;
   //! set of requested locations to be evaluated
   std::vector<cv::Point> locations;
   //! vector that will contain confidence values for each location
   std::vector<double> confidences;
};

/**@brief Implementation of HOG (Histogram of Oriented Gradients) descriptor and object detector.

The HOG descriptor algorithm was introduced by Navneet Dalal and Bill Triggs @cite Dalal2005 .

useful links:

https://hal.inria.fr/inria-00548512/document/

https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

https://software.intel.com/en-us/ipp-dev-reference-histogram-of-oriented-gradients-hog-descriptor

http://www.learnopencv.com/histogram-of-oriented-gradients

http://www.learnopencv.com/handwritten-digits-classification-an-opencv-c-python-tutorial

*/
struct CV_EXPORTS_W HOGDescriptor
{
public:
    enum { L2Hys = 0 //!< Default histogramNormType
         };

    enum { DEFAULT_NLEVELS = 64 //!< Default nlevels value.
         };

    /**@brief Creates the HOG descriptor and detector with default parameters.

    equal to HOGDescriptor(Size(64,128), Size(16,16), Size(8,8), Size(8,8), 9, 1 )
    */
    CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
        cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
        histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
        free_coef(-1.f), nlevels(HOGDescriptor::DEFAULT_NLEVELS), signedGradient(false)
    {}
    /** @overload
    @param _winSize sets winSize with given value.
    @param _blockSize sets blockSize with given value.
    @param _blockStride sets blockStride with given value.
    @param _cellSize sets cellSize with given value.
    @param _nbins sets nbins with given value.
    @param _derivAperture sets derivAperture with given value.
    @param _winSigma sets winSigma with given value.
    @param _histogramNormType sets histogramNormType with given value.
    @param _L2HysThreshold sets L2HysThreshold with given value.
    @param _gammaCorrection sets gammaCorrection with given value.
    @param _nlevels sets nlevels with given value.
    @param _signedGradient sets signedGradient with given value.
    */
    CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
                  Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
                  int _histogramNormType=HOGDescriptor::L2Hys,
                  double _L2HysThreshold=0.2, bool _gammaCorrection=false,
                  int _nlevels=HOGDescriptor::DEFAULT_NLEVELS, bool _signedGradient=false)
    : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),
    nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),
    histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
    gammaCorrection(_gammaCorrection), free_coef(-1.f), nlevels(_nlevels), signedGradient(_signedGradient)
    {}

    /** @overload

    Creates the HOG descriptor and detector and loads HOGDescriptor parameters and coefficients for the linear SVM classifier from a file.
    @param filename The file name containing HOGDescriptor properties and coefficients of the trained classifier.
    */
    CV_WRAP HOGDescriptor(const String& filename)
    {
        load(filename);
    }

    /** @overload
    @param d the HOGDescriptor which is cloned to create a new one.
    */
    HOGDescriptor(const HOGDescriptor& d)
    {
        d.copyTo(*this);
    }

    /**@brief Default destructor.
    */
    virtual ~HOGDescriptor() {}

    /**@brief Returns the number of coefficients required for the classification.
    */
    CV_WRAP size_t getDescriptorSize() const;

    /** @brief Checks if detector size equals descriptor size.
    */
    CV_WRAP bool checkDetectorSize() const;

    /** @brief Returns winSigma value
    */
    CV_WRAP double getWinSigma() const;

    /**@example samples/cpp/peopledetect.cpp
    */
    /**@brief Sets coefficients for the linear SVM classifier.
    @param _svmdetector coefficients for the linear SVM classifier.
    */
    CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);

    /** @brief Reads HOGDescriptor parameters and coefficients for the linear SVM classifier from a file node.
    @param fn File node
    */
    virtual bool read(FileNode& fn);

    /** @brief Stores HOGDescriptor parameters and coefficients for the linear SVM classifier in a file storage.
    @param fs File storage
    @param objname Object name
    */
    virtual void write(FileStorage& fs, const String& objname) const;

    /** @brief loads HOGDescriptor parameters and coefficients for the linear SVM classifier from a file
    @param filename Name of the file to read.
    @param objname The optional name of the node to read (if empty, the first top-level node will be used).
    */
    CV_WRAP virtual bool load(const String& filename, const String& objname = String());

    /** @brief saves HOGDescriptor parameters and coefficients for the linear SVM classifier to a file
    @param filename File name
    @param objname Object name
    */
    CV_WRAP virtual void save(const String& filename, const String& objname = String()) const;

    /** @brief clones the HOGDescriptor
    @param c cloned HOGDescriptor
    */
    virtual void copyTo(HOGDescriptor& c) const;

    /**@example samples/cpp/train_HOG.cpp
    */
    /** @brief Computes HOG descriptors of given image.
    @param img Matrix of the type CV_8U containing an image where HOG features will be calculated.
    @param descriptors Matrix of the type CV_32F
    @param winStride Window stride. It must be a multiple of block stride.
    @param padding Padding
    @param locations Vector of Point
    */
    CV_WRAP virtual void compute(InputArray img,
                         CV_OUT std::vector<float>& descriptors,
                         Size winStride = Size(), Size padding = Size(),
                         const std::vector<Point>& locations = std::vector<Point>()) const;
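
    /*
    A minimal usage sketch for compute(): extracting one descriptor per 64x128 window of an
    8-bit grayscale image (the variable `gray` is illustrative). With the default parameters
    each window yields getDescriptorSize() == 3780 floats.

        cv::HOGDescriptor hog;                       // default 64x128 detection window
        std::vector<float> descriptors;
        hog.compute(gray, descriptors, cv::Size(8, 8), cv::Size(0, 0));
        // descriptors.size() is a multiple of hog.getDescriptorSize()
    */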
    /** @brief Performs object detection without a multi-scale window.
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries.
    @param weights Vector that will contain confidence values for each detected object.
    @param hitThreshold Threshold for the distance between features and SVM classifying plane.
    Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
    But if the free coefficient is omitted (which is allowed), you can specify it manually here.
    @param winStride Window stride. It must be a multiple of block stride.
    @param padding Padding
    @param searchLocations Vector of Point includes set of requested locations to be evaluated.
    */
    CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
                        CV_OUT std::vector<double>& weights,
                        double hitThreshold = 0, Size winStride = Size(),
                        Size padding = Size(),
                        const std::vector<Point>& searchLocations = std::vector<Point>()) const;

    /** @brief Performs object detection without a multi-scale window.
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries.
    @param hitThreshold Threshold for the distance between features and SVM classifying plane.
    Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
    But if the free coefficient is omitted (which is allowed), you can specify it manually here.
    @param winStride Window stride. It must be a multiple of block stride.
    @param padding Padding
    @param searchLocations Vector of Point includes locations to search.
    */
    virtual void detect(const Mat& img, CV_OUT std::vector<Point>& foundLocations,
                        double hitThreshold = 0, Size winStride = Size(),
                        Size padding = Size(),
                        const std::vector<Point>& searchLocations=std::vector<Point>()) const;

    /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
    of rectangles.
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param foundLocations Vector of rectangles where each rectangle contains the detected object.
    @param foundWeights Vector that will contain confidence values for each detected object.
    @param hitThreshold Threshold for the distance between features and SVM classifying plane.
    Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
    But if the free coefficient is omitted (which is allowed), you can specify it manually here.
    @param winStride Window stride. It must be a multiple of block stride.
    @param padding Padding
    @param scale Coefficient of the detection window increase.
    @param groupThreshold Coefficient to regulate the similarity threshold. When detected, some objects can be covered
    by many rectangles. 0 means not to perform grouping.
    @param useMeanshiftGrouping indicates grouping algorithm
    */
    CV_WRAP virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,
                                  CV_OUT std::vector<double>& foundWeights, double hitThreshold = 0,
                                  Size winStride = Size(), Size padding = Size(), double scale = 1.05,
                                  double groupThreshold = 2.0, bool useMeanshiftGrouping = false) const;
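
    /*
    A minimal usage sketch for pedestrian detection with the built-in people detector
    (the variable `frame` is an illustrative CV_8U or CV_8UC3 input image):

        cv::HOGDescriptor hog;
        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
        std::vector<cv::Rect> people;
        std::vector<double> weights;
        hog.detectMultiScale(frame, people, weights, 0, cv::Size(8, 8), cv::Size(), 1.05, 2.0);
    */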
    /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
    of rectangles.
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param foundLocations Vector of rectangles where each rectangle contains the detected object.
    @param hitThreshold Threshold for the distance between features and SVM classifying plane.
    Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
    But if the free coefficient is omitted (which is allowed), you can specify it manually here.
    @param winStride Window stride. It must be a multiple of block stride.
    @param padding Padding
    @param scale Coefficient of the detection window increase.
    @param groupThreshold Coefficient to regulate the similarity threshold. When detected, some objects can be covered
    by many rectangles. 0 means not to perform grouping.
    @param useMeanshiftGrouping indicates grouping algorithm
    */
    virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,
                                  double hitThreshold = 0, Size winStride = Size(),
                                  Size padding = Size(), double scale = 1.05,
                                  double groupThreshold = 2.0, bool useMeanshiftGrouping = false) const;

    /** @brief Computes gradients and quantized gradient orientations.
    @param img Matrix contains the image to be computed
    @param grad Matrix of type CV_32FC2 contains computed gradients
    @param angleOfs Matrix of type CV_8UC2 contains quantized gradient orientations
    @param paddingTL Padding from top-left
    @param paddingBR Padding from bottom-right
    */
    CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
                                 Size paddingTL = Size(), Size paddingBR = Size()) const;

    /** @brief Returns coefficients of the classifier trained for people detection (for 64x128 windows).
    */
    CV_WRAP static std::vector<float> getDefaultPeopleDetector();

    /**@example samples/tapi/hog.cpp
    */
    /** @brief Returns coefficients of the classifier trained for people detection (for 48x96 windows).
    */
    CV_WRAP static std::vector<float> getDaimlerPeopleDetector();

    //! Detection window size. Align to block size and block stride. Default value is Size(64,128).
    CV_PROP Size winSize;

    //! Block size in pixels. Align to cell size. Default value is Size(16,16).
    CV_PROP Size blockSize;

    //! Block stride. It must be a multiple of cell size. Default value is Size(8,8).
    CV_PROP Size blockStride;

    //! Cell size. Default value is Size(8,8).
    CV_PROP Size cellSize;

    //! Number of bins used in the calculation of histogram of gradients. Default value is 9.
    CV_PROP int nbins;

    //! not documented
    CV_PROP int derivAperture;

    //! Gaussian smoothing window parameter.
    CV_PROP double winSigma;

    //! histogramNormType
    CV_PROP int histogramNormType;

    //! L2-Hys normalization method shrinkage.
    CV_PROP double L2HysThreshold;

    //! Flag to specify whether the gamma correction preprocessing is required or not.
    CV_PROP bool gammaCorrection;

    //! coefficients for the linear SVM classifier.
    CV_PROP std::vector<float> svmDetector;

    //! coefficients for the linear SVM classifier used when OpenCL is enabled
    UMat oclSvmDetector;

    //! not documented
    float free_coef;

    //! Maximum number of detection window increases. Default value is 64
    CV_PROP int nlevels;

    //! Indicates whether the signed gradient will be used or not
    CV_PROP bool signedGradient;

    /** @brief evaluate specified ROI and return confidence value for each location
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param locations Vector of Point
    @param foundLocations Vector of Point where each Point is detected object's top-left point.
    @param confidences confidences
    @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
    it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
    the free coefficient is omitted (which is allowed), you can specify it manually here
    @param winStride winStride
    @param padding padding
    */
    virtual void detectROI(const cv::Mat& img, const std::vector<cv::Point> &locations,
                                   CV_OUT std::vector<cv::Point>& foundLocations, CV_OUT std::vector<double>& confidences,
                                   double hitThreshold = 0, cv::Size winStride = Size(),
                                   cv::Size padding = Size()) const;

    /** @brief evaluate specified ROI and return confidence value for each location in multiple scales
    @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
    @param foundLocations Vector of rectangles where each rectangle contains the detected object.
    @param locations Vector of DetectionROI
    @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually it is 0 and should be specified
    in the detector coefficients (as the last free coefficient). But if the free coefficient is omitted (which is allowed), you can specify it manually here.
    @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
    */
    virtual void detectMultiScaleROI(const cv::Mat& img,
                                     CV_OUT std::vector<cv::Rect>& foundLocations,
                                     std::vector<DetectionROI>& locations,
                                     double hitThreshold = 0,
                                     int groupThreshold = 0) const;

    /** @brief read/parse Dalal's alt model file
    @param modelfile Path of Dalal's alt model file.
    */
    void readALTModel(String modelfile);

    /** @brief Groups the object candidate rectangles.
    @param rectList Input/output vector of rectangles. Output vector includes retained and grouped rectangles. (The Python list is not modified in place.)
    @param weights Input/output vector of weights of rectangles. Output vector includes weights of retained and grouped rectangles. (The Python list is not modified in place.)
    @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
    @param eps Relative difference between sides of the rectangles to merge them into a group.
    */
    void groupRectangles(std::vector<cv::Rect>& rectList, std::vector<double>& weights, int groupThreshold, double eps) const;
};
class CV_EXPORTS_W QRCodeEncoder {
protected:
    QRCodeEncoder();  // use ::create()
public:
    virtual ~QRCodeEncoder();

    enum EncodeMode {
        MODE_AUTO              = -1,
        MODE_NUMERIC           = 1,  // 0b0001
        MODE_ALPHANUMERIC      = 2,  // 0b0010
        MODE_BYTE              = 4,  // 0b0100
        MODE_ECI               = 7,  // 0b0111
        MODE_KANJI             = 8,  // 0b1000
        MODE_STRUCTURED_APPEND = 3   // 0b0011
    };

    enum CorrectionLevel {
        CORRECT_LEVEL_L = 0,
        CORRECT_LEVEL_M = 1,
        CORRECT_LEVEL_Q = 2,
        CORRECT_LEVEL_H = 3
    };

    enum ECIEncodings {
        ECI_UTF8 = 26
    };

    /** @brief QR code encoder parameters.
    @param version The optional version of QR code (by default - maximum possible depending on
                   the length of the string).
    @param correction_level The optional level of error correction (by default - the lowest).
    @param mode The optional encoding mode - Numeric, Alphanumeric, Byte, Kanji, ECI or Structured Append.
    @param structure_number The optional number of QR codes to generate in Structured Append mode.
    */
    struct CV_EXPORTS_W_SIMPLE Params
    {
        CV_WRAP Params();

        CV_PROP_RW int version;
        CV_PROP_RW CorrectionLevel correction_level;
        CV_PROP_RW EncodeMode mode;
        CV_PROP_RW int structure_number;
    };

    /** @brief Constructor
    @param parameters QR code encoder parameters QRCodeEncoder::Params
    */
    static CV_WRAP
    Ptr<QRCodeEncoder> create(const QRCodeEncoder::Params& parameters = QRCodeEncoder::Params());

    /** @brief Generates QR code from input string.
    @param encoded_info Input string to encode.
    @param qrcode Generated QR code.
    */
    CV_WRAP virtual void encode(const String& encoded_info, OutputArray qrcode) = 0;

    /** @brief Generates QR code from input string in Structured Append mode. The encoded message is split over a number of QR codes.
    @param encoded_info Input string to encode.
    @param qrcodes Vector of generated QR codes.
    */
    CV_WRAP virtual void encodeStructuredAppend(const String& encoded_info, OutputArrayOfArrays qrcodes) = 0;
};
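
/*
A minimal usage sketch for QRCodeEncoder (the encoded text is illustrative). The generated matrix
is a small single-channel bitmap, so it typically needs to be upscaled with nearest-neighbour
interpolation before it can be displayed or printed:

    cv::Ptr<cv::QRCodeEncoder> encoder = cv::QRCodeEncoder::create();
    cv::Mat qrcode;
    encoder->encode("https://opencv.org", qrcode);
    cv::resize(qrcode, qrcode, cv::Size(), 10, 10, cv::INTER_NEAREST);  // cv::resize is in the imgproc module
*/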
class CV_EXPORTS_W QRCodeDetector
{
public:
    CV_WRAP QRCodeDetector();
    ~QRCodeDetector();

    /** @brief sets the epsilon used during the horizontal scan of QR code stop marker detection.
    @param epsX Epsilon neighborhood, which allows you to determine the horizontal pattern
    of the scheme 1:1:3:1:1 according to QR code standard.
    */
    CV_WRAP void setEpsX(double epsX);
    /** @brief sets the epsilon used during the vertical scan of QR code stop marker detection.
    @param epsY Epsilon neighborhood, which allows you to determine the vertical pattern
    of the scheme 1:1:3:1:1 according to QR code standard.
    */
    CV_WRAP void setEpsY(double epsY);

    /** @brief Detects QR code in image and returns the quadrangle containing the code.
    @param img grayscale or color (BGR) image containing (or not) QR code.
    @param points Output vector of vertices of the minimum-area quadrangle containing the code.
    */
    CV_WRAP bool detect(InputArray img, OutputArray points) const;

    /** @brief Decodes QR code in image once it's found by the detect() method.

    Returns UTF8-encoded output string or empty string if the code cannot be decoded.
    @param img grayscale or color (BGR) image containing QR code.
    @param points Quadrangle vertices found by detect() method (or some other algorithm).
    @param straight_qrcode The optional output image containing rectified and binarized QR code
    */
    CV_WRAP cv::String decode(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());

    /** @brief Decodes QR code on a curved surface in image once it's found by the detect() method.

    Returns UTF8-encoded output string or empty string if the code cannot be decoded.
    @param img grayscale or color (BGR) image containing QR code.
    @param points Quadrangle vertices found by detect() method (or some other algorithm).
    @param straight_qrcode The optional output image containing rectified and binarized QR code
    */
    CV_WRAP cv::String decodeCurved(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());

    /** @brief Both detects and decodes QR code

    @param img grayscale or color (BGR) image containing QR code.
    @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found.
    @param straight_qrcode The optional output image containing rectified and binarized QR code
    */
    CV_WRAP cv::String detectAndDecode(InputArray img, OutputArray points=noArray(),
                                       OutputArray straight_qrcode = noArray());
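
    /*
    A minimal usage sketch: reading a single QR code from an image in one call (the variable
    `image` is an illustrative grayscale or BGR input):

        cv::QRCodeDetector qrDetector;
        cv::Mat points;                              // receives the 4 corner points of the code
        cv::String text = qrDetector.detectAndDecode(image, points);
        if (!text.empty())
        {
            // the code was found and decoded; points holds its quadrangle
        }
    */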
    /** @brief Both detects and decodes QR code on a curved surface

    @param img grayscale or color (BGR) image containing QR code.
    @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found.
    @param straight_qrcode The optional output image containing rectified and binarized QR code
    */
    CV_WRAP cv::String detectAndDecodeCurved(InputArray img, OutputArray points=noArray(),
                                             OutputArray straight_qrcode = noArray());

    /** @brief Detects QR codes in image and returns the vector of the quadrangles containing the codes.
    @param img grayscale or color (BGR) image containing (or not) QR codes.
    @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes.
    */
    CV_WRAP
    bool detectMulti(InputArray img, OutputArray points) const;

    /** @brief Decodes QR codes in image once it's found by the detect() method.
    @param img grayscale or color (BGR) image containing QR codes.
    @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
    @param points vector of Quadrangle vertices found by detect() method (or some other algorithm).
    @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes
    */
    CV_WRAP
    bool decodeMulti(
            InputArray img, InputArray points,
            CV_OUT std::vector<cv::String>& decoded_info,
            OutputArrayOfArrays straight_qrcode = noArray()
    ) const;

    /** @brief Both detects and decodes QR codes
    @param img grayscale or color (BGR) image containing QR codes.
    @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
    @param points optional output vector of vertices of the found QR code quadrangles. Will be empty if not found.
    @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes
    */
    CV_WRAP
    bool detectAndDecodeMulti(
            InputArray img, CV_OUT std::vector<cv::String>& decoded_info,
            OutputArray points = noArray(),
            OutputArrayOfArrays straight_qrcode = noArray()
    ) const;

#ifndef CV_DOXYGEN  // COMPATIBILITY
    inline bool decodeMulti(
            InputArray img, InputArray points,
            CV_OUT std::vector<std::string>& decoded_info,
            OutputArrayOfArrays straight_qrcode = noArray()
    ) const
    {
        std::vector<cv::String> decoded_info_;
        bool res = decodeMulti(img, points, decoded_info_, straight_qrcode);
        decoded_info.resize(decoded_info_.size());
        for (size_t i = 0; i < decoded_info.size(); ++i)
        {
            cv::String s; std::swap(s, decoded_info_[i]);
            decoded_info[i] = s;
        }
        return res;
    }
    inline bool detectAndDecodeMulti(
            InputArray img, CV_OUT std::vector<std::string>& decoded_info,
            OutputArray points = noArray(),
            OutputArrayOfArrays straight_qrcode = noArray()
    ) const
    {
        std::vector<cv::String> decoded_info_;
        bool res = detectAndDecodeMulti(img, decoded_info_, points, straight_qrcode);
        decoded_info.resize(decoded_info_.size());
        for (size_t i = 0; i < decoded_info.size(); ++i)
        {
            cv::String s; std::swap(s, decoded_info_[i]);
            decoded_info[i] = s;
        }
        return res;
    }
#endif

protected:
    struct Impl;
    Ptr<Impl> p;
};

/** @brief Detect QR code in image and return minimum area of quadrangle that describes QR code.
    @param in Matrix of the type CV_8UC1 containing an image where a QR code is detected.
    @param points Output vector of vertices of a quadrangle of minimal area that describes QR code.
    @param eps_x Epsilon neighborhood, which allows you to determine the horizontal pattern of the scheme 1:1:3:1:1 according to QR code standard.
    @param eps_y Epsilon neighborhood, which allows you to determine the vertical pattern of the scheme 1:1:3:1:1 according to QR code standard.
    */
CV_EXPORTS bool detectQRCode(InputArray in, std::vector<Point> &points, double eps_x = 0.2, double eps_y = 0.1);

/** @brief Decode QR code in image and return text that is encoded in the QR code.
    @param in Matrix of the type CV_8UC1 containing an image where a QR code is detected.
    @param points Input vector of vertices of a quadrangle of minimal area that describes QR code.
    @param decoded_info String information that is encoded in the QR code.
    @param straight_qrcode Matrix of the type CV_8UC1 containing a binary straight QR code.
    */
CV_EXPORTS bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray());

/** @brief Decode QR code on a curved surface in image and return text that is encoded in the QR code.
    @param in Matrix of the type CV_8UC1 containing an image where a QR code is detected.
    @param points Input vector of vertices of a quadrangle of minimal area that describes QR code.
    @param decoded_info String information that is encoded in the QR code.
    @param straight_qrcode Matrix of the type CV_8UC1 containing a binary straight QR code.
    */
CV_EXPORTS bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray());

//! @} objdetect
}

#include "opencv2/objdetect/detection_based_tracker.hpp"

#ifndef DISABLE_OPENCV_24_COMPATIBILITY
#include "opencv2/objdetect/objdetect_c.h"
#endif

#endif