/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_PHOTO_HPP
#define OPENCV_PHOTO_HPP

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

/**
@defgroup photo Computational Photography

This module includes photo processing algorithms
@{
    @defgroup photo_inpaint Inpainting
    @defgroup photo_denoise Denoising
    @defgroup photo_hdr HDR imaging

This section describes high dynamic range imaging algorithms, namely tonemapping, exposure alignment,
camera calibration with multiple exposures and exposure fusion.

    @defgroup photo_decolor Contrast Preserving Decolorization

Useful links:

http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html

    @defgroup photo_clone Seamless Cloning

Useful links:

https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp

    @defgroup photo_render Non-Photorealistic Rendering

Useful links:

http://www.inf.ufrgs.br/~eslgastal/DomainTransform

https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/

    @defgroup photo_c C API
@}
*/
namespace cv
{

//! @addtogroup photo
//! @{

//! @addtogroup photo_inpaint
//! @{

//! the inpainting algorithm
enum
{
    INPAINT_NS    = 0, //!< Use Navier-Stokes based method
    INPAINT_TELEA = 1  //!< Use the algorithm proposed by Alexandru Telea @cite Telea04
};
/** @brief Restores the selected region in an image using the region neighborhood.

@param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
@param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
needs to be inpainted.
@param dst Output image with the same size and type as src .
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA

The function reconstructs the selected image area from the pixels near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.

@note
   -   An example using the inpainting technique can be found at
       opencv_source_code/samples/cpp/inpaint.cpp
   -   (Python) An example using the inpainting technique can be found at
       opencv_source_code/samples/python/inpaint.py
 */
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
                           OutputArray dst, double inpaintRadius, int flags );
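
// A minimal usage sketch for cv::inpaint; the file names below are hypothetical:
//
//   cv::Mat img  = cv::imread("damaged.jpg");                     // 8-bit 3-channel photo
//   cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);  // non-zero pixels mark defects
//   cv::Mat restored;
//   cv::inpaint(img, mask, restored, 3.0 /*inpaintRadius*/, cv::INPAINT_TELEA);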

//! @} photo_inpaint

//! @addtogroup photo_denoise
//! @{
/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. Noise is expected to be Gaussian white noise.

@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise.

This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. Advanced usage of this function can be manual denoising of a colored
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored, which
converts the image to the CIELAB colorspace and then separately denoises the L and AB components
with different h parameters.
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
                                        int templateWindowSize = 7, int searchWindowSize = 21);
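
// A minimal usage sketch for grayscale non-local means denoising; the input file is hypothetical:
//
//   cv::Mat noisy = cv::imread("noisy_gray.png", cv::IMREAD_GRAYSCALE);
//   cv::Mat denoised;
//   cv::fastNlMeansDenoising(noisy, denoised, 10.0f /*h*/, 7, 21);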

/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. Noise is expected to be Gaussian white noise.

@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise.
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1

This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. Advanced usage of this function can be manual denoising of a colored
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored, which
converts the image to the CIELAB colorspace and then separately denoises the L and AB components
with different h parameters.
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst,
                                        const std::vector<float>& h,
                                        int templateWindowSize = 7, int searchWindowSize = 21,
                                        int normType = NORM_L2);

/** @brief Modification of fastNlMeansDenoising function for colored images

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise.
@param hColor The same as h but for color components. For most images a value of 10
will be enough to remove colored noise and not distort colors.

The function converts the image to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoising function.
 */
CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
                                               float h = 3, float hColor = 3,
                                               int templateWindowSize = 7, int searchWindowSize = 21);
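
// A minimal usage sketch for color denoising; the input file is hypothetical:
//
//   cv::Mat noisy = cv::imread("noisy_color.jpg");   // 8-bit 3-channel
//   cv::Mat denoised;
//   cv::fastNlMeansDenoisingColored(noisy, denoised, 10.0f /*h*/, 10.0f /*hColor*/, 7, 21);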

/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time, for example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
4-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A bigger h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise.
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
                                             int imgToDenoiseIndex, int temporalWindowSize,
                                             float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
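
// A minimal usage sketch for temporal denoising of a short grayscale sequence;
// frames is a hypothetical, already-filled std::vector<cv::Mat> of identical size and type:
//
//   std::vector<cv::Mat> frames;   // e.g. 5 consecutive grayscale video frames
//   cv::Mat denoised;
//   // denoise frames[2] using frames[1..3] (temporal window of 3)
//   cv::fastNlMeansDenoisingMulti(frames, denoised, 2, 3, 10.0f, 7, 21);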

/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time, for example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel images sequence. All images should
have the same type and size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise.
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
                                             int imgToDenoiseIndex, int temporalWindowSize,
                                             const std::vector<float>& h,
                                             int templateWindowSize = 7, int searchWindowSize = 21,
                                             int normType = NORM_L2);

/** @brief Modification of fastNlMeansDenoisingMulti function for colored image sequences

@param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute weighted average for
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise.
@param hColor The same as h but for color components.

The function converts the images to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoisingMulti function.
 */
CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
                                                    int imgToDenoiseIndex, int temporalWindowSize,
                                                    float h = 3, float hColor = 3,
                                                    int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional). As image denoising, in particular, may be seen
as a variational problem, the primal-dual algorithm can then be used to perform denoising, and this is
exactly what is implemented.

It should be noted that this implementation was taken from the July 2013 blog entry
@cite MA13 , which also contained (slightly more general) ready-to-use source code in Python.
Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end
of July 2013 and finally it was slightly adapted by later authors.

Although the thorough discussion and justification of the algorithm involved may be found in
@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin
with, we consider the 1-byte gray-level images as functions from the rectangular domain of
pixels (it may be seen as the set
\f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some
\f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. We shall denote the noised images as \f$f_i\f$ and with
this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula

\f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f]

\f$\|\|\cdot\|\|\f$ here denotes the \f$L_2\f$-norm and as you see, the first addend states that we want our
image to be smooth (ideally, having zero gradient, thus being constant) and the second states that
we want our result to be close to the observations we've got. If we treat \f$x\f$ as a function, this is
exactly the functional that we seek to minimize and here the primal-dual algorithm comes into play.

@param observations This array should contain one or more noised versions of the image that is to
be restored.
@param result Here the denoised image will be stored. There is no need to do pre-allocation of
storage space, as it will be automatically allocated, if necessary.
@param lambda Corresponds to \f$\lambda\f$ in the formulas above. As it is enlarged, the smooth
(blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
speaking, as it becomes smaller, the result will be more blurred but more severe outliers will be
removed.
@param niters Number of iterations that the algorithm will run. Of course, the more iterations the
better, but it is hard to quantitatively refine this statement, so just use the default and
increase it if the results are poor.
 */
CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations, Mat& result, double lambda=1.0, int niters=30);
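
// A minimal usage sketch for TV-L1 denoising from several noisy observations;
// the observation vector is assumed to be filled elsewhere with 8-bit grayscale images:
//
//   std::vector<cv::Mat> observations;   // one or more noisy CV_8UC1 versions of the same image
//   cv::Mat restored;
//   cv::denoise_TVL1(observations, restored, 1.0 /*lambda*/, 30 /*niters*/);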

//! @} photo_denoise

//! @addtogroup photo_hdr
//! @{

enum { LDR_SIZE = 256 };

/** @brief Base class for tonemapping algorithms - tools that are used to map HDR image to 8-bit range.
 */
class CV_EXPORTS_W Tonemap : public Algorithm
{
public:
    /** @brief Tonemaps image

    @param src source image - CV_32FC3 Mat (float 32 bits 3 channels)
    @param dst destination image - CV_32FC3 Mat with values in [0, 1] range
     */
    CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;

    CV_WRAP virtual float getGamma() const = 0;
    CV_WRAP virtual void setGamma(float gamma) = 0;
};

/** @brief Creates simple linear mapper with gamma correction

@param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma
equal to 2.2f is suitable for most displays.
Generally gamma \> 1 brightens the image and gamma \< 1 darkens it.
 */
CV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f);

/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in
logarithmic domain.

Since it's a global operator, the same function is applied to all the pixels; it is controlled by the
bias parameter.

Optional saturation enhancement is possible as described in @cite FL02 .

For more information see @cite DM03 .
 */
class CV_EXPORTS_W TonemapDrago : public Tonemap
{
public:
    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;

    CV_WRAP virtual float getBias() const = 0;
    CV_WRAP virtual void setBias(float bias) = 0;
};

/** @brief Creates TonemapDrago object

@param gamma gamma value for gamma correction. See createTonemap
@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
than 1 increase saturation and values less than 1 decrease it.
@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best
results, default value is 0.85.
 */
CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);
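
// A minimal tonemapping sketch: compress a 32-bit float HDR image into [0, 1];
// "radiance.hdr" is a hypothetical HDR file assumed to load as CV_32FC3:
//
//   cv::Mat hdr = cv::imread("radiance.hdr", cv::IMREAD_UNCHANGED);   // expected CV_32FC3
//   cv::Ptr<cv::TonemapDrago> tonemap = cv::createTonemapDrago(2.2f /*gamma*/, 1.0f, 0.85f);
//   cv::Mat ldr;
//   tonemap->process(hdr, ldr);            // ldr is CV_32FC3 with values in [0, 1]
//   ldr.convertTo(ldr, CV_8UC3, 255.0);    // scale to 8-bit for display or saving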

/** @brief This is a global tonemapping operator that models the human visual system.

The mapping function is controlled by the adaptation parameter, which is computed using light adaptation
and color adaptation.

For more information see @cite RD05 .
 */
class CV_EXPORTS_W TonemapReinhard : public Tonemap
{
public:
    CV_WRAP virtual float getIntensity() const = 0;
    CV_WRAP virtual void setIntensity(float intensity) = 0;

    CV_WRAP virtual float getLightAdaptation() const = 0;
    CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;

    CV_WRAP virtual float getColorAdaptation() const = 0;
    CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};

/** @brief Creates TonemapReinhard object

@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel
value, if 0 it's global, otherwise it's a weighted mean of these two cases.
@param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently,
if 0 adaptation level is the same for each channel.
 */
CV_EXPORTS_W Ptr<TonemapReinhard>
    createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);

/** @brief This algorithm transforms the image to contrast using gradients on all levels of a Gaussian pyramid,
transforms the contrast values to HVS response and scales the response. After this the image is
reconstructed from the new contrast values.

For more information see @cite MM06 .
 */
class CV_EXPORTS_W TonemapMantiuk : public Tonemap
{
public:
    CV_WRAP virtual float getScale() const = 0;
    CV_WRAP virtual void setScale(float scale) = 0;

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;
};

/** @brief Creates TonemapMantiuk object

@param gamma gamma value for gamma correction. See createTonemap
@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
dynamic range. Values from 0.6 to 0.9 produce best results.
@param saturation saturation enhancement value. See createTonemapDrago
 */
CV_EXPORTS_W Ptr<TonemapMantiuk>
    createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);

/** @brief The base class for algorithms that align images of the same scene with different exposures
 */
class CV_EXPORTS_W AlignExposures : public Algorithm
{
public:
    /** @brief Aligns images

    @param src vector of input images
    @param dst vector of aligned images
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value, it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median
luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.

It is invariant to exposure, so exposure values and camera response are not necessary.

In this implementation new image regions are filled with zeros.

For more information see @cite GW03 .
 */
class CV_EXPORTS_W AlignMTB : public AlignExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;

    /** @brief Short version of process that doesn't take extra arguments.

    @param src vector of input images
    @param dst vector of aligned images
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;

    /** @brief Calculates the shift between two images, i.e. how the second image should be shifted to
    correspond with the first.

    @param img0 first image
    @param img1 second image
     */
    CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;

    /** @brief Helper function that shifts a Mat, filling new regions with zeros.

    @param src input image
    @param dst result image
    @param shift shift value
     */
    CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;

    /** @brief Computes median threshold and exclude bitmaps of given image.

    @param img input image
    @param tb median threshold bitmap
    @param eb exclude bitmap
     */
    CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;

    CV_WRAP virtual int getMaxBits() const = 0;
    CV_WRAP virtual void setMaxBits(int max_bits) = 0;

    CV_WRAP virtual int getExcludeRange() const = 0;
    CV_WRAP virtual void setExcludeRange(int exclude_range) = 0;

    CV_WRAP virtual bool getCut() const = 0;
    CV_WRAP virtual void setCut(bool value) = 0;
};

/** @brief Creates AlignMTB object

@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true cuts images, otherwise fills the new regions with zeros.
 */
CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);
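
// A minimal alignment sketch for a bracketed exposure sequence;
// the images vector is assumed to be filled with 8-bit photos of the same scene:
//
//   std::vector<cv::Mat> images;                // e.g. under-, normal- and over-exposed shots
//   cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
//   align->process(images, images);             // aligns the images in place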

/** @brief The base class for camera response calibration algorithms.
 */
class CV_EXPORTS_W CalibrateCRF : public Algorithm
{
public:
    /** @brief Recovers inverse camera response.

    @param src vector of input images
    @param dst 256x1 matrix with inverse camera response function
    @param times vector of exposure time values for each image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an
objective function as a linear system. The objective function is constructed using pixel values at the same
position in all images; an extra term is added to make the result smoother.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
{
public:
    CV_WRAP virtual float getLambda() const = 0;
    CV_WRAP virtual void setLambda(float lambda) = 0;

    CV_WRAP virtual int getSamples() const = 0;
    CV_WRAP virtual void setSamples(int samples) = 0;

    CV_WRAP virtual bool getRandom() const = 0;
    CV_WRAP virtual void setRandom(bool random) = 0;
};

/** @brief Creates CalibrateDebevec object

@param samples number of pixel locations to use
@param lambda smoothness term weight. Greater values produce smoother results, but can alter the
response.
@param random if true sample pixel locations are chosen at random, otherwise they form a
rectangular grid.
 */
CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);
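
// A minimal calibration sketch: recover the inverse camera response from an exposure bracket;
// the images and exposure times below are assumed to be provided by the caller:
//
//   std::vector<cv::Mat> images;                             // aligned 8-bit exposures
//   std::vector<float> times = { 1/30.0f, 1/8.0f, 1/2.0f };  // exposure times in seconds
//   cv::Mat response;                                        // 256x1, CV_32FC3 for color input
//   cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec();
//   calibrate->process(images, response, times);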

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an
objective function as a linear system. This algorithm uses all image pixels.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
{
public:
    CV_WRAP virtual int getMaxIter() const = 0;
    CV_WRAP virtual void setMaxIter(int max_iter) = 0;

    CV_WRAP virtual float getThreshold() const = 0;
    CV_WRAP virtual void setThreshold(float threshold) = 0;

    CV_WRAP virtual Mat getRadiance() const = 0;
};

/** @brief Creates CalibrateRobertson object

@param max_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
 */
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);

/** @brief The base class for algorithms that can merge an exposure sequence into a single image.
 */
class CV_EXPORTS_W MergeExposures : public Algorithm
{
public:
    /** @brief Merges images.

    @param src vector of input images
    @param dst result image
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value, it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief The resulting HDR image is calculated as a weighted average of the exposures considering exposure
values and camera response.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W MergeDebevec : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeDebevec object
 */
CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();
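
// A minimal HDR merge sketch following the calibration step above;
// images, times and response are assumed to come from the CalibrateDebevec sketch:
//
//   cv::Mat hdr;                                             // output is CV_32FC3 radiance
//   cv::Ptr<cv::MergeDebevec> merge = cv::createMergeDebevec();
//   merge->process(images, hdr, times, response);
//   // hdr can then be passed to a Tonemap implementation for display.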

/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, then the images are
combined using Laplacian pyramids.

The resulting image weight is constructed as a weighted average of contrast, saturation and
well-exposedness measures.

The resulting image doesn't require tonemapping and can be converted to an 8-bit image by multiplying
by 255, but it's recommended to apply gamma correction and/or linear tonemapping.

For more information see @cite MK07 .
 */
class CV_EXPORTS_W MergeMertens : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;

    /** @brief Short version of process that doesn't take extra arguments.

    @param src vector of input images
    @param dst result image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;

    CV_WRAP virtual float getContrastWeight() const = 0;
    CV_WRAP virtual void setContrastWeight(float contrast_weight) = 0;

    CV_WRAP virtual float getSaturationWeight() const = 0;
    CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0;

    CV_WRAP virtual float getExposureWeight() const = 0;
    CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0;
};

/** @brief Creates MergeMertens object

@param contrast_weight contrast measure weight. See MergeMertens.
@param saturation_weight saturation measure weight
@param exposure_weight well-exposedness measure weight
 */
CV_EXPORTS_W Ptr<MergeMertens>
    createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);
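
// A minimal exposure fusion sketch: no exposure times or camera response are needed;
// images is again an aligned 8-bit exposure sequence assumed to be filled elsewhere:
//
//   cv::Mat fusion;
//   cv::Ptr<cv::MergeMertens> mertens = cv::createMergeMertens();
//   mertens->process(images, fusion);                  // CV_32FC3, roughly in [0, 1]
//   fusion.convertTo(fusion, CV_8UC3, 255.0);          // ready for display or saving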

/** @brief The resulting HDR image is calculated as a weighted average of the exposures considering exposure
values and camera response.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W MergeRobertson : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeRobertson object
 */
CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();

//! @} photo_hdr

//! @addtogroup photo_decolor
//! @{
/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
black-and-white photograph rendering, and in many single channel image processing applications
@cite CL12 .

@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color_boost Output 8-bit 3-channel image.

This function is to be applied on color images.
 */
CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);
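
// A minimal usage sketch for contrast-preserving decolorization; the input file is hypothetical:
//
//   cv::Mat color = cv::imread("photo.jpg");            // 8-bit 3-channel
//   cv::Mat gray, boosted;
//   cv::decolor(color, gray, boosted);                  // gray: CV_8UC1, boosted: CV_8UC3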

//! @} photo_decolor

//! @addtogroup photo_clone
//! @{

//! seamlessClone algorithm flags
enum
{
    /** The power of the method is fully expressed when inserting objects with complex outlines into a new background*/
    NORMAL_CLONE = 1,
    /** The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable
    halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.*/
    MIXED_CLONE  = 2,
    /** Monochrome transfer allows the user to easily replace certain features of one object by alternative features.*/
    MONOCHROME_TRANSFER = 3
};

/** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
An example using the seamlessClone function
*/

/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes confined to a selection. Here we are interested in achieving local
changes, ones that are restricted to a manually selected region (ROI), in a seamless and effortless
manner. The extent of the changes ranges from slight distortions to complete replacement by novel
content @cite PM03 .

@param src Input 8-bit 3-channel image.
@param dst Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
 */
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
                                 OutputArray blend, int flags);
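
// A minimal usage sketch for seamless cloning; the three input files are hypothetical:
//
//   cv::Mat src  = cv::imread("object.jpg");                            // object to insert
//   cv::Mat dst  = cv::imread("background.jpg");                        // destination scene
//   cv::Mat mask = cv::imread("object_mask.png", cv::IMREAD_GRAYSCALE); // white where the object is
//   cv::Point center(dst.cols / 2, dst.rows / 2);                       // placement point in dst
//   cv::Mat blended;
//   cv::seamlessClone(src, dst, mask, center, blended, cv::NORMAL_CLONE);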

/** @brief Given an original color image, two differently colored versions of this image can be mixed
seamlessly.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src .
@param red_mul R-channel multiply factor.
@param green_mul G-channel multiply factor.
@param blue_mul B-channel multiply factor.

Multiplication factor is between 0.5 and 2.5.
 */
CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f,
                              float green_mul = 1.0f, float blue_mul = 1.0f);

/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and
then integrating back with a Poisson solver, modifies locally the apparent illumination of an image.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param alpha Value ranges between 0-2.
@param beta Value ranges between 0-2.

This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
 */
CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst,
                                     float alpha = 0.2f, float beta = 0.4f);

/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low_threshold %Range from 0 to 100.
@param high_threshold Value \> 100.
@param kernel_size The size of the Sobel kernel to be used.

@note
The algorithm assumes that the color of the source image is close to that of the destination. This
assumption means that when the colors don't match, the source image color gets tinted toward the
color of the destination image.
 */
CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst,
                                    float low_threshold = 30, float high_threshold = 45,
                                    int kernel_size = 3);

//! @} photo_clone

//! @addtogroup photo_render
//! @{

//! Edge preserving filters
enum
{
    RECURS_FILTER   = 1, //!< Recursive Filtering
    NORMCONV_FILTER = 2  //!< Normalized Convolution Filtering
};

/** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
filters are used in many different applications @cite EM11 .

@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
 */
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
                                       float sigma_s = 60, float sigma_r = 0.4f);
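
// A minimal usage sketch for edge-preserving smoothing; the input file is hypothetical:
//
//   cv::Mat img = cv::imread("portrait.jpg");
//   cv::Mat smoothed;
//   cv::edgePreservingFilter(img, smoothed, cv::RECURS_FILTER, 60.0f /*sigma_s*/, 0.4f /*sigma_r*/);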

/** @brief This filter enhances the details of a particular image.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
 */
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
                                float sigma_r = 0.15f);

/** @example samples/cpp/tutorial_code/photo/non_photorealistic_rendering/npr_demo.cpp
An example using non-photorealistic line drawing functions
*/

/** @brief Pencil-like non-photorealistic line drawing

@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
@param shade_factor %Range between 0 and 0.1.
 */
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
                               float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
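
// A minimal usage sketch producing both the grayscale and the color pencil sketch;
// the input file is hypothetical:
//
//   cv::Mat img = cv::imread("scene.jpg");
//   cv::Mat sketchGray, sketchColor;
//   cv::pencilSketch(img, sketchGray, sketchColor, 60.0f, 0.07f, 0.02f);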

/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on
photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
contrast while preserving, or enhancing, high-contrast features.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
 */
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
                              float sigma_r = 0.45f);

//! @} photo_render

//! @} photo

} // namespace cv
#endif