diff --git a/stubs/cv2-stubs/__init__.pyi b/stubs/cv2-stubs/__init__.pyi
index d60d79ca..b05ebf26 100644
--- a/stubs/cv2-stubs/__init__.pyi
+++ b/stubs/cv2-stubs/__init__.pyi
@@ -1694,7 +1694,7 @@ def adaptiveThreshold(src: Mat, maxValue, adaptiveMethod, thresholdType, blockSi
     'adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst\n. @brief Applies an adaptive threshold to an array.\n. \n. The function transforms a grayscale image to a binary image according to the formulae:\n. - **THRESH_BINARY**\n. \\f[dst(x,y) = \\fork{\\texttt{maxValue}}{if \\(src(x,y) > T(x,y)\\)}{0}{otherwise}\\f]\n. - **THRESH_BINARY_INV**\n. \\f[dst(x,y) = \\fork{0}{if \\(src(x,y) > T(x,y)\\)}{\\texttt{maxValue}}{otherwise}\\f]\n. where \\f$T(x,y)\\f$ is a threshold calculated individually for each pixel (see adaptiveMethod parameter).\n. \n. The function can process the image in-place.\n. \n. @param src Source 8-bit single-channel image.\n. @param dst Destination image of the same size and the same type as src.\n. @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied\n. @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.\n. The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.\n. @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,\n. see #ThresholdTypes.\n. @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the\n. pixel: 3, 5, 7, and so on.\n. @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it\n. is positive but may be zero or negative as well.\n. \n. @sa threshold, blur, GaussianBlur'
     ...

-def add(src1: Mat, src2: Mat, dst: Mat = ..., mask: Mat = ..., dtype=...) -> typing.Any:
+def add(src1: Mat | float, src2: Mat | float, dst: Mat = ..., mask: Mat = ..., dtype=...) -> typing.Any:
     'add(src1, src2[, dst[, mask[, dtype]]]) -> dst\n. @brief Calculates the per-element sum of two arrays or an array and a scalar.\n. \n. The function add calculates:\n. - Sum of two arrays when both input arrays have the same size and the same number of channels:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) + \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of\n. elements as `src1.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) + \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of\n. elements as `src2.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1} + \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each\n. channel is processed independently.\n. \n. The first function in the list above can be replaced with matrix expressions:\n. @code{.cpp}\n. dst = src1 + src2;\n. dst += src1; // equivalent to add(dst, src1, dst);\n. @endcode\n. The input arrays and the output array can all have the same or different depths. For example, you\n. can add a 16-bit unsigned array to a 8-bit signed array and store the sum as a 32-bit\n. floating-point array. Depth of the output array is determined by the dtype parameter. In the second\n. and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can\n. be set to the default -1. In this case, the output array will have the same depth as the input\n. array, be it src1, src2 or both.\n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and number of channels as the input array(s); the\n. depth is defined by dtype or src1/src2.\n. @param mask optional operation mask - 8-bit single channel array, that specifies elements of the\n. output array to be changed.\n. @param dtype optional depth of the output array (see the discussion below).\n. @sa subtract, addWeighted, scaleAdd, Mat::convertTo'
     ...
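The widened `add` signature matches the runtime, which also accepts a scalar operand. A minimal sanity check (hypothetical snippet; assumes `numpy` and an installed `cv2` runtime):

import cv2
import numpy as np

gray = np.full((4, 4), 250, dtype=np.uint8)
print(cv2.add(gray, 10.0))   # scalar operand; saturates at 255 instead of wrapping
print(cv2.add(gray, gray))   # plain Mat + Mat form is unaffected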
@@ -2508,10 +2508,15 @@ def haveOpenVX() -> typing.Any:
     'haveOpenVX() -> retval\n.'
     ...

-def hconcat(src: Mat, dst: Mat = ...) -> typing.Any:
+@overload
+def hconcat(src: list[Mat], dst: Mat = ...) -> Mat:
     'hconcat(src[, dst]) -> dst\n. @overload\n. @code{.cpp}\n. std::vector matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),\n. cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),\n. cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};\n. \n. cv::Mat out;\n. cv::hconcat( matrices, out );\n. //out:\n. //[1, 2, 3;\n. // 1, 2, 3;\n. // 1, 2, 3;\n. // 1, 2, 3]\n. @endcode\n. @param src input array or vector of matrices. all of the matrices must have the same number of rows and the same depth.\n. @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.\n. same depth.'
     ...

+@overload
+def hconcat(src1: Mat, src2: Mat, dst: Mat = ...) -> Mat:
+    ...
+
 def idct(src: Mat, dst: Mat = ..., flags: int = ...) -> typing.Any:
     'idct(src[, dst[, flags]]) -> dst\n. @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.\n. \n. idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).\n. @param src input floating-point single-channel array.\n. @param dst output array of the same size and type as src.\n. @param flags operation flags.\n. @sa dct, dft, idft, getOptimalDFTSize'
     ...
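For `hconcat`, the list form is the one the Python bindings document (`hconcat(src[, dst]) -> dst`); the added two-Mat overload mirrors the C++ `hconcat(src1, src2, dst)` signature. A quick sketch of the list form (hypothetical values):

import cv2
import numpy as np

a = np.ones((4, 1), dtype=np.uint8)
b = np.full((4, 1), 2, dtype=np.uint8)
out = cv2.hconcat([a, b])    # columns are concatenated left to right
print(out.shape)             # (4, 2)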
@@ -2810,10 +2815,15 @@ def recoverPose(E, points1, points2, cameraMatrix, R=..., t=..., mask: Mat = ...
     "recoverPose(E, points1, points2, cameraMatrix[, R[, t[, mask]]]) -> retval, R, t, mask\n. @brief Recovers the relative camera rotation and the translation from an estimated essential\n. matrix and the corresponding points in two images, using cheirality check. Returns the number of\n. inliers that pass the check.\n. \n. @param E The input essential matrix.\n. @param points1 Array of N 2D points from the first image. The point coordinates should be\n. floating-point (single or double precision).\n. @param points2 Array of the second image points of the same size and format as points1 .\n. @param cameraMatrix Camera matrix \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\n. Note that this function assumes that points1 and points2 are feature points from cameras with the\n. same camera matrix.\n. @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple\n. that performs a change of basis from the first camera's coordinate system to the second camera's\n. coordinate system. Note that, in general, t can not be used for this tuple, see the parameter\n. described below.\n. @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and\n. therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit\n. length.\n. @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks\n. inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to\n. recover pose. In the output mask only inliers which pass the cheirality check.\n. \n. This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies\n. possible pose hypotheses by doing cheirality check. The cheirality check means that the\n. triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.\n. \n. This function can be used to process the output E and mask from @ref findEssentialMat. In this\n. scenario, points1 and points2 are the same input for findEssentialMat.:\n. @code\n. // Example. Estimation of fundamental matrix using the RANSAC algorithm\n. int point_count = 100;\n. vector points1(point_count);\n. vector points2(point_count);\n. \n. // initialize the points here ...\n. for( int i = 0; i < point_count; i++ )\n. {\n. points1[i] = ...;\n. points2[i] = ...;\n. }\n. \n. // cametra matrix with both focal lengths = 1, and principal point = (0, 0)\n. Mat cameraMatrix = Mat::eye(3, 3, CV_64F);\n. \n. Mat E, R, t, mask;\n. \n. E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);\n. recoverPose(E, points1, points2, cameraMatrix, R, t, mask);\n. @endcode\n\n\n\nrecoverPose(E, points1, points2[, R[, t[, focal[, pp[, mask]]]]]) -> retval, R, t, mask\n. @overload\n. @param E The input essential matrix.\n. @param points1 Array of N 2D points from the first image. The point coordinates should be\n. floating-point (single or double precision).\n. @param points2 Array of the second image points of the same size and format as points1 .\n. @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple\n. that performs a change of basis from the first camera's coordinate system to the second camera's\n. coordinate system. Note that, in general, t can not be used for this tuple, see the parameter\n. description below.\n. @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and\n. therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit\n. length.\n. @param focal Focal length of the camera. Note that this function assumes that points1 and points2\n. are feature points from cameras with same focal length and principal point.\n. @param pp principal point of the camera.\n. @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks\n. inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to\n. recover pose. In the output mask only inliers which pass the cheirality check.\n. \n. This function differs from the one above that it computes camera matrix from focal length and\n. principal point:\n. \n. \\f[A =\n. \\begin{bmatrix}\n. f & 0 & x_{pp} \\\\\n. 0 & f & y_{pp} \\\\\n. 0 & 0 & 1\n. \\end{bmatrix}\\f]\n\n\n\nrecoverPose(E, points1, points2, cameraMatrix, distanceThresh[, R[, t[, mask[, triangulatedPoints]]]]) -> retval, R, t, mask, triangulatedPoints\n. @overload\n. @param E The input essential matrix.\n. @param points1 Array of N 2D points from the first image. The point coordinates should be\n. floating-point (single or double precision).\n. @param points2 Array of the second image points of the same size and format as points1.\n. @param cameraMatrix Camera matrix \\f$A = \\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\\f$ .\n. Note that this function assumes that points1 and points2 are feature points from cameras with the\n. same camera matrix.\n. @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple\n. that performs a change of basis from the first camera's coordinate system to the second camera's\n. coordinate system. Note that, in general, t can not be used for this tuple, see the parameter\n. description below.\n. @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and\n. therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit\n. length.\n. @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite\n. points).\n. @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks\n. inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to\n. recover pose. In the output mask only inliers which pass the cheirality check.\n. @param triangulatedPoints 3D points which were reconstructed by triangulation.\n. \n. This function differs from the one above that it outputs the triangulated 3D point that are used for\n. the cheirality check."
     ...
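The `recoverPose` docstring above embeds a C++ example; a rough Python equivalent of the same findEssentialMat/recoverPose flow, with hypothetical correspondences standing in for real feature matches:

import cv2
import numpy as np

pts1 = np.random.rand(100, 2)    # placeholder points; use matched features in practice
pts2 = pts1 + 0.01               # hypothetical second view
K = np.eye(3)                    # focal lengths = 1, principal point = (0, 0)

E, mask = cv2.findEssentialMat(pts1, pts2, K, cv2.RANSAC, 0.999, 1.0)
n_inliers, R, t, mask = cv2.recoverPose(E, pts1, pts2, K, mask=mask)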
-def rectangle(img: Mat, pt1, pt2, color, thickness=..., lineType=..., shift=...) -> typing.Any:
+@overload
+def rectangle(img: Mat, pt1: typing.Tuple[int, int], pt2: typing.Tuple[int, int], color, thickness=..., lineType=..., shift=...) -> typing.Any:
     'rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a simple, thick, or filled up-right rectangle.\n. \n. The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners\n. are pt1 and pt2.\n. \n. @param img Image.\n. @param pt1 Vertex of the rectangle.\n. @param pt2 Vertex of the rectangle opposite to pt1 .\n. @param color Rectangle color or brightness (grayscale image).\n. @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,\n. mean that the function has to draw a filled rectangle.\n. @param lineType Type of the line. See #LineTypes\n. @param shift Number of fractional bits in the point coordinates.\n\n\n\nrectangle(img, rec, color[, thickness[, lineType[, shift]]]) -> img\n. @overload\n. \n. use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and\n. r.br()-Point(1,1)` are opposite corners'
     ...

+@overload
+def rectangle(img: Mat, rec: typing.Tuple[int, int, int, int], color, thickness=..., lineType=..., shift=...) -> typing.Any:
+    ...
+
 def rectify3Collinear(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, cameraMatrix3, distCoeffs3, imgpt1, imgpt3, imageSize, R12, T12, R13, T13, alpha, newImgSize, flags: int, R1=..., R2=..., R3=..., P1=..., P2=..., P3=..., Q=...) -> typing.Any:
     'rectify3Collinear(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, cameraMatrix3, distCoeffs3, imgpt1, imgpt3, imageSize, R12, T12, R13, T13, alpha, newImgSize, flags[, R1[, R2[, R3[, P1[, P2[, P3[, Q]]]]]]]) -> retval, R1, R2, R3, P1, P2, P3, Q, roi1, roi2\n.'
     ...
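The two `rectangle` overloads added above correspond to the two call forms in the docstring: opposite corners, or a whole rectangle given as `(x, y, w, h)`. A short illustration (hypothetical values):

import cv2
import numpy as np

canvas = np.zeros((100, 100, 3), dtype=np.uint8)
cv2.rectangle(canvas, (10, 10), (60, 60), (0, 255, 0), 2)   # pt1/pt2 corner form
cv2.rectangle(canvas, (10, 10, 50, 50), (0, 0, 255), 2)     # rec = (x, y, w, h) form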
@@ -2838,7 +2848,7 @@ def reprojectImageTo3D(disparity, Q, _3dImage=..., handleMissingValues=..., ddep
     "reprojectImageTo3D(disparity, Q[, _3dImage[, handleMissingValues[, ddepth]]]) -> _3dImage\n. @brief Reprojects a disparity image to 3D space.\n. \n. @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit\n. floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no\n. fractional bits. If the disparity is 16-bit signed format, as computed by @ref StereoBM or\n. @ref StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before\n. being used here.\n. @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of\n. _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one\n. uses Q obtained by @ref stereoRectify, then the returned points are represented in the first\n. camera's rectified coordinate system.\n. @param Q \\f$4 \\times 4\\f$ perspective transformation matrix that can be obtained with\n. @ref stereoRectify.\n. @param handleMissingValues Indicates, whether the function should handle missing values (i.e.\n. points where the disparity was not computed). If handleMissingValues=true, then pixels with the\n. minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed\n. to 3D points with a very large Z value (currently set to 10000).\n. @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F\n. depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.\n. \n. The function transforms a single-channel disparity map to a 3-channel image representing a 3D\n. surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it\n. computes:\n. \n. \\f[\\begin{bmatrix}\n. X \\\\\n. Y \\\\\n. Z \\\\\n. W\n. \\end{bmatrix} = Q \\begin{bmatrix}\n. x \\\\\n. y \\\\\n. \\texttt{disparity} (x,y) \\\\\n. z\n. \\end{bmatrix}.\\f]\n. \n. @sa\n. To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform."
     ...

-def resize(src: Mat, dsize: typing.Tuple[int, int], dst: Mat = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> Mat:
+def resize(src: Mat, dsize: typing.Tuple[int, int] | None, dst: Mat = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> Mat:
     'resize(src, dsize[, dst[, fx[, fy[, interpolation]]]]) -> dst\n. @brief Resizes an image.\n. \n. The function resize resizes the image src down to or up to the specified size. Note that the\n. initial dst type or size are not taken into account. Instead, the size and type are derived from\n. the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,\n. you may call the function as follows:\n. @code\n. // explicitly specify dsize=dst.size(); fx and fy will be computed from that.\n. resize(src, dst, dst.size(), 0, 0, interpolation);\n. @endcode\n. If you want to decimate the image by factor of 2 in each direction, you can call the function this\n. way:\n. @code\n. // specify fx and fy and let the function compute the destination image size.\n. resize(src, dst, Size(), 0.5, 0.5, interpolation);\n. @endcode\n. To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to\n. enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR\n. (faster but still looks OK).\n. \n. @param src input image.\n. @param dst output image; it has the size dsize (when it is non-zero) or the size computed from\n. src.size(), fx, and fy; the type of dst is the same as of src.\n. @param dsize output image size; if it equals zero, it is computed as:\n. \\f[\\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\\f]\n. Either dsize or both fx and fy must be non-zero.\n. @param fx scale factor along the horizontal axis; when it equals 0, it is computed as\n. \\f[\\texttt{(double)dsize.width/src.cols}\\f]\n. @param fy scale factor along the vertical axis; when it equals 0, it is computed as\n. \\f[\\texttt{(double)dsize.height/src.rows}\\f]\n. @param interpolation interpolation method, see #InterpolationFlags\n. \n. @sa warpAffine, warpPerspective, remap'
     ...
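Allowing `dsize=None` matches the documented behaviour that either dsize or both fx and fy must be non-zero; with `None`, the output size is computed from the scale factors. For example (hypothetical values):

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
half = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
print(half.shape)    # (240, 320, 3)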
@@ -3010,7 +3020,7 @@ def stylization(src: Mat, dst: Mat = ..., sigma_s=..., sigma_r=...) -> typing.An
     'stylization(src[, dst[, sigma_s[, sigma_r]]]) -> dst\n. @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on\n. photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low\n. contrast while preserving, or enhancing, high-contrast features.\n. \n. @param src Input 8-bit 3-channel image.\n. @param dst Output image with the same size and type as src.\n. @param sigma_s %Range between 0 to 200.\n. @param sigma_r %Range between 0 to 1.'
     ...

-def subtract(src1: Mat, src2: Mat, dst: Mat = ..., mask: Mat = ..., dtype=...) -> typing.Any:
+def subtract(src1: Mat | float, src2: Mat | float, dst: Mat = ..., mask: Mat = ..., dtype=...) -> typing.Any:
     'subtract(src1, src2[, dst[, mask[, dtype]]]) -> dst\n. @brief Calculates the per-element difference between two arrays or array and a scalar.\n. \n. The function subtract calculates:\n. - Difference between two arrays, when both input arrays have the same size and the same number of\n. channels:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) - \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Difference between an array and a scalar, when src2 is constructed from Scalar or has the same\n. number of elements as `src1.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) - \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Difference between a scalar and an array, when src1 is constructed from Scalar or has the same\n. number of elements as `src2.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1} - \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - The reverse difference between a scalar and an array in the case of `SubRS`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src2} - \\texttt{src1}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each\n. channel is processed independently.\n. \n. The first function in the list above can be replaced with matrix expressions:\n. @code{.cpp}\n. dst = src1 - src2;\n. dst -= src1; // equivalent to subtract(dst, src1, dst);\n. @endcode\n. The input arrays and the output array can all have the same or different depths. For example, you\n. can subtract to 8-bit unsigned arrays and store the difference in a 16-bit signed array. Depth of\n. the output array is determined by dtype parameter. In the second and third cases above, as well as\n. in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this\n. case the output array will have the same depth as the input array, be it src1, src2 or both.\n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array of the same size and the same number of channels as the input array.\n. @param mask optional operation mask; this is an 8-bit single channel array that specifies elements\n. of the output array to be changed.\n. @param dtype optional depth of the output array\n. @sa add, addWeighted, scaleAdd, Mat::convertTo'
     ...
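As with `add`, the scalar case for `subtract` saturates on integer outputs. A minimal check (hypothetical values):

import cv2
import numpy as np

gray = np.full((4, 4), 5, dtype=np.uint8)
print(cv2.subtract(gray, 10.0))   # saturates at 0 rather than wrapping to 251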
@@ -3066,10 +3076,15 @@ def validateDisparity(disparity, cost, minDisparity, numberOfDisparities, disp12
     'validateDisparity(disparity, cost, minDisparity, numberOfDisparities[, disp12MaxDisp]) -> disparity\n.'
     ...

-def vconcat(src: Mat, dst: Mat = ...) -> typing.Any:
+@overload
+def vconcat(src: list[Mat], dst: Mat = ...) -> Mat:
     'vconcat(src[, dst]) -> dst\n. @overload\n. @code{.cpp}\n. std::vector matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),\n. cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),\n. cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};\n. \n. cv::Mat out;\n. cv::vconcat( matrices, out );\n. //out:\n. //[1, 1, 1, 1;\n. // 2, 2, 2, 2;\n. // 3, 3, 3, 3]\n. @endcode\n. @param src input array or vector of matrices. all of the matrices must have the same number of cols and the same depth\n. @param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.\n. same depth.'
     ...

+@overload
+def vconcat(src1: Mat, src2: Mat, dst: Mat = ...) -> Mat:
+    ...
+
 def waitKey(delay=...) -> typing.Any:
     'waitKey([, delay]) -> retval\n. @brief Waits for a pressed key.\n. \n. The function waitKey waits for a key event infinitely (when \\f$\\texttt{delay}\\leq 0\\f$ ) or for delay\n. milliseconds, when it is positive. Since the OS has a minimum time between switching threads, the\n. function will not wait exactly delay ms, it will wait at least delay ms, depending on what else is\n. running on your computer at that time. It returns the code of the pressed key or -1 if no key was\n. pressed before the specified time had elapsed.\n. \n. @note\n. \n. This function is the only method in HighGUI that can fetch and handle events, so it needs to be\n. called periodically for normal event processing unless HighGUI is used within an environment that\n. takes care of event processing.\n. \n. @note\n. \n. The function only works if there is at least one HighGUI window created and the window is active.\n. If there are several HighGUI windows, any of them can be active.\n. \n. @param delay Delay in milliseconds. 0 is the special value that means "forever".'
     ...
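And the matching list form of `vconcat` (hypothetical values):

import cv2
import numpy as np

rows = [np.full((1, 4), v, dtype=np.uint8) for v in (1, 2, 3)]
out = cv2.vconcat(rows)    # rows are stacked top to bottom
print(out.shape)           # (3, 4)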