Name
match_fundamental_matrix_distortion_ransac — Compute the fundamental matrix and the radial distortion coefficient
for a pair of stereo images by automatically finding correspondences
between image points.
match_fundamental_matrix_distortion_ransac(Image1, Image2 : : Rows1, Cols1, Rows2, Cols2, GrayMatchMethod, MaskSize, RowMove, ColMove, RowTolerance, ColTolerance, Rotation, MatchThreshold, EstimationMethod, DistanceThreshold, RandSeed : FMatrix, Kappa, Error, Points1, Points2)
Herror T_match_fundamental_matrix_distortion_ransac(const Hobject Image1, const Hobject Image2, const Htuple Rows1, const Htuple Cols1, const Htuple Rows2, const Htuple Cols2, const Htuple GrayMatchMethod, const Htuple MaskSize, const Htuple RowMove, const Htuple ColMove, const Htuple RowTolerance, const Htuple ColTolerance, const Htuple Rotation, const Htuple MatchThreshold, const Htuple EstimationMethod, const Htuple DistanceThreshold, const Htuple RandSeed, Htuple* FMatrix, Htuple* Kappa, Htuple* Error, Htuple* Points1, Htuple* Points2)
Herror match_fundamental_matrix_distortion_ransac(Hobject Image1, Hobject Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& GrayMatchMethod, const HTuple& MaskSize, const HTuple& RowMove, const HTuple& ColMove, const HTuple& RowTolerance, const HTuple& ColTolerance, const HTuple& Rotation, const HTuple& MatchThreshold, const HTuple& EstimationMethod, const HTuple& DistanceThreshold, const HTuple& RandSeed, HTuple* FMatrix, HTuple* Kappa, HTuple* Error, HTuple* Points1, HTuple* Points2)
HTuple HImage::MatchFundamentalMatrixDistortionRansac(const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& GrayMatchMethod, const HTuple& MaskSize, const HTuple& RowMove, const HTuple& ColMove, const HTuple& RowTolerance, const HTuple& ColTolerance, const HTuple& Rotation, const HTuple& MatchThreshold, const HTuple& EstimationMethod, const HTuple& DistanceThreshold, const HTuple& RandSeed, HTuple* Kappa, HTuple* Error, HTuple* Points1, HTuple* Points2) const
void MatchFundamentalMatrixDistortionRansac(const HObject& Image1, const HObject& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& GrayMatchMethod, const HTuple& MaskSize, const HTuple& RowMove, const HTuple& ColMove, const HTuple& RowTolerance, const HTuple& ColTolerance, const HTuple& Rotation, const HTuple& MatchThreshold, const HTuple& EstimationMethod, const HTuple& DistanceThreshold, const HTuple& RandSeed, HTuple* FMatrix, HTuple* Kappa, HTuple* Error, HTuple* Points1, HTuple* Points2)
HHomMat2D HImage::MatchFundamentalMatrixDistortionRansac(const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HString& GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, const HTuple& Rotation, const HTuple& MatchThreshold, const HString& EstimationMethod, const HTuple& DistanceThreshold, Hlong RandSeed, double* Kappa, double* Error, HTuple* Points1, HTuple* Points2) const
HHomMat2D HImage::MatchFundamentalMatrixDistortionRansac(const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HString& GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, double Rotation, Hlong MatchThreshold, const HString& EstimationMethod, double DistanceThreshold, Hlong RandSeed, double* Kappa, double* Error, HTuple* Points1, HTuple* Points2) const
HHomMat2D HImage::MatchFundamentalMatrixDistortionRansac(const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const char* GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, double Rotation, Hlong MatchThreshold, const char* EstimationMethod, double DistanceThreshold, Hlong RandSeed, double* Kappa, double* Error, HTuple* Points1, HTuple* Points2) const
double HHomMat2D::MatchFundamentalMatrixDistortionRansac(const HImage& Image1, const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HString& GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, const HTuple& Rotation, const HTuple& MatchThreshold, const HString& EstimationMethod, const HTuple& DistanceThreshold, Hlong RandSeed, double* Error, HTuple* Points1, HTuple* Points2)
double HHomMat2D::MatchFundamentalMatrixDistortionRansac(const HImage& Image1, const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HString& GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, double Rotation, Hlong MatchThreshold, const HString& EstimationMethod, double DistanceThreshold, Hlong RandSeed, double* Error, HTuple* Points1, HTuple* Points2)
double HHomMat2D::MatchFundamentalMatrixDistortionRansac(const HImage& Image1, const HImage& Image2, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const char* GrayMatchMethod, Hlong MaskSize, Hlong RowMove, Hlong ColMove, Hlong RowTolerance, Hlong ColTolerance, double Rotation, Hlong MatchThreshold, const char* EstimationMethod, double DistanceThreshold, Hlong RandSeed, double* Error, HTuple* Points1, HTuple* Points2)
void HOperatorSetX.MatchFundamentalMatrixDistortionRansac(
[in] IHUntypedObjectX* Image1, [in] IHUntypedObjectX* Image2, [in] VARIANT Rows1, [in] VARIANT Cols1, [in] VARIANT Rows2, [in] VARIANT Cols2, [in] VARIANT GrayMatchMethod, [in] VARIANT MaskSize, [in] VARIANT RowMove, [in] VARIANT ColMove, [in] VARIANT RowTolerance, [in] VARIANT ColTolerance, [in] VARIANT Rotation, [in] VARIANT MatchThreshold, [in] VARIANT EstimationMethod, [in] VARIANT DistanceThreshold, [in] VARIANT RandSeed, [out] VARIANT* FMatrix, [out] VARIANT* Kappa, [out] VARIANT* Error, [out] VARIANT* Points1, [out] VARIANT* Points2)
IHHomMat2DX* HImageX.MatchFundamentalMatrixDistortionRansac(
[in] IHImageX* Image2, [in] VARIANT Rows1, [in] VARIANT Cols1, [in] VARIANT Rows2, [in] VARIANT Cols2, [in] BSTR GrayMatchMethod, [in] Hlong MaskSize, [in] Hlong RowMove, [in] Hlong ColMove, [in] Hlong RowTolerance, [in] Hlong ColTolerance, [in] VARIANT Rotation, [in] VARIANT MatchThreshold, [in] BSTR EstimationMethod, [in] VARIANT DistanceThreshold, [in] Hlong RandSeed, [out] double* Kappa, [out] double* Error, [out] VARIANT* Points1, [out] VARIANT* Points2)
double HHomMat2DX.MatchFundamentalMatrixDistortionRansac(
[in] IHImageX* Image1, [in] IHImageX* Image2, [in] VARIANT Rows1, [in] VARIANT Cols1, [in] VARIANT Rows2, [in] VARIANT Cols2, [in] BSTR GrayMatchMethod, [in] Hlong MaskSize, [in] Hlong RowMove, [in] Hlong ColMove, [in] Hlong RowTolerance, [in] Hlong ColTolerance, [in] VARIANT Rotation, [in] VARIANT MatchThreshold, [in] BSTR EstimationMethod, [in] VARIANT DistanceThreshold, [in] Hlong RandSeed, [out] double* Error, [out] VARIANT* Points1, [out] VARIANT* Points2)
static void HOperatorSet.MatchFundamentalMatrixDistortionRansac(HObject image1, HObject image2, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, HTuple grayMatchMethod, HTuple maskSize, HTuple rowMove, HTuple colMove, HTuple rowTolerance, HTuple colTolerance, HTuple rotation, HTuple matchThreshold, HTuple estimationMethod, HTuple distanceThreshold, HTuple randSeed, out HTuple FMatrix, out HTuple kappa, out HTuple error, out HTuple points1, out HTuple points2)
HHomMat2D HImage.MatchFundamentalMatrixDistortionRansac(HImage image2, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, string grayMatchMethod, int maskSize, int rowMove, int colMove, int rowTolerance, int colTolerance, HTuple rotation, HTuple matchThreshold, string estimationMethod, HTuple distanceThreshold, int randSeed, out double kappa, out double error, out HTuple points1, out HTuple points2)
HHomMat2D HImage.MatchFundamentalMatrixDistortionRansac(HImage image2, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, string grayMatchMethod, int maskSize, int rowMove, int colMove, int rowTolerance, int colTolerance, double rotation, int matchThreshold, string estimationMethod, double distanceThreshold, int randSeed, out double kappa, out double error, out HTuple points1, out HTuple points2)
double HHomMat2D.MatchFundamentalMatrixDistortionRansac(HImage image1, HImage image2, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, string grayMatchMethod, int maskSize, int rowMove, int colMove, int rowTolerance, int colTolerance, HTuple rotation, HTuple matchThreshold, string estimationMethod, HTuple distanceThreshold, int randSeed, out double error, out HTuple points1, out HTuple points2)
double HHomMat2D.MatchFundamentalMatrixDistortionRansac(HImage image1, HImage image2, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, string grayMatchMethod, int maskSize, int rowMove, int colMove, int rowTolerance, int colTolerance, double rotation, int matchThreshold, string estimationMethod, double distanceThreshold, int randSeed, out double error, out HTuple points1, out HTuple points2)
Given a set of coordinates of characteristic points
(Rows1, Cols1) and
(Rows2, Cols2) in the stereo images
Image1 and Image2, which must be of identical
size, match_fundamental_matrix_distortion_ransac
automatically finds the correspondences between the characteristic
points and determines the geometry of the stereo setup. For unknown
cameras the geometry of the stereo setup is represented by the
fundamental matrix FMatrix and the radial distortion
coefficient Kappa. All corresponding points
must fulfill the epipolar constraint:
$(c_2,\; r_2,\; 1) \cdot \mathrm{FMatrix} \cdot (c_1,\; r_1,\; 1)^{T} = 0$
Here, (r1,c1) and (r2,c2)
denote image points that are obtained by undistorting the input
image points with the division model (see
calibrate_cameras):
r = r' / (1 + Kappa*(r'^2 + c'^2))
c = c' / (1 + Kappa*(r'^2 + c'^2))
Here, (r1',c1') =
(Rows1-0.5*(h-1), Cols1-0.5*(w-1)) and (r2',c2') =
(Rows2-0.5*(h-1), Cols2-0.5*(w-1)) denote the
distorted image points, specified relative to the image center, and
w and h denote the width and height of the input images. Thus,
match_fundamental_matrix_distortion_ransac assumes that the
principal point of the camera, i.e., the center of the radial
distortions, lies at the center of the image.
The returned Kappa can be used to construct camera
parameters that can be used to rectify images or points (see
change_radial_distortion_cam_par,
change_radial_distortion_image, and
change_radial_distortion_points):
CamPar = [0.0, Kappa, 1.0, 1.0, 0.5*(w-1), 0.5*(h-1), w, h]
Note the column/row ordering in the point coordinates above: since
the fundamental matrix encodes the projective relation between two
stereo images embedded in 3D space, the x/y notation must be
compliant with the camera coordinate system. Therefore, (x,y)
coordinates correspond to (column,row) pairs.
The matching process is based on characteristic points, which can be
extracted with point operators like points_foerstner or
points_harris. The matching itself is carried out in two
steps: first, gray value correlations of mask windows around the
input points in the first and the second image are determined and an
initial matching between them is generated using the similarity of
the windows in both images. Then, the RANSAC algorithm is applied
to find the fundamental matrix and radial distortion coefficient
that maximizes the number of correspondences under the epipolar
constraint.
The size of the mask windows used for the matching is
MaskSize x
MaskSize. Three metrics for the correlation can be
selected. If GrayMatchMethod has the value 'ssd',
the sum of the squared gray value differences is used,
'sad' means the sum of absolute differences, and
'ncc' is the normalized cross correlation. For details
please refer to binocular_disparity. The metric is
minimized ('ssd', 'sad') or maximized
('ncc') over all possible point pairs. A matching thus
found is only accepted if the value of the metric is below the value
of MatchThreshold ('ssd', 'sad') or above
that value ('ncc').
To increase the speed of the algorithm the search area for the match
candidates can be limited to a rectangle by specifying its size and
offset. Only points within a window of 2*RowTolerance x
2*ColTolerance points are considered. The offset of the
center of the search window in the second image with respect to the
position of the current point in the first image is given by
RowMove and ColMove.
If the second camera is rotated around the optical axis with respect
to the first camera, the parameter Rotation may contain an
estimate for the rotation angle or an angle interval in radians. A
good guess will increase the quality of the gray value matching. If
the actual rotation differs too much from the specified estimate,
the matching will typically fail. In this case, an angle interval
should be specified and Rotation is a tuple with two
elements. The larger the given interval is, the slower the
operator is, since the RANSAC algorithm is run over all
(automatically determined) angle increments within the interval.
After the initial matching has been completed, a randomized search
algorithm (RANSAC) is used to determine the fundamental matrix
FMatrix and the radial distortion coefficient
Kappa. It tries to find the parameters that are consistent
with a maximum number of correspondences. For a point to be
accepted, the distance in pixels to its corresponding epipolar line
must not exceed the threshold DistanceThreshold.
The parameter EstimationMethod decides whether the relative
orientation between the cameras is of a special type and which
algorithm is to be applied for its computation. If
EstimationMethod is either 'linear' or
'gold_standard', the relative orientation is arbitrary. If
the left and right cameras are identical and the relative
orientation between them is a pure translation,
EstimationMethod can be set to 'trans_linear' or
'trans_gold_standard'. The typical application for this
special motion case is the scenario of a single fixed camera looking
onto a moving conveyor belt. In order to get a unique solution for
the correspondence problem, the minimum required number of
corresponding points is nine in the general case and four in the
special translational case.
The fundamental matrix is computed by a linear algorithm if
EstimationMethod is set to 'linear' or
'trans_linear'. This algorithm is very fast. For the pure
translation case (EstimationMethod =
'trans_linear'), the linear method returns accurate results
for small to moderate noise of the point coordinates and for most
distortions (except for very small distortions). For a general
relative orientation of the two cameras (EstimationMethod
= 'linear'), the linear method only returns accurate
results for very small noise of the point coordinates and for
sufficiently large distortions. For EstimationMethod =
'gold_standard' or 'trans_gold_standard', a
mathematically optimal but slower optimization is used, which
minimizes the geometric reprojection error of reconstructed
projective 3D points. For a general relative orientation of the two
cameras, in general EstimationMethod =
'gold_standard' should be selected.
The value Error indicates the overall quality of the
estimation procedure and is the mean symmetric Euclidean distance in
pixels between the points and their corresponding epipolar lines.
Point pairs consistent with the above constraints are considered to
be corresponding points. Points1 contains the indices of
the matched input points from the first image and Points2
contains the indices of the corresponding points in the second
image.
The parameter RandSeed can be used to control the
randomized nature of the RANSAC algorithm, and hence to obtain
reproducible results. If RandSeed is set to a positive
number, the operator returns the same result on every call with the
same parameters because the internally used random number generator
is initialized with RandSeed. If RandSeed =
0, the random number generator is initialized with the
current time. In this case the results may not be reproducible.
- Multithreading type: reentrant (runs in parallel with non-exclusive operators).
- Multithreading scope: global (may be called from any thread).
- Processed without parallelization.
Input points in image 1 (row coordinate).
Restriction: length(Rows1) >= 9 || length(Rows1) >= 4
Input points in image 1 (column coordinate).
Restriction: length(Cols1) == length(Rows1)
Input points in image 2 (row coordinate).
Restriction: length(Rows2) >= 9 || length(Rows2) >= 4
Input points in image 2 (column coordinate).
Restriction: length(Cols2) == length(Rows2)
Gray value match metric.
Default value: 'ncc'
List of values: 'ncc', 'sad', 'ssd'
Size of gray value masks.
Default value: 10
Typical range of values: 3 ≤ MaskSize ≤ 15
Restriction: MaskSize >= 1
Average row coordinate offset of corresponding points.
Default value: 0
Average column coordinate offset of corresponding points.
Default value: 0
Half height of matching search window.
Default value: 200
Restriction: RowTolerance >= 1
Half width of matching search window.
Default value: 200
Restriction: ColTolerance >= 1
Estimate of the relative rotation of the second image
with respect to the first image.
Default value: 0.0
Suggested values: 0.0, 0.1, -0.1, 0.7854, 1.571, 3.142
Threshold for gray value matching.
Default value: 0.7
Suggested values: 0.9, 0.7, 0.5, 10, 20, 50, 100
Algorithm for the computation of the fundamental
matrix and for special camera orientations.
Default value: 'gold_standard'
List of values: 'gold_standard', 'linear', 'trans_gold_standard', 'trans_linear'
Maximal deviation of a point from its epipolar line.
Default value: 1
Restriction: DistanceThreshold > 0
Seed for the random number generator.
Default value: 0
Computed fundamental matrix.
Computed radial distortion coefficient.
Root-Mean-Square epipolar distance error.
Indices of matched input points in image 1.
Indices of matched input points in image 2.
points_foerstner (Image1, 1, 2, 3, 200, 0.1, 'gauss', 'true', \
Rows1, Cols1, _, _, _, _, _, _, _, _)
points_foerstner (Image2, 1, 2, 3, 200, 0.1, 'gauss', 'true', \
Rows2, Cols2, _, _, _, _, _, _, _, _)
match_fundamental_matrix_distortion_ransac (Image1, Image2, \
Rows1, Cols1, Rows2, \
Cols2, 'ncc', 10, 0, 0, \
100, 200, 0, 0.5, \
'trans_gold_standard', \
1, 42, FMatrix, Kappa, \
Error, Points1, Points2)
get_image_size (Image1, Width, Height)
CamParDist := [0.0,Kappa,1.0,1.0,0.5*(Width-1),0.5*(Height-1), \
Width,Height]
change_radial_distortion_cam_par ('fixed', CamParDist, 0, CamPar)
change_radial_distortion_image (Image1, Image1, Image1Rect, \
CamParDist, CamPar)
change_radial_distortion_image (Image2, Image2, Image2Rect, \
CamParDist, CamPar)
gen_binocular_proj_rectification (Map1, Map2, FMatrix, [], Width, \
Height, Width, Height, 1, \
'bilinear_map', _, H1, H2)
map_image (Image1Rect, Map1, Image1Mapped)
map_image (Image2Rect, Map2, Image2Mapped)
binocular_disparity_mg (Image1Mapped, Image2Mapped, Disparity, \
Score, 1, 30, 8, 0, 'false', \
'default_parameters', 'fast_accurate')
points_foerstner,
points_harris
vector_to_fundamental_matrix_distortion,
change_radial_distortion_cam_par,
change_radial_distortion_image,
change_radial_distortion_points,
gen_binocular_proj_rectification
match_fundamental_matrix_ransac,
match_essential_matrix_ransac,
match_rel_pose_ransac,
proj_match_points_ransac,
calibrate_cameras
Richard Hartley, Andrew Zisserman: “Multiple View Geometry in
Computer Vision”; Cambridge University Press, Cambridge; 2003.
Olivier Faugeras, Quang-Tuan Luong: “The Geometry of Multiple
Images: The Laws That Govern the Formation of Multiple Images of a
Scene and Some of Their Applications”; MIT Press, Cambridge, MA;
2001.
3D Metrology