Operator Reference

stationary_camera_self_calibration (Operator)

stationary_camera_self_calibration — Perform a self-calibration of a stationary projective camera.

Signature

Herror T_stationary_camera_self_calibration(const Htuple NumImages, const Htuple ImageWidth, const Htuple ImageHeight, const Htuple ReferenceImage, const Htuple MappingSource, const Htuple MappingDest, const Htuple HomMatrices2D, const Htuple Rows1, const Htuple Cols1, const Htuple Rows2, const Htuple Cols2, const Htuple NumCorrespondences, const Htuple EstimationMethod, const Htuple CameraModel, const Htuple FixedCameraParams, Htuple* CameraMatrices, Htuple* Kappa, Htuple* RotationMatrices, Htuple* X, Htuple* Y, Htuple* Z, Htuple* Error)

void StationaryCameraSelfCalibration(const HTuple& NumImages, const HTuple& ImageWidth, const HTuple& ImageHeight, const HTuple& ReferenceImage, const HTuple& MappingSource, const HTuple& MappingDest, const HTuple& HomMatrices2D, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& NumCorrespondences, const HTuple& EstimationMethod, const HTuple& CameraModel, const HTuple& FixedCameraParams, HTuple* CameraMatrices, HTuple* Kappa, HTuple* RotationMatrices, HTuple* X, HTuple* Y, HTuple* Z, HTuple* Error)

static HHomMat2DArray HHomMat2D::StationaryCameraSelfCalibration(Hlong NumImages, Hlong ImageWidth, Hlong ImageHeight, Hlong ReferenceImage, const HTuple& MappingSource, const HTuple& MappingDest, const HHomMat2DArray& HomMatrices2D, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& NumCorrespondences, const HString& EstimationMethod, const HTuple& CameraModel, const HString& FixedCameraParams, HTuple* Kappa, HHomMat2DArray* RotationMatrices, HTuple* X, HTuple* Y, HTuple* Z, HTuple* Error)

static HHomMat2DArray HHomMat2D::StationaryCameraSelfCalibration(Hlong NumImages, Hlong ImageWidth, Hlong ImageHeight, Hlong ReferenceImage, const HTuple& MappingSource, const HTuple& MappingDest, const HHomMat2DArray& HomMatrices2D, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& NumCorrespondences, const HString& EstimationMethod, const HTuple& CameraModel, const HString& FixedCameraParams, double* Kappa, HHomMat2DArray* RotationMatrices, HTuple* X, HTuple* Y, HTuple* Z, double* Error)

static HHomMat2DArray HHomMat2D::StationaryCameraSelfCalibration(Hlong NumImages, Hlong ImageWidth, Hlong ImageHeight, Hlong ReferenceImage, const HTuple& MappingSource, const HTuple& MappingDest, const HHomMat2DArray& HomMatrices2D, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& NumCorrespondences, const char* EstimationMethod, const HTuple& CameraModel, const char* FixedCameraParams, double* Kappa, HHomMat2DArray* RotationMatrices, HTuple* X, HTuple* Y, HTuple* Z, double* Error)

static HHomMat2DArray HHomMat2D::StationaryCameraSelfCalibration(Hlong NumImages, Hlong ImageWidth, Hlong ImageHeight, Hlong ReferenceImage, const HTuple& MappingSource, const HTuple& MappingDest, const HHomMat2DArray& HomMatrices2D, const HTuple& Rows1, const HTuple& Cols1, const HTuple& Rows2, const HTuple& Cols2, const HTuple& NumCorrespondences, const wchar_t* EstimationMethod, const HTuple& CameraModel, const wchar_t* FixedCameraParams, double* Kappa, HHomMat2DArray* RotationMatrices, HTuple* X, HTuple* Y, HTuple* Z, double* Error)   ( Windows only)

static void HOperatorSet.StationaryCameraSelfCalibration(HTuple numImages, HTuple imageWidth, HTuple imageHeight, HTuple referenceImage, HTuple mappingSource, HTuple mappingDest, HTuple homMatrices2D, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, HTuple numCorrespondences, HTuple estimationMethod, HTuple cameraModel, HTuple fixedCameraParams, out HTuple cameraMatrices, out HTuple kappa, out HTuple rotationMatrices, out HTuple x, out HTuple y, out HTuple z, out HTuple error)

static HHomMat2D[] HHomMat2D.StationaryCameraSelfCalibration(int numImages, int imageWidth, int imageHeight, int referenceImage, HTuple mappingSource, HTuple mappingDest, HHomMat2D[] homMatrices2D, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, HTuple numCorrespondences, string estimationMethod, HTuple cameraModel, string fixedCameraParams, out HTuple kappa, out HHomMat2D[] rotationMatrices, out HTuple x, out HTuple y, out HTuple z, out HTuple error)

static HHomMat2D[] HHomMat2D.StationaryCameraSelfCalibration(int numImages, int imageWidth, int imageHeight, int referenceImage, HTuple mappingSource, HTuple mappingDest, HHomMat2D[] homMatrices2D, HTuple rows1, HTuple cols1, HTuple rows2, HTuple cols2, HTuple numCorrespondences, string estimationMethod, HTuple cameraModel, string fixedCameraParams, out double kappa, out HHomMat2D[] rotationMatrices, out HTuple x, out HTuple y, out HTuple z, out double error)

def stationary_camera_self_calibration(num_images: int, image_width: int, image_height: int, reference_image: int, mapping_source: Sequence[int], mapping_dest: Sequence[int], hom_matrices_2d: Sequence[float], rows_1: Sequence[Union[float, int]], cols_1: Sequence[Union[float, int]], rows_2: Sequence[Union[float, int]], cols_2: Sequence[Union[float, int]], num_correspondences: Sequence[int], estimation_method: str, camera_model: Sequence[str], fixed_camera_params: str) -> Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]]

def stationary_camera_self_calibration_s(num_images: int, image_width: int, image_height: int, reference_image: int, mapping_source: Sequence[int], mapping_dest: Sequence[int], hom_matrices_2d: Sequence[float], rows_1: Sequence[Union[float, int]], cols_1: Sequence[Union[float, int]], rows_2: Sequence[Union[float, int]], cols_2: Sequence[Union[float, int]], num_correspondences: Sequence[int], estimation_method: str, camera_model: Sequence[str], fixed_camera_params: str) -> Tuple[Sequence[float], float, Sequence[float], Sequence[float], Sequence[float], Sequence[float], float]

Description

stationary_camera_self_calibrationstationary_camera_self_calibrationStationaryCameraSelfCalibrationStationaryCameraSelfCalibrationstationary_camera_self_calibration performs a self-calibration of a stationary projective camera. Here, stationary means that the camera may only rotate around the optical center and may zoom. Hence, the optical center may not move. Projective means that the camera model is a pinhole camera that can be described by a projective 3D-2D transformation. In particular, radial distortions can only be modeled for cameras with constant parameters. If the lens exhibits significant radial distortions they should be removed, at least approximately, with change_radial_distortion_imagechange_radial_distortion_imageChangeRadialDistortionImageChangeRadialDistortionImagechange_radial_distortion_image.

The camera model being used can be described as follows: Here, x is a homogeneous 2D vector, X a homogeneous 3D vector, and P a homogeneous 3x4 projection matrix. The projection matrix P can be decomposed as follows: Here, R is a 3x3 rotation matrix and t is an inhomogeneous 3D vector. These two entities describe the position (pose) of the camera in 3D space. This convention is analogous to the convention used in camera_calibrationcamera_calibrationCameraCalibrationCameraCalibrationcamera_calibration, i.e., for R=I and t=0 the x axis points to the right, the y axis downwards, and the z axis points forward. K is the calibration matrix of the camera (the camera matrix) which can be described as follows: Here, f is the focal length of the camera in pixels, a the aspect ratio of the pixels, s is a factor that models the skew of the image axes, and (u,v) is the principal point of the camera in pixels. In this convention, the x axis corresponds to the column axis and the y axis to the row axis.

Since the camera is stationary, it can be assumed that t=0. With this convention, it is easy to see that the fourth coordinate of the homogeneous 3D vector X has no influence on the position of the projected 3D point. Consequently, the fourth coordinate can be set to 0, and it can be seen that X can be regarded as a point at infinity, and hence represents a direction in 3D. With this convention, the fourth coordinate of X can be omitted, and hence X can be regarded as inhomogeneous 3D vector which can only be determined up to scale since it represents a direction. With this, the above projection equation can be written as follows: If two images of the same point are taken with a stationary camera, the following equations hold: and consequently If the camera parameters do not change when taking the two images, holds. Because of the above, the two images of the same 3D point are related by a projective 2D transformation. This transformation can be determined with proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac. It needs to be taken into account that the order of the coordinates of the projective 2D transformations in HALCON is the opposite of the above convention. Furthermore, it needs to be taken into account that proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac uses a coordinate system in which the origin of a pixel lies in the upper left corner of the pixel, whereas stationary_camera_self_calibrationstationary_camera_self_calibrationStationaryCameraSelfCalibrationStationaryCameraSelfCalibrationstationary_camera_self_calibration uses a coordinate system that corresponds to the definition used in camera_calibrationcamera_calibrationCameraCalibrationCameraCalibrationcamera_calibration, in which the origin of a pixel lies in the center of the pixel. 
For projective 2D transformations that are determined with proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac the rows and columns must be exchanged and a translation of (0.5,0.5) must be applied. Hence, instead of the following equations hold in HALCON: and

From the above equation, constraints on the camera parameters can be derived in two ways. First, the rotation can be eliminated from the above equation, leading to equations that relate the camera matrices with the projective 2D transformation between the two images. Let be the projective transformation from image i to image j. Then, From the second equation, linear constraints on the camera parameters can be derived. This method is used for EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'linear'"linear""linear""linear""linear". Here, all source images i given by MappingSourceMappingSourceMappingSourcemappingSourcemapping_source and all destination images j given by MappingDestMappingDestMappingDestmappingDestmapping_dest are used to compute constraints on the camera parameters. After the camera parameters have been determined from these constraints, the rotation of the camera in the respective images can be determined based on the equation and by constructing a chain of transformations from the reference image ReferenceImageReferenceImageReferenceImagereferenceImagereference_image to the respective image. From the first equation above, a nonlinear method to determine the camera parameters can be derived by minimizing the following error: Here, analogously to the linear method, is the set of overlapping images specified by MappingSourceMappingSourceMappingSourcemappingSourcemapping_source and MappingDestMappingDestMappingDestmappingDestmapping_dest. This method is used for EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'nonlinear'"nonlinear""nonlinear""nonlinear""nonlinear". To start the minimization, the camera parameters are initialized with the results of the linear method. These two methods are very fast and return acceptable results if the projective 2D transformations are sufficiently accurate. For this, it is essential that the images do not have radial distortions. 
It can also be seen that in the above two methods the camera parameters are determined independently from the rotation parameters, and consequently the possible constraints are not fully exploited. In particular, it can be seen that it is not enforced that the projections of the same 3D point lie close to each other in all images. Therefore, stationary_camera_self_calibrationstationary_camera_self_calibrationStationaryCameraSelfCalibrationStationaryCameraSelfCalibrationstationary_camera_self_calibration offers a complete bundle adjustment as a third method (EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard"). Here, the camera parameters and rotations as well as the directions in 3D corresponding to the image points (denoted by the vectors X above), are determined in a single optimization by minimizing the following error: In this equation, only the terms for which the reconstructed direction is visible in image i are taken into account. The starting values for the parameters in the bundle adjustment are derived from the results of the nonlinear method. Because of the high complexity of the minimization the bundle adjustment requires a significantly longer execution time than the two simpler methods. Nevertheless, because the bundle adjustment results in significantly better results, it should be preferred.

In each of the three methods the camera parameters that should be computed can be specified. The remaining parameters are set to a constant value. Which parameters should be computed is determined with the parameter CameraModelCameraModelCameraModelcameraModelcamera_model which contains a tuple of values. CameraModelCameraModelCameraModelcameraModelcamera_model must always contain the value 'focus'"focus""focus""focus""focus" that specifies that the focal length f is computed. If CameraModelCameraModelCameraModelcameraModelcamera_model contains the value 'principal_point'"principal_point""principal_point""principal_point""principal_point" the principal point (u,v) of the camera is computed. If not, the principal point is set to (ImageWidthImageWidthImageWidthimageWidthimage_width/2,ImageHeightImageHeightImageHeightimageHeightimage_height/2). If CameraModelCameraModelCameraModelcameraModelcamera_model contains the value 'aspect'"aspect""aspect""aspect""aspect" the aspect ratio a of the pixels is determined, otherwise it is set to 1. If CameraModelCameraModelCameraModelcameraModelcamera_model contains the value 'skew'"skew""skew""skew""skew" the skew of the image axes is determined, otherwise it is set to 0. Only the following combinations of the parameters are allowed: 'focus'"focus""focus""focus""focus", ['focus', 'principal_point']["focus", "principal_point"]["focus", "principal_point"]["focus", "principal_point"]["focus", "principal_point"], ['focus', 'aspect']["focus", "aspect"]["focus", "aspect"]["focus", "aspect"]["focus", "aspect"], ['focus', 'principal_point', 'aspect']["focus", "principal_point", "aspect"]["focus", "principal_point", "aspect"]["focus", "principal_point", "aspect"]["focus", "principal_point", "aspect"], and ['focus', 'principal_point', 'aspect', 'skew']["focus", "principal_point", "aspect", "skew"]["focus", "principal_point", "aspect", "skew"]["focus", "principal_point", "aspect", "skew"]["focus", "principal_point", "aspect", "skew"].

Additionally, it is possible to determine the parameter KappaKappaKappakappakappa, which models radial lens distortions, if EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" has been selected. In this case, 'kappa'"kappa""kappa""kappa""kappa" can also be included in the parameter CameraModelCameraModelCameraModelcameraModelcamera_model. KappaKappaKappakappakappa corresponds to the radial distortion parameter of the division model for lens distortions (see camera_calibrationcamera_calibrationCameraCalibrationCameraCalibrationcamera_calibration).

When using EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" to determine the principal point, it is possible to penalize estimations far away from the image center. This can be done by appending a sigma to the parameter value, e.g., 'principal_point:0.5'"principal_point:0.5""principal_point:0.5""principal_point:0.5""principal_point:0.5". If no sigma is given, the penalty term in the above equation for calculating the error is omitted.

The parameter FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params determines whether the camera parameters can change in each image or whether they should be assumed constant for all images. To calibrate a camera so that it can later be used for measuring with the calibrated camera, only FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'true'"true""true""true""true" is useful. The mode FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'false'"false""false""false""false" is mainly useful to compute spherical mosaics with gen_spherical_mosaicgen_spherical_mosaicGenSphericalMosaicGenSphericalMosaicgen_spherical_mosaic if the camera zoomed or if the focus changed significantly when the mosaic images were taken. If a mosaic with constant camera parameters should be computed, of course FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'true'"true""true""true""true" should be used. It should be noted that for FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'false'"false""false""false""false" the camera calibration problem is very poorly determined (ill-conditioned), especially for long focal lengths. In these cases, often only the focal length can be determined. Therefore, it may be necessary to use CameraModelCameraModelCameraModelcameraModelcamera_model = 'focus'"focus""focus""focus""focus" or to constrain the position of the principal point by using a small sigma for the penalty term for the principal point.

The number of images that are used for the calibration is passed in NumImagesNumImagesNumImagesnumImagesnum_images. Based on the number of images, several constraints for the camera model must be observed. If only two images are used, even under the assumption of constant parameters not all camera parameters can be determined. In this case, the skew of the image axes should be set to 0 by not adding 'skew'"skew""skew""skew""skew" to CameraModelCameraModelCameraModelcameraModelcamera_model. If FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'false'"false""false""false""false" is used, the full set of camera parameters can never be determined, no matter how many images are used. In this case, the skew should be set to 0 as well. Furthermore, it should be noted that the aspect ratio can only be determined accurately if at least one image is rotated around the optical axis (the z axis of the camera coordinate system) with respect to the other images. If this is not the case the computation of the aspect ratio should be suppressed by not adding 'aspect'"aspect""aspect""aspect""aspect" to CameraModelCameraModelCameraModelcameraModelcamera_model.

As described above, to calibrate the camera it is necessary that the projective transformation for each overlapping image pair is determined with proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac. For example, for a 2x2 block of images in the following layout

1 2
3 4

the following projective transformations should be determined, assuming that all images overlap each other: 1->2, 1->3, 1->4, 2->3, 2->4 and 3->4. The indices of the images that determine the respective transformation are given by MappingSourceMappingSourceMappingSourcemappingSourcemapping_source and MappingDestMappingDestMappingDestmappingDestmapping_dest. The indices start at 1. Consequently, in the above example MappingSourceMappingSourceMappingSourcemappingSourcemapping_source = [1,1,1,2,2,3] and MappingDestMappingDestMappingDestmappingDestmapping_dest = [2,3,4,3,4,4] must be used. The number of images in the mosaic is given by NumImagesNumImagesNumImagesnumImagesnum_images. It is used to check whether each image can be reached by a chain of transformations. The index of the reference image is given by ReferenceImageReferenceImageReferenceImagereferenceImagereference_image. On output, this image has the identity matrix as its transformation matrix.

The 3x3 projective transformation matrices that correspond to the image pairs are passed in HomMatrices2DHomMatrices2DHomMatrices2DhomMatrices2Dhom_matrices_2d. Additionally, the coordinates of the matched point pairs in the image pairs must be passed in Rows1Rows1Rows1rows1rows_1, Cols1Cols1Cols1cols1cols_1, Rows2Rows2Rows2rows2rows_2, and Cols2Cols2Cols2cols2cols_2. They can be determined from the output of proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac with tuple_selecttuple_selectTupleSelectTupleSelecttuple_select or with the HDevelop function subset. To enable stationary_camera_self_calibrationstationary_camera_self_calibrationStationaryCameraSelfCalibrationStationaryCameraSelfCalibrationstationary_camera_self_calibration to determine which point pair belongs to which image pair, NumCorrespondencesNumCorrespondencesNumCorrespondencesnumCorrespondencesnum_correspondences must contain the number of found point matches for each image pair.

The computed camera matrices are returned in CameraMatricesCameraMatricesCameraMatricescameraMatricescamera_matrices as 3x3 matrices. For FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'false'"false""false""false""false", NumImagesNumImagesNumImagesnumImagesnum_images matrices are returned. Since for FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params = 'true'"true""true""true""true" all camera matrices are identical, a single camera matrix is returned in this case. The computed rotations are returned in RotationMatricesRotationMatricesRotationMatricesrotationMatricesrotation_matrices as 3x3 matrices. RotationMatricesRotationMatricesRotationMatricesrotationMatricesrotation_matrices always contains NumImagesNumImagesNumImagesnumImagesnum_images matrices.

If EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" is used, (XXXxx, YYYyy, ZZZzz) contains the reconstructed directions. In addition, ErrorErrorErrorerrorerror contains the average projection error of the reconstructed directions. This can be used to check whether the optimization has converged to useful values.

If the computed camera parameters are used to project 3D points or 3D directions into the image i the respective camera matrix should be multiplied with the corresponding rotation matrix (with hom_mat2d_composehom_mat2d_composeHomMat2dComposeHomMat2dComposehom_mat2d_compose).

Execution Information

  • Multithreading type: reentrant (runs in parallel with non-exclusive operators).
  • Multithreading scope: global (may be called from any thread).
  • Processed without parallelization.

Parameters

NumImagesNumImagesNumImagesnumImagesnum_images (input_control)  integer HTupleintHTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Number of different images that are used for the calibration.

Restriction: NumImages >= 2

ImageWidthImageWidthImageWidthimageWidthimage_width (input_control)  extent.x HTupleintHTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Width of the images from which the points were extracted.

Restriction: ImageWidth > 0

ImageHeightImageHeightImageHeightimageHeightimage_height (input_control)  extent.y HTupleintHTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Height of the images from which the points were extracted.

Restriction: ImageHeight > 0

ReferenceImageReferenceImageReferenceImagereferenceImagereference_image (input_control)  integer HTupleintHTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Index of the reference image.

MappingSourceMappingSourceMappingSourcemappingSourcemapping_source (input_control)  integer-array HTupleSequence[int]HTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Indices of the source images of the transformations.

MappingDestMappingDestMappingDestmappingDestmapping_dest (input_control)  integer-array HTupleSequence[int]HTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Indices of the target images of the transformations.

HomMatrices2DHomMatrices2DHomMatrices2DhomMatrices2Dhom_matrices_2d (input_control)  hom_mat2d-array HHomMat2D, HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Array of 3x3 projective transformation matrices.

Rows1Rows1Rows1rows1rows_1 (input_control)  point.y-array HTupleSequence[Union[float, int]]HTupleHtuple (real / integer) (double / int / long) (double / Hlong) (double / Hlong)

Row coordinates of corresponding points in the respective source images.

Cols1Cols1Cols1cols1cols_1 (input_control)  point.x-array HTupleSequence[Union[float, int]]HTupleHtuple (real / integer) (double / int / long) (double / Hlong) (double / Hlong)

Column coordinates of corresponding points in the respective source images.

Rows2Rows2Rows2rows2rows_2 (input_control)  point.y-array HTupleSequence[Union[float, int]]HTupleHtuple (real / integer) (double / int / long) (double / Hlong) (double / Hlong)

Row coordinates of corresponding points in the respective destination images.

Cols2Cols2Cols2cols2cols_2 (input_control)  point.x-array HTupleSequence[Union[float, int]]HTupleHtuple (real / integer) (double / int / long) (double / Hlong) (double / Hlong)

Column coordinates of corresponding points in the respective destination images.

NumCorrespondencesNumCorrespondencesNumCorrespondencesnumCorrespondencesnum_correspondences (input_control)  integer-array HTupleSequence[int]HTupleHtuple (integer) (int / long) (Hlong) (Hlong)

Number of point correspondences in the respective image pair.

EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method (input_control)  string HTuplestrHTupleHtuple (string) (string) (HString) (char*)

Estimation algorithm for the calibration.

Default: 'gold_standard' "gold_standard" "gold_standard" "gold_standard" "gold_standard"

List of values: 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard", 'linear'"linear""linear""linear""linear", 'nonlinear'"nonlinear""nonlinear""nonlinear""nonlinear"

CameraModelCameraModelCameraModelcameraModelcamera_model (input_control)  string-array HTupleSequence[str]HTupleHtuple (string) (string) (HString) (char*)

Camera model to be used.

Default: ['focus','principal_point'] ["focus","principal_point"] ["focus","principal_point"] ["focus","principal_point"] ["focus","principal_point"]

List of values: 'aspect'"aspect""aspect""aspect""aspect", 'focus'"focus""focus""focus""focus", 'kappa'"kappa""kappa""kappa""kappa", 'principal_point'"principal_point""principal_point""principal_point""principal_point", 'skew'"skew""skew""skew""skew"

FixedCameraParamsFixedCameraParamsFixedCameraParamsfixedCameraParamsfixed_camera_params (input_control)  string HTuplestrHTupleHtuple (string) (string) (HString) (char*)

Are the camera parameters identical for all images?

Default: 'true' "true" "true" "true" "true"

List of values: 'false'"false""false""false""false", 'true'"true""true""true""true"

CameraMatricesCameraMatricesCameraMatricescameraMatricescamera_matrices (output_control)  hom_mat2d-array HHomMat2D, HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

(Array of) 3x3 projective camera matrices that determine the internal camera parameters.

KappaKappaKappakappakappa (output_control)  real(-array) HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Radial distortion of the camera.

RotationMatricesRotationMatricesRotationMatricesrotationMatricesrotation_matrices (output_control)  hom_mat2d-array HHomMat2D, HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Array of 3x3 transformation matrices that determine rotation of the camera in the respective image.

XXXxx (output_control)  point3d.x-array HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

X-Component of the direction vector of each point if EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" is used.

YYYyy (output_control)  point3d.y-array HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Y-Component of the direction vector of each point if EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" is used.

ZZZzz (output_control)  point3d.z-array HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Z-Component of the direction vector of each point if EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" is used.

ErrorErrorErrorerrorerror (output_control)  real(-array) HTupleSequence[float]HTupleHtuple (real) (double) (double) (double)

Average error per reconstructed point if EstimationMethodEstimationMethodEstimationMethodestimationMethodestimation_method = 'gold_standard'"gold_standard""gold_standard""gold_standard""gold_standard" is used.

Example (HDevelop)

* Assume that Images contains four images in the layout given in the
* above description.  Then the following example performs the camera
* self-calibration using these four images.
From := [1,1,1,2,2,3]
To := [2,3,4,3,4,4]
HomMatrices2D := []
Rows1 := []
Cols1 := []
Rows2 := []
Cols2 := []
NumMatches := []
for J := 0 to |From|-1 by 1
    select_obj (Images, ImageF, From[J])
    select_obj (Images, ImageT, To[J])
    points_foerstner (ImageF, 1, 2, 3, 100, 0.1, 'gauss', 'true', \
                      RowsF, ColsF, _, _, _, _, _, _, _, _)
    points_foerstner (ImageT, 1, 2, 3, 100, 0.1, 'gauss', 'true', \
                      RowsT, ColsT, _, _, _, _, _, _, _, _)
    proj_match_points_ransac (ImageF, ImageT, RowsF, ColsF, RowsT, ColsT, \
                              'ncc', 10, 0, 0, 480, 640, 0, 0.5, \
                              'gold_standard', 2, 42, HomMat2D, \
                              Points1, Points2)
    HomMatrices2D := [HomMatrices2D,HomMat2D]
    Rows1 := [Rows1,subset(RowsF,Points1)]
    Cols1 := [Cols1,subset(ColsF,Points1)]
    Rows2 := [Rows2,subset(RowsT,Points2)]
    Cols2 := [Cols2,subset(ColsT,Points2)]
    NumMatches := [NumMatches,|Points1|]
endfor
stationary_camera_self_calibration (4, 640, 480, 1, From, To, \
                                    HomMatrices2D, Rows1, Cols1, \
                                    Rows2, Cols2, NumMatches, \
                                    'gold_standard', \
                                    ['focus','principal_point'], \
                                    'true', CameraMatrix, Kappa, \
                                    RotationMatrices, X, Y, Z, Error)

Result

If the parameters are valid, the operator stationary_camera_self_calibrationstationary_camera_self_calibrationStationaryCameraSelfCalibrationStationaryCameraSelfCalibrationstationary_camera_self_calibration returns the value 2 (H_MSG_TRUE). If necessary, an exception is raised.

Possible Predecessors

proj_match_points_ransacproj_match_points_ransacProjMatchPointsRansacProjMatchPointsRansacproj_match_points_ransac, proj_match_points_ransac_guidedproj_match_points_ransac_guidedProjMatchPointsRansacGuidedProjMatchPointsRansacGuidedproj_match_points_ransac_guided

Possible Successors

gen_spherical_mosaicgen_spherical_mosaicGenSphericalMosaicGenSphericalMosaicgen_spherical_mosaic

See also

gen_projective_mosaicgen_projective_mosaicGenProjectiveMosaicGenProjectiveMosaicgen_projective_mosaic

References

Lourdes Agapito, E. Hayman, I. Reid: “Self-Calibration of Rotating and Zooming Cameras”; International Journal of Computer Vision; vol. 45, no. 2; pp. 107--127; 2001.

Module

Calibration