dimanche 15 mars 2015

Inaccurate Camera Pose results


I am trying to measure the camera pose relative to a fiducial marker. I am measuring a marker similar to this here: http://ift.tt/18Qy8GI


My code is shown below. I have no trouble finding matches and even drawing a box around the match.


I've compared the camera pose to physical measurements, and the camera pose is definitely off by about a half meter. I wouldn't be surprised if there were minor errors, but a half meter struck me as a pretty large error.


At first, I thought it was my camera calibration, since there is significant distortion due to the fisheye lens. I recalibrated, but this apparently was not the issue.


I've been looking through my code to see if anything sticks out as the problem, but haven't found it. It may be a case of having stared at the code too long — the issue might be obvious to someone else.



//Load marker & scene images (unchanged: keeps original bit depth/channels)
Mat marker, scene;
marker = imread(FILE1, CV_LOAD_IMAGE_UNCHANGED);
scene = imread(FILE2, CV_LOAD_IMAGE_UNCHANGED);

//Check to make sure both images were loaded
if( marker.empty() || scene.empty() ) {
    cout << "Error: image cannot be loaded..!!" << endl;
    system("pause");
    return -1;
}

//Convert marker & scene images to grayscale.
//BUG FIX: imread loads color images in BGR channel order, so the
//conversion code must be CV_BGR2GRAY. CV_RGB2GRAY applies the R/B
//luma weights to the wrong channels, skewing the grayscale values.
cvtColor(marker, marker, CV_BGR2GRAY);
cvtColor(scene, scene, CV_BGR2GRAY);

//Detect keypoints using the SIFT detector
//(NOTE: comment previously said "ORB Detector", but the code uses SIFT)
SiftFeatureDetector detector;
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(marker, keypoints1);
detector.detect(scene, keypoints2);

//Extract SIFT descriptors at the detected keypoints
SiftDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute( marker, keypoints1, descriptors1 );
extractor.compute( scene, keypoints2, descriptors2 );

//Brute-force match descriptors from marker to scene.
//L2 norm suits SIFT; crossCheck=true keeps only mutually-best matches,
//so matches.size() may be smaller than descriptors1.rows.
BFMatcher matcher(NORM_L2, true);
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);

//Initial min/max distance sentinels (min starts high, max starts low)
double min_dist = 10000;
double max_dist = 0;

//Calculate the min and max descriptor distances over the matches.
//BUG FIX: with cross-checking enabled, matches.size() can be smaller
//than descriptors1.rows, so iterating descriptors1.rows could read
//past the end of the matches vector. Iterate the matches themselves.
for( size_t i = 0; i < matches.size(); i++ ) {
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
}

//Define good matches: distance within 2x the best match, floored at 90.
//(90 was found through quick experimenting; may or may not be optimal.)
std::vector< DMatch > good_matches;
for( size_t i = 0; i < matches.size(); i++ ) {
    if( matches[i].distance <= max(2*min_dist, 90.0) ) {
        good_matches.push_back( matches[i]);
    }
}

//Exit if there are too few matches — findHomography needs at least
//4 point correspondences.
//BUG FIX: the error message was a copy-paste of the image-load error.
if ( good_matches.size() < 4) {
    cout << "Error: not enough good matches to estimate a homography..!!" << endl;
    system("pause");
    return -1;
}

//Draw good matches side by side (marker on the left, scene on the right)
Mat matchesImage;
drawMatches( marker, keypoints1, scene, keypoints2,
             good_matches, matchesImage, Scalar::all(-1), Scalar::all(-1),
             vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

//Collect matched point coordinates: marker (src) -> scene (dst).
//BUG FIX: the loop previously ran to matches.size() while indexing
//good_matches[i]. good_matches is a strict subset of matches, so that
//indexes past the end of good_matches (undefined behavior) and feeds
//garbage correspondences into findHomography.
vector<Point2f> srcPoints, dstPoints;
for (size_t i = 0; i < good_matches.size(); i++) {
    srcPoints.push_back(keypoints1[good_matches[i].queryIdx].pt);
    dstPoints.push_back(keypoints2[good_matches[i].trainIdx].pt);
}

//Find the homography mapping marker pixels to scene pixels,
//using RANSAC to reject remaining outlier correspondences
Mat H;
H = findHomography(srcPoints, dstPoints, CV_RANSAC);

//Get corners of marker //pixels
vector<Point2f> marker_corners(4);
marker_corners[0] = cvPoint(0,0);
marker_corners[1] = cvPoint(marker.cols, 0);
marker_corners[2] = cvPoint(marker.cols, marker.rows);
marker_corners[3] = cvPoint(0, marker.rows);

vector<Point2f> scene_corners(4);

//Perform perspective transform
perspectiveTransform(marker_corners, scene_corners, H);

//Draw lines between corners
line(matchesImage, scene_corners[0] + Point2f(marker.cols, 0), scene_corners[1] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[1] + Point2f(marker.cols, 0), scene_corners[2] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[2] + Point2f(marker.cols, 0), scene_corners[3] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);
line(matchesImage, scene_corners[3] + Point2f(marker.cols, 0), scene_corners[0] + Point2f(marker.cols,0), Scalar(0, 255, 0), 4);

//Open calibration file produced by the OpenCV calibration tool
FileStorage fs2("calibration.xml", FileStorage::READ);

//Read in camera matrix and distortion coefficients
Mat cameraMatrix, distCoeffs;
fs2["Camera_Matrix"] >> cameraMatrix;
fs2["Distortion_Coefficients"] >> distCoeffs;
fs2.release();

//Define the marker corners in 3D (marker plane at Z = 0), in the same
//order as scene_corners (TL, TR, BR, BL).
//BUG FIX: the vector was declared as objp1 but then filled through an
//undeclared name marker_corners_3d — use one name consistently.
//NOTE(review): these object points are in PIXEL units, so the
//recovered tvec is expressed in pixels, not meters. For a metric pose
//(comparable to physical measurements), replace marker.cols/marker.rows
//with the marker's real-world width/height in meters — this unit
//mismatch is a likely cause of the ~0.5 m discrepancy reported above.
vector<Point3f> marker_corners_3d(4);
marker_corners_3d[0] = Point3f(0, 0, 0);
marker_corners_3d[1] = Point3f((float)marker.cols, 0, 0);
marker_corners_3d[2] = Point3f((float)marker.cols, (float)marker.rows, 0);
marker_corners_3d[3] = Point3f(0, (float)marker.rows, 0);

//Find the marker pose relative to the camera.
//BUG FIX: rvec/tvec were never declared.
//NOTE(review): with only 4 correspondences RANSAC cannot meaningfully
//reject outliers; plain solvePnP would be equally appropriate here.
Mat rvec, tvec;
solvePnPRansac( marker_corners_3d, scene_corners, cameraMatrix, distCoeffs, rvec, tvec );

//Convert rotation vector (axis-angle) to a 3x3 rotation matrix
Mat R;
Rodrigues(rvec, R);

//solvePnPRansac found the pose of the marker relative to the camera.
//Need to find the pose of the camera relative to the marker, i.e.
//invert the rigid transform [R|t]. For a rotation matrix the inverse
//equals the transpose.
R = R.t();

//Convert back to rotation vector (camera orientation in marker frame)
Rodrigues(R,rvec);

//Find the translation vector of camera relative to marker:
//t' = -R^T * t (R here already holds the transpose)
tvec = -R*tvec;

//Assemble the 4x4 homogeneous camera pose matrix [R'|t'; 0 0 0 1]
Mat camPose = Mat::eye(4, 4, R.type());
R.copyTo(camPose.rowRange(0,3).colRange(0,3));      // top-left 3x3 = rotation
tvec.copyTo(camPose.rowRange(0,3).colRange(3,4));   // top-right 3x1 = translation

}



Aucun commentaire:

Enregistrer un commentaire