Converted clouds only have 13 points
antithing opened this issue · 0 comments
antithing commented
Hi, thanks again for this code. I managed to get it building on windows finally, but on running the samples (using TUM desk dataset), the images load fine, but the clouds that are created only contain 13 points and the Transform matrix is empty.
The code is:
// Build the 3x3 pinhole intrinsic matrix (fx, fy: focal lengths; cx, cy: principal point).
Eigen::Matrix3f cameraMatrix;
cameraMatrix <<
fx, 0.0f, cx,
0.0f, fy, cy,
0.0f, 0.0f, 1.0f;
// Create the PinholePointProjector (back-projects depth pixels to 3D points).
nicp::PinholePointProjector pointProjector;
pointProjector.setMinDistance(0.01f);
// Points farther than 4.5 m are discarded by the projector.
pointProjector.setMaxDistance(4.5f);
pointProjector.setCameraMatrix(cameraMatrix);
// setImageSize takes (rows, cols); 480x640 matches TUM VGA depth frames.
pointProjector.setImageSize(480, 640);
pointProjector.setTransform(Eigen::Isometry3f::Identity());
// Shrink the projection to match the downscaled depth image used below.
pointProjector.scale(1.0f / imageScaling);
// Create StatsCalculator and InformationMatrixCalculator
nicp::StatsCalculatorIntegralImage statsCalculator;
nicp::PointInformationMatrixCalculator pointInformationMatrixCalculator;
nicp::NormalInformationMatrixCalculator normalInformationMatrixCalculator;
// Neighborhood radii scale with the image downscaling factor.
statsCalculator.setMinImageRadius(20 / imageScaling);
statsCalculator.setMaxImageRadius(40 / imageScaling);
statsCalculator.setMinPoints(40 / imageScaling);
statsCalculator.setCurvatureThreshold(0.2f);
statsCalculator.setWorldRadius(0.1f);
pointInformationMatrixCalculator.setCurvatureThreshold(0.02f);
normalInformationMatrixCalculator.setCurvatureThreshold(0.02f);
// Create DepthImageConverter (depth image -> cloud with normals + info matrices).
nicp::DepthImageConverterIntegralImage converter(&pointProjector, &statsCalculator,
&pointInformationMatrixCalculator,
&normalInformationMatrixCalculator);
// Create CorrespondenceFinder
nicp::CorrespondenceFinderProjective correspondenceFinder;
correspondenceFinder.setImageSize(pointProjector.imageRows(), pointProjector.imageCols());
correspondenceFinder.setInlierDistanceThreshold(0.5f);
correspondenceFinder.setInlierNormalAngularThreshold(0.95f);
correspondenceFinder.setFlatCurvatureThreshold(0.02f);
// Create Linearizer and Aligner
nicp::Linearizer linearizer;
nicp::AlignerProjective aligner;
linearizer.setInlierMaxChi2(9e3);
linearizer.setRobustKernel(true);
linearizer.setZScaling(true);
linearizer.setAligner(&aligner);
aligner.setOuterIterations(10);
aligner.setLambda(1e3);
aligner.setProjector(&pointProjector);
aligner.setCorrespondenceFinder(&correspondenceFinder);
aligner.setLinearizer(&linearizer);
// Get clouds from depth images
Eigen::Isometry3f initialGuess = Eigen::Isometry3f::Identity();
Eigen::Isometry3f sensorOffset = Eigen::Isometry3f::Identity();
nicp::RawDepthImage rawDepth;
nicp::DepthImage depth, scaledDepth;
nicp::Cloud referenceCloud, currentCloud, globalCloud;
// Flag -1 (IMREAD_UNCHANGED) preserves the 16-bit single-channel depth encoding.
rawDepth = cv::imread(referenceDepthFilename, -1);
cv::imshow("svav", rawDepth);
cv::waitKey(1);
if (!rawDepth.data) {
std::cerr << "Error: impossible to read image file " << referenceDepthFilename << std::endl;
return -1;
}
// BUG FIX: TUM RGB-D depth PNGs are scaled by a factor of 5000 (meters = raw / 5000),
// not 1000. With 0.001f every depth is inflated 5x, so nearly all points exceed
// setMaxDistance(4.5f) and are culled -- which is why only ~13 points survived.
nicp::DepthImage_convert_16UC1_to_32FC1(depth, rawDepth, 1.0f / 5000.0f);
nicp::DepthImage_scale(scaledDepth, depth, imageScaling);
converter.compute(referenceCloud, scaledDepth, sensorOffset);
rawDepth = cv::imread(currentDepthFilename, -1);
if (!rawDepth.data) {
std::cerr << "Error: impossible to read image file " << currentDepthFilename << std::endl;
return -1;
}
// Same TUM depth scale factor as above (1/5000, not 1/1000).
nicp::DepthImage_convert_16UC1_to_32FC1(depth, rawDepth, 1.0f / 5000.0f);
nicp::DepthImage_scale(scaledDepth, depth, imageScaling);
converter.compute(currentCloud, scaledDepth, sensorOffset);
// Perform the registration
aligner.setReferenceCloud(&referenceCloud);
// BUG FIX: the original read "setCurrentCloud(¤tCloud)" -- the text "&curren"
// was mangled into the HTML entity ¤ (&curren;) when pasted; the intended
// argument is the address of currentCloud.
aligner.setCurrentCloud(&currentCloud);
aligner.setInitialGuess(initialGuess);
aligner.setSensorOffset(sensorOffset);
aligner.align();
When I view the cv::Mat it looks correct, so what could I be doing wrong here? Thank you!