Face Detection with CoreImage (Objective-C Version)
- Core code:
#import <CoreImage/CoreImage.h> // CIDetector / CIFaceFeature come from Core Image

#pragma mark - Face detection
- (void)faceDetectWithImage:(UIImage *)image {
    // Detection accuracy: choose between CIDetectorAccuracyHigh (more accurate,
    // more processing) and CIDetectorAccuracyLow (less accurate, faster)
    NSDictionary *opts = @{CIDetectorAccuracy: CIDetectorAccuracyHigh};
    // Convert the UIImage to a CIImage
    CIImage *faceImage = [CIImage imageWithCGImage:image.CGImage];
    CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    // Run detection; the result is an array of CIFaceFeature objects
    NSArray *features = [faceDetector featuresInImage:faceImage];
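    // Note (an addition, not in the original post): image.CGImage drops the
    // UIImage's orientation, so portrait photos may reach the detector rotated;
    // if detection misses such faces, the EXIF orientation can be passed via
    // featuresInImage:options: with the CIDetectorImageOrientation key.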
    // Get the image size
    CGSize inputImageSize = [faceImage extent].size;
    // Flip the coordinates along the y-axis: Core Image uses a bottom-left
    // origin, UIKit a top-left one
    CGAffineTransform transform = CGAffineTransformScale(CGAffineTransformIdentity, 1, -1);
    // Shift back up into UIKit coordinates
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    // Iterate over the detected faces
    for (CIFaceFeature *faceFeature in features) {
        // Map the face bounds into UIKit coordinates
        CGRect faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform);
        CGSize viewSize = _imageView.bounds.size;
        CGFloat scale = MIN(viewSize.width / inputImageSize.width,
                            viewSize.height / inputImageSize.height);
        CGFloat offsetX = (viewSize.width - inputImageSize.width * scale) / 2;
        CGFloat offsetY = (viewSize.height - inputImageSize.height * scale) / 2;
        // Scale from image coordinates to the image view (aspect-fit)
        CGAffineTransform scaleTransform = CGAffineTransformMakeScale(scale, scale);
        // Apply the scale, then center with the offsets
        faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, scaleTransform);
        faceViewBounds.origin.x += offsetX;
        faceViewBounds.origin.y += offsetY;
        // Outline the face region
        UIView *faceView = [[UIView alloc] initWithFrame:faceViewBounds];
        faceView.layer.borderWidth = 2;
        faceView.layer.borderColor = [[UIColor redColor] CGColor];
        [_imageView addSubview:faceView];
        // Check whether a left-eye position was detected
        if (faceFeature.hasLeftEyePosition) {}
        // Check whether a right-eye position was detected
        if (faceFeature.hasRightEyePosition) {}
        // Check whether a mouth position was detected
        if (faceFeature.hasMouthPosition) {}
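        // Sketch (an addition, not in the original post): the feature positions
        // are reported in the same Core Image coordinate space as
        // faceFeature.bounds, so the same flip + scale + offset pipeline should
        // map them into the view, e.g. for the left eye:
        if (faceFeature.hasLeftEyePosition) {
            CGPoint leftEye = CGPointApplyAffineTransform(faceFeature.leftEyePosition, transform);
            leftEye = CGPointApplyAffineTransform(leftEye, scaleTransform);
            leftEye.x += offsetX;
            leftEye.y += offsetY;
            // leftEye is now in _imageView coordinates, ready for a marker view
        }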
    }
}
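For context, here is a minimal calling sketch, not part of the original post. It assumes a view controller with an _imageView outlet and a hypothetical bundled image named face.jpg; the content mode must be aspect-fit, since the scale/offset math above reproduces an aspect-fit layout:

- (void)viewDidLoad {
    [super viewDidLoad];
    // Aspect-fit display is what the frame math in faceDetectWithImage: assumes
    _imageView.contentMode = UIViewContentModeScaleAspectFit;
    UIImage *image = [UIImage imageNamed:@"face.jpg"]; // hypothetical asset name
    _imageView.image = image;
    [self faceDetectWithImage:image];
}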
For the underlying principle, see my Jianshu article "iOS之使用CoreImage进行人脸识别" (Using CoreImage for face detection on iOS); the code there is written in Swift, and a download link is given at the end of that article. The Objective-C and Swift versions work on the same principle, with only minor differences in the code.