机器学习-CoreML人脸识别


使用Vision框架进行人脸识别,不仅可以识别出人脸的面部轮廓,还可以识别出人脸的特征点,比如眼睛、鼻子、嘴巴等。进行人脸请求的类是VNDetectFaceRectanglesRequest,人脸特征请求的类是VNDetectFaceLandmarksRequest,请求结果的信息类是VNFaceObservation。下面直接上代码。

  1. 搭建基本UI

  2. 识别面部轮廓
1
2
3
4
5
6
/// Runs a face-rectangle detection request on the given image.
/// Results arrive asynchronously via `handleFaceDetect`.
/// - Parameter image: the image to scan for faces.
func processImage(image:UIImage) {
    self.resultLabel.text = "processing..."
    // A UIImage backed by a CIImage (or an empty image) has a nil cgImage;
    // fail gracefully instead of force-unwrapping and crashing.
    guard let cgImage = image.cgImage else {
        self.resultLabel.text = "invalid image"
        return
    }
    let handler = VNImageRequestHandler(cgImage: cgImage)
    let request = VNDetectFaceRectanglesRequest(completionHandler: handleFaceDetect)
    do {
        try handler.perform([request])
    } catch {
        // Surface the failure instead of silently swallowing it with try?.
        self.resultLabel.text = "detect failed: \(error.localizedDescription)"
    }
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
/// Completion handler for VNDetectFaceRectanglesRequest: shows the detected
/// face count and overlays one outline view per face on `btnImageView`.
/// - Parameters:
///   - request: the finished Vision request; its results should be VNFaceObservation.
///   - error: any error reported by Vision (the results drive the UI here).
func handleFaceDetect(request:VNRequest,error:Error?) {
    // A failed cast / missing results is not worth crashing a demo app over;
    // report it on the label and bail out instead of calling fatalError.
    guard let observations = request.results as? [VNFaceObservation] else {
        self.resultLabel.text = "no face"
        return
    }
    // Space added before "face" for consistency with the landmarks handler.
    self.resultLabel.text = "\(observations.count) face"

    // Remove outline views (tag 100) left over from a previous detection pass.
    for subView in self.btnImageView.subviews where subView.tag == 100 {
        subView.removeFromSuperview()
    }

    for observation in observations {
        self.showFaceContour(faceObversion: observation, toView: self.btnImageView)
    }
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/// Overlays a green outline view on `view` marking one detected face.
/// Vision's `boundingBox` is normalized (0...1) with a bottom-left origin,
/// so it is scaled up and flipped into UIKit's top-left coordinate space.
/// - Parameters:
///   - face: the observation whose bounding box will be outlined.
///   - view: the view (matching the displayed image) that receives the outline.
func showFaceContour(faceObversion face : VNFaceObservation, toView view : UIView) {
    let normalized = face.boundingBox
    let container = view.bounds

    // Scale the normalized size up to view coordinates.
    let width = container.width * normalized.width
    let height = container.height * normalized.height

    // Flip the y axis: Vision measures from the bottom-left corner.
    let originX = container.width * normalized.minX
    let originY = container.height - container.height * normalized.minY - height

    let outline = UIView(frame: CGRect(x: originX, y: originY, width: width, height: height))
    outline.layer.borderWidth = 1
    outline.layer.borderColor = UIColor.green.cgColor
    outline.layer.cornerRadius = 1
    // Tag 100 lets the handler find and remove stale outlines on the next pass.
    outline.tag = 100
    view.addSubview(outline)
}

  • 相当于在人脸上画了一个轮廓视图添加上去。boundingBox是CGRect类型,但是boundingBox返回的是x,y,w,h的比例(value:0~1),需要进行转换。原始坐标系的原点是左上角,而这里是左下角,并且y轴的方向和原始坐标系相反。

3.面部轮廓展示

4.识别面部特征

1
2
3
4
5
6
/// Runs a face-landmarks detection request on the given image.
/// Results arrive asynchronously via `handleFaceDetect`.
/// - Parameter image: the image to scan for faces and facial landmarks.
func processImage(image:UIImage) {
    self.resultLabel.text = "processing..."
    // A UIImage backed by a CIImage (or an empty image) has a nil cgImage;
    // fail gracefully instead of force-unwrapping and crashing.
    guard let cgImage = image.cgImage else {
        self.resultLabel.text = "invalid image"
        return
    }
    let handler = VNImageRequestHandler(cgImage: cgImage)
    let request = VNDetectFaceLandmarksRequest(completionHandler: handleFaceDetect)
    do {
        try handler.perform([request])
    } catch {
        // Surface the failure instead of silently swallowing it with try?.
        self.resultLabel.text = "detect failed: \(error.localizedDescription)"
    }
}

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
/// Completion handler for VNDetectFaceLandmarksRequest: shows the detected
/// face count and redraws the image with each face's bounding box and
/// landmark polylines burned in.
/// - Parameters:
///   - request: the finished Vision request; its results should be VNFaceObservation.
///   - error: any error reported by Vision (the results drive the UI here).
func handleFaceDetect(request:VNRequest,error:Error?) {
    // A failed cast / missing results is not worth crashing a demo app over;
    // report it on the label and bail out instead of calling fatalError.
    guard let observations = request.results as? [VNFaceObservation] else {
        self.resultLabel.text = "no face"
        return
    }
    self.resultLabel.text = "\(observations.count) face"

    for observation in observations {
        // Bug fix: pass only THIS face's landmark regions together with its
        // own bounding box. The original accumulated regions across faces,
        // so earlier faces' points were remapped into later faces' boxes
        // and redrawn once per remaining face.
        let regions = self.showFaceFeature(faceObversion: observation, toView: self.btnImageView)
        resultImage = self.drawOnImage(source: self.resultImage,
                                       boundingRect: observation.boundingBox,
                                       faceLandmarkRegions: regions)
    }
    self.btnImageView.setBackgroundImage(resultImage, for: .normal)
}

获取特征点

1
2
3
4
5
6
7
8
9
10
11
/// Collects the landmark regions of interest (right eye and nose) for one face.
/// - Parameters:
///   - face: the observation whose `landmarks` are read.
///   - view: unused in the body; kept for interface compatibility with callers.
/// - Returns: the regions Vision detected, or an empty array when the
///   observation carries no landmarks at all.
func showFaceFeature(faceObversion face: VNFaceObservation, toView view: UIView) ->[VNFaceLandmarkRegion2D] {
    guard let landmarks = face.landmarks else { return [] }
    // compactMap drops whichever optional regions Vision did not detect.
    return [landmarks.rightEye, landmarks.nose].compactMap { $0 }
}

重新绘制图片(含面部特征)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
/// Redraws `source` with the face bounding box stroked in black and each
/// landmark region's point path stroked in red.
/// Vision coordinates are normalized with a bottom-left origin, so the
/// graphics context is flipped vertically before anything is drawn.
/// - Parameters:
///   - source: the image to annotate.
///   - boundingRect: the face's normalized (0...1) bounding box.
///   - faceLandmarkRegions: regions whose points are normalized relative
///     to `boundingRect`.
/// - Returns: a new image with the annotations burned in.
func drawOnImage(source: UIImage,
boundingRect: CGRect,
faceLandmarkRegions: [VNFaceLandmarkRegion2D]) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(source.size, false, 1)
    let context = UIGraphicsGetCurrentContext()!
    // Flip into a bottom-left origin so Vision's coordinates map directly.
    context.translateBy(x: 0, y: source.size.height)
    context.scaleBy(x: 1.0, y: -1.0)
    context.setBlendMode(CGBlendMode.colorBurn)
    context.setLineJoin(.round)
    context.setLineCap(.round)
    context.setShouldAntialias(true)
    context.setAllowsAntialiasing(true)
    context.setLineWidth(5.0)

    // Draw the original image as the backdrop.
    context.draw(source.cgImage!, in: CGRect(origin: .zero, size: source.size))

    // Stroke the face bounding box, scaled from normalized units to pixels.
    let faceRect = CGRect(x: boundingRect.minX * source.size.width,
                          y: boundingRect.minY * source.size.height,
                          width: boundingRect.width * source.size.width,
                          height: boundingRect.height * source.size.height)
    context.setStrokeColor(UIColor.black.cgColor)
    context.addRect(faceRect)
    context.drawPath(using: .stroke)

    // Stroke each landmark region as a polyline; its points are normalized
    // within the bounding box, so offset and scale them by faceRect.
    context.setStrokeColor(UIColor.red.cgColor)
    for region in faceLandmarkRegions {
        let vertices = (0..<region.pointCount).map { index -> CGPoint in
            let point = region.normalizedPoints[index]
            return CGPoint(x: faceRect.minX + CGFloat(point.x) * faceRect.width,
                           y: faceRect.minY + CGFloat(point.y) * faceRect.height)
        }
        context.addLines(between: vertices)
        context.drawPath(using: .stroke)
    }

    let annotated : UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return annotated
}

VNDetectFaceLandmarksRequest请求返回的也是VNFaceObservation,但是这个时候VNFaceObservation 对象的landmarks属性就会有值,这个属性里面存储了人物面部特征的点
如:

  • faceContour:脸部轮廓
  • leftEye、rightEye: 左眼、右眼
  • nose: 鼻子
  • noseCrest: 鼻嵴
  • outerLips、innerLips: 外唇、内唇
  • leftEyebrow、rightEyebrow: 左眉毛、右眉毛
  • leftPupil、rightPupil: 左瞳、右瞳

5.面部特征点展示



Demo下载


如有任何疑问或问题请联系我:fishnewsdream@gmail.com,欢迎交流,共同提高!

Objective-C/Swift技术开发交流群201556264,讨论何种技术并不受限,欢迎各位大牛百家争鸣!

微信公众号OldDriverWeekly,欢迎关注并提出宝贵意见

老司机iOS周报,欢迎关注或订阅

刚刚在线工作室,欢迎关注或提出建设性意见!

刚刚在线论坛, 欢迎踊跃提问或解答!

如有转载,请注明出处,谢谢!

本站总访问量 本文总阅读量