Continuing from the previous post, this article covers the basics of image recognition.

3.19 Detecting Eyes [1][2][3]
First, detect a face in the image, then search for eyes within the detected face rectangle.

[Steps]
 (1) For face detection, see "Detecting Faces".
 (2) Load the eye classifier.
 (3) Extract the image of the detected face region.
 (4) Search for eyes within that region (see the sketch after this list).
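
The core of this flow is a nested call to detectMultiScale: detect faces on the whole grayscale image, then run the eye classifier only on each face's ROI. The following is a minimal sketch of just that part (the class, method, and variable names are placeholders, and it assumes the same List<Rect>-based detectMultiScale signature and OpenCV-for-Android classes used in the full listing below):

import java.util.LinkedList;
import java.util.List;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.opencv.core.Size;
import org.opencv.objdetect.CascadeClassifier;

class NestedDetectSketch {
    // Returns eye rectangles in whole-image coordinates, assuming both
    // classifiers have already been loaded and gray is a grayscale Mat.
    static List<Rect> detectEyes(Mat gray, CascadeClassifier faceCascade, CascadeClassifier eyeCascade) {
        List<Rect> faces = new LinkedList<Rect>();
        faceCascade.detectMultiScale(gray, faces, 1.1, 2, 2, new Size(30, 30));

        List<Rect> eyesInImage = new LinkedList<Rect>();
        for (Rect face : faces) {
            // Restrict the eye search to the detected face rectangle
            Mat faceROI = new Mat(gray, face);
            List<Rect> eyes = new LinkedList<Rect>();
            eyeCascade.detectMultiScale(faceROI, eyes, 1.1, 3, 2, new Size(10, 10));
            for (Rect eye : eyes) {
                // Eye coordinates are relative to the ROI, so shift them back
                eyesInImage.add(new Rect(face.x + eye.x, face.y + eye.y, eye.width, eye.height));
            }
        }
        return eyesInImage;
    }
}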

[Code]
package com.moonlight_aska.android.opencv.image19;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;

import org.opencv.android.Utils;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;

import android.os.Bundle;
import android.app.Activity;
import android.content.Context;
import android.graphics.Bitmap;
import android.widget.ImageView;

public class Image19Activity extends Activity {

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
       
        // cv::Mat img = cv::imread("../../image/lenna.png", 1);
        Mat srcMat = Highgui.imread("/sdcard/OpenCV/sample/lena.jpg", 1);
        if (!srcMat.empty()) {
            double scale = 2.0;
            // cv::Mat gray, smallImg(cv::saturate_cast<int>(img.rows/scale),
            //        cv::saturate_cast<int>(img.cols/scale), CV_8UC1);
            Mat grayMat = new Mat();
            // cv::Size takes (width, height), so pass cols first, then rows
            Size smallSize = new Size(srcMat.cols()/scale, srcMat.rows()/scale);
            Mat smallMat = new Mat(smallSize, CvType.CV_8UC1);
            // Convert to grayscale
            // cv::cvtColor(img, gray, CV_RGB2GRAY);
            Imgproc.cvtColor(srcMat, grayMat, Imgproc.COLOR_RGB2GRAY);
            // Shrink the image to reduce processing time
            // cv::resize(gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR);
            Imgproc.resize(grayMat, smallMat, smallMat.size(), 0, 0, Imgproc.INTER_LINEAR);
            // cv::equalizeHist(smallImg, smallImg);
            Imgproc.equalizeHist(smallMat, smallMat);
    
            try {
                // Load the face classifier
                // std::string cascadeName = "./haarcascade_frontalface_alt.xml";   // Haar-like
                InputStream inStream = getResources().openRawResource(R.raw.haarcascade_frontalface_alt);
                File cascadeDir = this.getDir("cascade", Context.MODE_PRIVATE);
                File cascadeFile = new File(cascadeDir, "haarcascade_frontalface_alt.xml");       
                // // std::string cascadeName = "./lbpcascade_frontalface.xml"; // LBP
                // InputStream inStream = getResources().openRawResource(R.raw.lbpcascade_frontalface);
                // File cascadeDir = this.getDir("cascade", Context.MODE_PRIVATE);
                // File cascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");
                FileOutputStream outStream;
                outStream = new FileOutputStream(cascadeFile);
                byte[] buf = new byte[2048];
                int rdBytes;
                while ((rdBytes = inStream.read(buf)) != -1) {
                    outStream.write(buf, 0, rdBytes);
                }
                outStream.close();
                inStream.close();
                // cv::CascadeClassifier cascade;
                // if(!cascade.load(cascadeName))
                //     return -1;
                CascadeClassifier cascade = new CascadeClassifier(cascadeFile.getAbsolutePath());
                if (cascade.empty()) {
                    cascade = null;
                    return;
                }
                else {
                    cascadeDir.delete();
                    cascadeFile.delete();  
                }
                // std::vector<cv::Rect> faces;
                List<Rect> faces = new LinkedList<Rect>();
                /// Multi-scale (face) search
                // image, output rectangles, scale factor, min neighbors, (flags), min size
                // cascade.detectMultiScale(smallImg, faces,
                //       1.1, 2,
                //       CV_HAAR_SCALE_IMAGE,
                //       cv::Size(30,30));
                cascade.detectMultiScale(smallMat, faces,
                        1.1, 2, 2 // flags: 2 = CV_HAAR_SCALE_IMAGE
                        , new Size(30, 30));
         
                // Load the eye classifier
                // std::string cascadeName = "./haarcascade_eye.xml";   // Haar-like

                inStream = getResources().openRawResource(R.raw.haarcascade_eye);
                cascadeDir = this.getDir("cascade", Context.MODE_PRIVATE);
                cascadeFile = new File(cascadeDir, "haarcascade_eye.xml");
       
                // // std::string nested_cascadeName = "./haarcascade_eye_tree_eyeglasses.xml";
                // InputStream inStream = getResources().openRawResource(R.raw.haarcascade_eye_tree_eyeglasses);
                // File cascadeDir = this.getDir("cascade", Context.MODE_PRIVATE);
                // File cascadeFile = new File(cascadeDir, "haarcascade_eye_tree_eyeglasses.xml");

                outStream = new FileOutputStream(cascadeFile);
                while ((rdBytes = inStream.read(buf)) != -1) {
                    outStream.write(buf, 0, rdBytes);
                } 
                outStream.close();
                inStream.close();

                // cv::CascadeClassifier nested_cascade;
                // if(!nested_cascade.load(nested_cascadeName))
                //     return -1;

                CascadeClassifier nested_cascade = new CascadeClassifier(cascadeFile.getAbsolutePath());
                if (nested_cascade.empty()) {
                    nested_cascade = null;
                    return;
                }
                else {
                    cascadeDir.delete();
                    cascadeFile.delete();  
                }
                // std::vector<cv::Rect>::const_iterator r = faces.begin();
                // for(; r != faces.end(); ++r ) {
                for(Rect r : faces) {
                    // Draw the detection result (face)
                    // cv::Point face_center;
                    // int face_radius;
                    // face_center.x = cv::saturate_cast<int>((r->x + r->width*0.5)*scale);
                    // face_center.y = cv::saturate_cast<int>((r->y + r->height*0.5)*scale);
                    // face_radius = cv::saturate_cast<int>((r->width + r->height)*0.25*scale);
                    // cv::circle(img, face_center, face_radius, cv::Scalar(80,80,255), 3, 8, 0);
                    Point face_center = new Point();
                    int face_radius;
                    face_center.x = (int)((r.x + r.width*0.5)*scale);
                    face_center.y = (int)((r.y + r.height*0.5)*scale);
                    face_radius = (int)((r.width + r.height)*0.25*scale);
                    Core.circle(srcMat, face_center, face_radius, new Scalar(80,80,255), 3, 8, 0);
                 
                    // cv::Mat smallImgROI = smallImg(*r);
                    // std::vector<cv::Rect> nestedObjects;

                    Mat smallMatROI = new Mat(smallMat, r);
                    List<Rect> nestedObjects = new LinkedList<Rect>();

                    /// Multi-scale (eye) search
                    // image, output rectangles, scale factor, min neighbors, (flags), min size
                    // nested_cascade.detectMultiScale(smallImgROI, nestedObjects,
                    //       1.1, 3,
                    //       CV_HAAR_SCALE_IMAGE,
                    //       cv::Size(10,10));
                    nested_cascade.detectMultiScale(smallMatROI, nestedObjects,
                            1.1, 3, 2 // flags: 2 = CV_HAAR_SCALE_IMAGE
                            , new Size(10, 10));

                    // Draw the detection results (eyes)
                    // std::vector<cv::Rect>::const_iterator nr = nestedObjects.begin();
                    // for(; nr != nestedObjects.end(); ++nr ) {
                    //   cv::Point center;
                    //   int radius;
                    //   center.x = cv::saturate_cast<int>((r->x + nr->x + nr->width*0.5)*scale);
                    //   center.y = cv::saturate_cast<int>((r->y + nr->y + nr->height*0.5)*scale);
                    //   radius = cv::saturate_cast<int>((nr->width + nr->height)*0.25*scale);
                    //   cv::circle(img, center, radius, cv::Scalar(80,255,80), 3, 8, 0);
                    // }
                    for (Rect nr : nestedObjects) {
                        Point center = new Point();
                        int radius;
                        center.x = (int)((r.x + nr.x + nr.width*0.5)*scale);
                        center.y = (int)((r.y + nr.y + nr.height*0.5)*scale);
                        radius = (int)((nr.width + nr.height)*0.25*scale);
                        Core.circle(srcMat, center, radius, new Scalar(80,255,80), 3, 8, 0);
                    }
                }
                Bitmap faceImg = convMatToBitmap(srcMat);
                ImageView faceView = (ImageView)findViewById(R.id.face_view);
                faceView.setImageBitmap(faceImg);
            } catch (FileNotFoundException e1) {
                // Failed to create the temporary cascade file
                e1.printStackTrace();
            } catch (IOException e) {
                // Failed to copy the cascade resource
                e.printStackTrace();
            }
        }
    }
   
    // Convert a Mat to a Bitmap
    Bitmap convMatToBitmap(Mat src) {
        Mat dst = new Mat();
        // Convert BGR to RGBA
        Imgproc.cvtColor(src, dst, Imgproc.COLOR_BGR2RGBA, 4);
        Bitmap img = Bitmap.createBitmap(src.width(), src.height(), Bitmap.Config.ARGB_8888);
        // Copy the Mat data into the Bitmap
        Utils.matToBitmap(dst, img);
        return img;
    }
}


[Input Image]
image02-1

[Result]
Image19

By using other trained cascade files, you can also detect the eyes of people wearing glasses, as well as noses, mouths, and so on, so give it a try.
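
The commented-out lines in the listing already hint at one such swap: loading haarcascade_eye_tree_eyeglasses.xml instead of haarcascade_eye.xml makes the eye search more tolerant of glasses. A minimal sketch of that change (assuming the XML file has been copied into res/raw; everything else in the listing stays the same):

    // Swap in the glasses-tolerant eye cascade instead of haarcascade_eye.xml
    inStream = getResources().openRawResource(R.raw.haarcascade_eye_tree_eyeglasses);
    cascadeFile = new File(cascadeDir, "haarcascade_eye_tree_eyeglasses.xml");
    // ...the copy-to-file and CascadeClassifier loading steps are unchanged...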

----
References:
 [1] OpenCV 2 プログラミングブック OpenCV2.2/2.3対応
 [2] OpenCV逆引きリファレンス―OpenCV-CookBook
 [3] OpenCVで学ぶ画像認識