基于OpenCV和wxWidgets实现人脸跟踪
2013-05-28 12:53
302 查看
// Face tracking demo: OpenCV (Haar face detection + CamShift) rendered in a
// wxWidgets window.
//
// Structure:
//   App         - application bootstrap; creates the frame and the worker thread.
//   GUI         - top-level frame; owns the bitmap that OnPaint draws.
//   FTThread    - worker thread: captures camera frames, runs the tracker,
//                 converts cv::Mat -> wxImage and queues the result to the
//                 GUI thread (it never touches GUI objects directly).
//   FaceTracker - (re)initialises a hue histogram from a Haar-detected face,
//                 then tracks it with CamShift on the hue back projection.
#include <wx/wx.h>
#include <wx/thread.h>
#include <opencv2/video/tracking.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <cstring>

using namespace std;
using namespace cv;

class App : public wxApp
{
    virtual bool OnInit();
};

IMPLEMENT_APP(App);

class GUI : public wxFrame
{
public:
    GUI();
    void OnPaint(wxPaintEvent &evt);
    void OnSize(wxSizeEvent &evt);
    // Handler for frames queued by the worker thread (runs on the GUI thread).
    void OnFrameReady(wxThreadEvent &evt);
    // Replace the displayed bitmap. Must be called on the GUI thread only.
    void Notify(wxImage &image);
private:
    wxBitmap m_bmp;   // most recent frame, scaled for display
    DECLARE_EVENT_TABLE();
};

BEGIN_EVENT_TABLE(GUI, wxFrame)
    EVT_PAINT(GUI::OnPaint)
    EVT_SIZE(GUI::OnSize)
    EVT_THREAD(wxID_ANY, GUI::OnFrameReady)
END_EVENT_TABLE()

class FTThread : public wxThread
{
public:
    FTThread(GUI *gui);
    virtual void *Entry();
private:
    // Queue one finished frame to the GUI thread (thread-safe hand-off).
    void ToGUI(wxImage &image);
    // Convert a 1- or 3-channel cv::Mat into an RGB wxImage.
    void Mat2wxImage(Mat &frame, wxImage &image);
    // Depth/scale conversion helper used when the source is not 8-bit.
    Mat convertType(const Mat &srcImg, int toType, double alpha, double beta);
    GUI *m_gui;   // not owned; in this demo the frame outlives the thread
};

class FaceTracker
{
public:
    FaceTracker();
    ~FaceTracker() {}
    // Detect a face in `frame` and build the hue histogram used for tracking.
    // Returns false when no face was found (previous state is kept).
    bool Init(Mat &frame);
    // Run CamShift on one frame and draw the track ellipse into `frame`.
    void Track(Mat &frame);
private:
    Mat m_hist;                    // 1-D hue histogram of the detected face
    Rect m_facerect;               // current CamShift search window
    CascadeClassifier m_cascade;   // Haar frontal-face detector
};

bool App::OnInit()
{
    wxInitAllImageHandlers();
    GUI *gui = new GUI;
    gui->Show(true);
    SetTopWindow(gui);

    FTThread *thread = new FTThread(gui);
    if (wxTHREAD_NO_ERROR != thread->Create())
    {
        wxLogError("Can't create the thread!");
        return false;
    }
    if (wxTHREAD_NO_ERROR != thread->Run())
    {
        // was: "Can't create the thread!" — wrong message for a Run() failure
        wxLogError("Can't run the thread!");
        return false;
    }
    return true;
}

GUI::GUI()
    : wxFrame(NULL, -1, "FaceTracker", wxPoint(-1, -1), wxSize(800, 600))
{
    // Placeholder picture until the first camera frame arrives. Guard against
    // a missing file: Rescale()/wxBitmap on an invalid image would assert.
    wxImage img("sample.jpg");
    if (img.IsOk())
    {
        img.Rescale(800, 600);
        m_bmp = wxBitmap(img);
    }
    SetBackgroundColour(*wxBLACK);
}

void GUI::Notify(wxImage &image)
{
    m_bmp = wxBitmap(image.Scale(800, 600));
    Refresh(false);
}

void GUI::OnFrameReady(wxThreadEvent &evt)
{
    // The payload is a deep-copied wxImage, so no pixel data is shared with
    // the worker thread.
    wxImage image = evt.GetPayload<wxImage>();
    if (image.IsOk())
        Notify(image);
}

void GUI::OnPaint(wxPaintEvent &evt)
{
    wxPaintDC dc(this);
    if (m_bmp.IsOk())
        dc.DrawBitmap(m_bmp, wxPoint(0, 0));   // was (-1,-1): draw at the origin
}

void GUI::OnSize(wxSizeEvent &evt)
{
    wxSize size = evt.GetSize();
    // An invalid bitmap or a degenerate size would make ConvertToImage/Scale assert.
    if (!m_bmp.IsOk() || size.GetWidth() <= 0 || size.GetHeight() <= 0)
        return;
    wxImage image = m_bmp.ConvertToImage();
    m_bmp = wxBitmap(image.Scale(size.GetWidth(), size.GetHeight()));
    Refresh(false);
}

FTThread::FTThread(GUI *gui)
    : wxThread()
{
    m_gui = gui;
}

void *FTThread::Entry()
{
    unsigned char count = 5;   // 5 == "time to (re)detect the face"
    Mat frame;
    wxImage image;

    VideoCapture cap(0);
    if (!cap.isOpened())
        return NULL;

    FaceTracker tracker;
    while (!TestDestroy())   // honour shutdown requests instead of looping forever
    {
        cap >> frame;
        if (frame.empty())
        {
            Sleep(10);   // don't busy-spin if the camera hiccups
            continue;
        }

        // Re-run the (expensive) Haar detection every 5th frame; in between,
        // CamShift alone follows the face.
        if (5 == count++)
        {
            if (!tracker.Init(frame))
            {
                count = 5;   // keep trying to detect until a face shows up
                continue;
            }
            else
                count = 0;
        }
        tracker.Track(frame);

        Mat2wxImage(frame, image);
        ToGUI(image);
        Sleep(100);   // ~10 fps is plenty for this demo
    }
    return NULL;
}

void FTThread::ToGUI(wxImage &image)
{
    // Never call GUI methods from a worker thread: queue a thread event that
    // the GUI thread handles in GUI::OnFrameReady. Copy() forces a deep copy
    // so the (non-thread-safe) reference-counted pixel data is not shared.
    wxThreadEvent evt;
    evt.SetPayload(image.Copy());
    wxQueueEvent(m_gui, evt.Clone());
}

Mat FTThread::convertType(const Mat &srcImg, int toType, double alpha, double beta)
{
    Mat dstImg;
    srcImg.convertTo(dstImg, toType, alpha, beta);
    return dstImg;
}

void FTThread::Mat2wxImage(Mat &frame, wxImage &image)
{
    // data dimension
    int w = frame.cols, h = frame.rows;
    int size = w * h * 3 * sizeof(unsigned char);

    // Buffer that wxImage takes ownership of below (wxImage frees it with
    // free(), matching this malloc).
    unsigned char *wxData = (unsigned char *)malloc(size);
    memset(wxData, 0, size);   // unsupported inputs show black, not garbage memory

    // Matrix header wrapping wxData so OpenCV writes straight into it.
    Mat cvRGBImg = Mat(h, w, CV_8UC3, wxData);
    switch (frame.channels())
    {
    case 1:   // 1-channel case: expand to three identical channels
        // convert type if source is not an 8-bit integer matrix
        if (frame.depth() != CV_8U)
            cvtColor(convertType(frame, CV_8U, 255, 0), cvRGBImg, CV_GRAY2RGB);
        else
            cvtColor(frame, cvRGBImg, CV_GRAY2RGB);
        break;
    case 3:   // 3-channel case: swap R&B channels
        {
            int mapping[] = {0, 2, 1, 1, 2, 0};   // CV(BGR) to WX(RGB)
            mixChannels(&frame, 1, &cvRGBImg, 1, mapping, 3);
        }
        break;
    default:
        wxLogError(wxT("Cv2WxImage : input image (#channel=%d) should be either 1- or 3-channel"),
                   frame.channels());
    }
    image.Destroy();                  // free existing data if there's any
    image = wxImage(w, h, wxData);    // wxImage now owns wxData
}

FaceTracker::FaceTracker()
{
    String face_cascade_name = "haarcascade_frontalface_alt.xml";
    if (!m_cascade.load(face_cascade_name))
    {
        wxLogError("error load cascade file");
    }
}

bool FaceTracker::Init(Mat &frame)
{
    Mat frame_gray;
    vector<Rect> faces;
    int ch[] = {0, 0};            // copy channel 0 (hue) of HSV into `hue`
    int hsize = 16;               // 16 hue bins
    float hranges[] = {0, 180};   // OpenCV hue range for 8-bit images
    const float *pRanges = hranges;
    Mat hsv, mask, hue;

    // Detect a face
    cvtColor(frame, frame_gray, CV_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    m_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    if (!faces.size())
        return false;   // no face this time: caller keeps using the previous result
    m_facerect = faces[0];

    cvtColor(frame, hsv, CV_BGR2HSV);                              // =>HSV
    inRange(hsv, Scalar(0, 30, 10), Scalar(180, 256, 256), mask);  // =>MASK
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);                          // =>H

    // Hue histogram of the face region, masked to well-saturated pixels.
    Mat roi(hue, m_facerect), maskroi(mask, m_facerect);
    calcHist(&roi, 1, 0, maskroi, m_hist, 1, &hsize, &pRanges);
    normalize(m_hist, m_hist, 0, 255, CV_MINMAX);
    return true;
}

void FaceTracker::Track(Mat &frame)
{
    // Plain locals instead of function statics: nothing here needs to persist
    // between calls, and statics would be shared across all instances.
    Mat hsv, hue, mask, backproj;
    RotatedRect trackBox;
    int ch[] = {0, 0};
    float HueRange[] = {0, 180};
    const float *pRanges = HueRange;

    // CamShift asserts on an empty search window (it can shrink to nothing
    // when the track is lost).
    if (m_facerect.area() <= 0)
        return;

    cvtColor(frame, hsv, CV_BGR2HSV);                              // =>HSV
    inRange(hsv, Scalar(0, 30, 10), Scalar(180, 256, 256), mask);  // =>MASK
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);                          // =>H

    calcBackProject(&hue, 1, 0, m_hist, backproj, &pRanges);
    backproj &= mask;
    trackBox = CamShift(backproj, m_facerect,
                        // termination criteria for the underlying mean-shift iterations
                        TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
    ellipse(frame, trackBox, Scalar(0, 0, 255), 3, CV_AA);
}
相关文章推荐
- python 调用摄像头,基于opencv 的人脸检测实现
- 基于opencv人脸检测原理及实现
- 基于python3 OpenCV3实现静态图片人脸识别
- 基于粒子滤波的人脸跟踪实现------立项篇
- 基于opencv实现人脸定位
- (学习笔记二)——基于opencv人脸检测原理及实现
- 基于openCV实现人脸检测
- 基于OpenCV的人脸识别算法之二---代码实现
- Linux系统下利用OpenCV实现人脸检测和基于LBPH算法的人脸识别
- 【AdaBoost算法】基于OpenCV实现人脸检测Demo
- Python基于OpenCV实现视频的人脸检测
- 使用 HTML5, javascript, webrtc, websockets, Jetty 和 OpenCV 实现基于 Web 的人脸识别
- OpenCV与Compressive Tracking实现人脸的实时检测与跟踪
- python结合opencv实现人脸检测与跟踪
- python结合opencv实现人脸检测与跟踪
- OpenCV + python 实现人脸检测(基于照片和视频进行检测)
- Opencv基于CamShift算法实现目标跟踪
- 基于Opencv的目标检测与跟踪阴影去除算法实现
- 基于python OpenCV实现动态人脸检测
- 基于opencv人脸检测原理及实现