OpenCV in Practice: Pedestrian Detection

References:

http://blog.csdn.net/xingchenbingbuyu/article/details/51255253

http://blog.csdn.net/garfielder007/article/details/50441048


Pedestrian detection is a hot and highly practical topic in computer vision; in autonomous driving in particular, its importance is self-evident.

Having already worked through face detection, pedestrian detection feels much simpler. The process is roughly the same: first load (or set) a classifier, then run multi-scale detection on the image.

C++:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    Mat img;
    vector<Rect> people;
    img = imread("xingren.jpg", 1);

    // Create a HOG descriptor with the default parameters,
    // or configure it yourself in the format shown below:
    HOGDescriptor defaultHog;
        //(cv::Size(64, 128), cv::Size(16, 16), cv::Size(8, 8),
        // cv::Size(8, 8), 9, 1, -1,
        // cv::HOGDescriptor::L2Hys, 0.2, true,
        // cv::HOGDescriptor::DEFAULT_NLEVELS);

    // Use the built-in default people detector as the SVM classifier
    defaultHog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

    // Run multi-scale pedestrian detection on the image;
    // the results come back as bounding rectangles
    defaultHog.detectMultiScale(img, people, 0, Size(8, 8), Size(0, 0), 1.03, 2);

    // Draw a rectangle around each detected pedestrian
    for (size_t i = 0; i < people.size(); i++)
    {
        Rect r = people[i];
        rectangle(img, r.tl(), r.br(), Scalar(0, 0, 255), 3);
    }

    namedWindow("Pedestrian Detection", CV_WINDOW_AUTOSIZE);
    imshow("Pedestrian Detection", img);
    waitKey(0);

    return 0;
}

Python:

import cv2
from imutils.object_detection import non_max_suppression
import numpy as np

img = cv2.imread("./image/img.jpg")
orig = img.copy()

# Create a HOG descriptor with the default parameters
defaultHog = cv2.HOGDescriptor()

# Use the built-in default people detector as the SVM classifier
defaultHog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Run multi-scale pedestrian detection; the results come back as
# (x, y, w, h) rectangles plus one SVM weight per rectangle
(rects, weights) = defaultHog.detectMultiScale(img, winStride=(4, 4),
                                               padding=(8, 8), scale=1.05)

# Draw the raw detections on a copy of the image
for (x, y, w, h) in rects:
    cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

# Apply non-maximum suppression and draw the surviving boxes
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
for (xA, yA, xB, yB) in pick:
    cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2)

cv2.imshow("Before NMS", orig)
cv2.imshow("After NMS", img)
cv2.waitKey(0)
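
The merging step above is done by non_max_suppression from imutils: detectMultiScale typically fires several overlapping boxes on a single person, and suppression keeps only one of each stack. To make the idea concrete, here is a minimal NumPy sketch of greedy suppression under the same overlapThresh=0.65 convention; the name nms_sketch is mine, and the real imutils implementation differs in details:

import numpy as np

def nms_sketch(boxes, overlap_thresh=0.65):
    # boxes: N x 4 array of [x1, y1, x2, y2]
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)          # process boxes from the bottom-most up
    pick = []
    while len(order) > 0:
        last = len(order) - 1
        i = order[last]
        pick.append(i)
        # intersection of the picked box with every remaining box
        xx1 = np.maximum(x1[i], x1[order[:last]])
        yy1 = np.maximum(y1[i], y1[order[:last]])
        xx2 = np.minimum(x2[i], x2[order[:last]])
        yy2 = np.minimum(y2[i], y2[order[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # fraction of each remaining box covered by that intersection
        overlap = (w * h) / area[order[:last]]
        # drop the picked index and everything overlapping it too much
        order = np.delete(order,
                          np.concatenate(([last], np.where(overlap > overlap_thresh)[0])))
    return boxes[pick].astype("int")

With the detections already converted to (x1, y1, x2, y2) arrays as in the script above, nms_sketch(rects) would play the same role as the imutils call.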

A fuller version of the same pipeline, following the second reference:

http://blog.csdn.net/garfielder007/article/details/50441048

# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--images", required=True, help="path to images directory")
# args = vars(ap.parse_args())

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# loop over the image paths
# for imagePath in paths.list_images(args["images"]):
# load the image and resize it to (1) reduce detection time
# and (2) improve detection accuracy
imagePath = "./image/img.jpg"
image = cv2.imread(imagePath)
image = imutils.resize(image, width=min(400, image.shape[1]))
orig = image.copy()

# detect people in the image
(rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                        padding=(8, 8), scale=1.05)

# draw the original bounding boxes
for (x, y, w, h) in rects:
    cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

# apply non-maxima suppression to the bounding boxes using a
# fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

# draw the final bounding boxes
for (xA, yA, xB, yB) in pick:
    cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

# show some information on the number of bounding boxes
filename = imagePath[imagePath.rfind("/") + 1:]
print("[INFO] {}: {} original boxes, {} after suppression".format(
    filename, len(rects), len(pick)))

# show the output images
cv2.imshow("Before NMS", orig)
cv2.imshow("After NMS", image)
cv2.waitKey(0)
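
Both scripts ignore the weights array that detectMultiScale returns alongside the boxes (one SVM score per detection). As a small, hedged extension of the script above, weak detections can be dropped before suppression; the 0.5 cut-off below is an assumption of mine, not a value from the referenced posts:

# Sketch: reuse hog / image / non_max_suppression from the script above
# and keep only detections whose SVM weight clears a threshold.
(raw_rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                            padding=(8, 8), scale=1.05)
weights = np.array(weights).flatten()

keep = weights > 0.5   # assumed cut-off; tune it on your own images
strong = np.array([[x, y, x + w, y + h] for (x, y, w, h) in raw_rects])[keep]
picked = non_max_suppression(strong, probs=None, overlapThresh=0.65)

# draw the confidence-filtered boxes in blue for comparison
for (xA, yA, xB, yB) in picked:
    cv2.rectangle(image, (xA, yA), (xB, yB), (255, 0, 0), 2)

On cluttered scenes this kind of filtering tends to remove obvious false positives, at the cost of occasionally dropping genuine but low-scoring pedestrians.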