{ "cells": [ { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [], "source": [ "import cv2" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [], "source": [ "faceProto = 'opencv_face_detector.pbtxt'\n", "faceModel = 'opencv_face_detector_uint8.pb'\n", "\n", "ageProto = 'age_deploy.prototxt'\n", "ageModel = 'age_net.caffemodel'\n", "\n", "genderProto = 'gender_deploy.prototxt'\n", "genderModel = 'gender_net.caffemodel'" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [], "source": [ "faceNet = cv2.dnn.readNet(faceModel, faceProto)\n", "ageNet = cv2.dnn.readNet(ageModel, ageProto)\n", "genderNet = cv2.dnn.readNet(genderModel, genderProto)" ] }, { "cell_type": "code", "execution_count": 40, "metadata": {}, "outputs": [], "source": [ "ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\n", "genderList = ['Male', 'Female']\n", "MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)" ] }, { "cell_type": "code", "execution_count": 41, "metadata": {}, "outputs": [], "source": [ "def faceBox(faceNet, frame):\n", " frameWidth = frame.shape[1]\n", " frameHeight = frame.shape[0]\n", "\n", " blob = cv2.dnn.blobFromImage(frame, 1.0, (227, 227), [104, 117, 123], swapRB = False)\n", " faceNet.setInput(blob)\n", " detection = faceNet.forward()\n", " #print(detection.shape)\n", " bboxs = []\n", "\n", " for i in range(detection.shape[2]):\n", " confidence = detection[0,0,i,2]\n", " if confidence > 0.7:\n", " x1 = int(detection[0,0,i,3] * frameWidth)\n", " y1 = int(detection[0,0,i,4] * frameHeight)\n", "\n", " x2 = int(detection[0,0,i,5] * frameWidth)\n", " y2 = int(detection[0,0,i,6] * frameHeight)\n", "\n", " bboxs.append([x1, y1, x2, y2])\n", " cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 1)\n", " return frame, bboxs" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [], "source": [ "from cProfile import label\n", "padding = 20\n", "\n", "video = cv2.VideoCapture(0)\n", "while True:\n", " ret, frame = video.read()\n", " frame, bboxs = faceBox(faceNet, frame)\n", "\n", " for bbox in bboxs:\n", " #face = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]\n", " face = frame[max(0,bbox[1]-padding):min(bbox[3]+padding,frame.shape[0]-1),max(0,bbox[0]-padding):min(bbox[2]+padding, frame.shape[1]-1)]\n", " blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB = False)\n", " genderNet.setInput(blob)\n", " genderPred = genderNet.forward()\n", " gender = genderList[genderPred[0].argmax()]\n", "\n", "\n", " ageNet.setInput(blob)\n", " agePred = ageNet.forward()\n", " age = ageList[agePred[0].argmax()]\n", "\n", "\n", " label = \"{},{}\".format(gender, age)\n", " #cv2.rectangle(frame, (bbox[0], bbox[1]-30), (bbox[2], bbox[1]), (0,255, 255), -1)\n", " cv2.putText(frame, label, (bbox[0], bbox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, cv2.LINE_AA)\n", "\n", "\n", "\n", " cv2.imshow('Ang & Gender Recognition', frame)\n", " k = cv2.waitKey(1)\n", "\n", " if k == ord('q'):\n", " break\n", "\n", "video.release()\n", "cv2.destroyAllWindows()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3.7.13 ('tf-venv')", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": 
"python", "pygments_lexer": "ipython3", "version": "3.7.13" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": "0ba149fa7e93be1838969562ba936f03b106743956a9865899747345046c9c03" } } }, "nbformat": 4, "nbformat_minor": 2 }