# pip install --upgrade tensorflow
# pip install keras -U --pre
import cv2
import numpy as np
import dlib
import time
from keras.models import model_from_json
def detect(frame):
    # Haar-cascade face detection on a grayscale copy of the frame
    faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.15, minNeighbors=5)
    print("Found {0} faces!".format(len(faces)))
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
frame = cv2.imread("image.jpg")
detect(frame)
cv2.imshow("face found", frame)
cv2.imwrite('img_detected.jpg', frame)
cv2.waitKey(0)
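# If "haarcascade_frontalface_default.xml" is not in the working directory,
# the opencv-python wheels bundle the standard cascades; the path to the
# bundled copy can be built from cv2.data.haarcascades:
#
# cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
# faceCascade = cv2.CascadeClassifier(cascade_path)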
# use dlib to detect the 68 facial landmarks
def detect_landmarks(frame):
    detector = dlib.get_frontal_face_detector()
    # load the pre-trained 68-point landmark model
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)
    for rect in rects:
        landmarks = np.matrix([[p.x, p.y] for p in predictor(gray, rect).parts()])
        for point in landmarks:
            pos = (point[0, 0], point[0, 1])
            cv2.circle(frame, pos, 1, color=(0, 255, 0))
frame = cv2.imread("img.jpg")
detect_landmarks(frame)
cv2.imshow("face found", frame)
cv2.imwrite("img_landmark_detected.jpg", frame)
cv2.waitKey(0)
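# The 68-point model assigns fixed index ranges to each facial region
# (jaw 0-16, eyebrows 17-26, nose 27-35, eyes 36-47, mouth 48-67), so a
# single feature can be pulled out of the landmark matrix by row slicing.
# A minimal sketch, assuming `landmarks` is the (68, 2) matrix built in
# detect_landmarks above:
#
# left_eye_pts = landmarks[42:48]   # 6 (x, y) points of the left eye
# mouth_pts = landmarks[48:68]      # 20 (x, y) points of the mouth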
# use a Keras CNN to classify the facial expression
expression_labels = ['angry', 'fear', 'happy', 'sad', 'surprise', 'neutral']
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# load the model architecture from JSON, then the trained weights
with open("model.json", 'r') as json_file:
    model_json = json_file.read()
model = model_from_json(model_json)
model.load_weights('model.h5')
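# If the full model (architecture plus weights) had been saved with
# model.save() instead, the two-step load above collapses to a single
# call (assuming such a combined .h5 file exists):
#
# from keras.models import load_model
# model = load_model("full_model.h5")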
def predict_emotion(face_image_gray):
    # resize the face crop to the 48x48 input the network expects
    re_img = cv2.resize(face_image_gray, (48, 48), interpolation=cv2.INTER_AREA)
    # channels-first shape (1, 1, 48, 48); use (1, 48, 48, 1) instead if the
    # model was trained with Keras's default channels_last image_data_format
    image = re_img.reshape(1, 1, 48, 48)
    list_of_list = model.predict(image, batch_size=1, verbose=1)
    angry, fear, happy, sad, surprise, neutral = [prob for lst in list_of_list for prob in lst]
    return [angry, fear, happy, sad, surprise, neutral]
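# Depending on how the network was trained, the raw 0-255 pixel values may
# also need normalizing before prediction. A minimal sketch, assuming the
# model was trained on inputs scaled to [0, 1]:
#
# image = re_img.astype("float32") / 255.0
# image = image.reshape(1, 1, 48, 48)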
def classify(frame):
    # cv2.imread returns BGR, so convert from BGR (not RGB) to grayscale
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(img_gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=0)
    for (x, y, w, h) in faces:
        face_image_gray = img_gray[y:y+h, x:x+w]
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        angry, fear, happy, sad, surprise, neutral = predict_emotion(face_image_gray)
        probs = [angry, fear, happy, sad, surprise, neutral]
        # label the face with the most probable expression
        max_index = probs.index(max(probs))
        cv2.putText(frame, expression_labels[max_index], (x, y), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3)
        # append the timestamped probabilities to a running log
        with open("expression_classifier.model", 'a') as f:
            f.write("{}, {}, {}, {}, {}, {}, {}\n".format(time.time(), angry, fear, happy, sad, surprise, neutral))
frame = cv2.imread("img.jpg")
classify(frame)
cv2.imshow("frame", frame)
cv2.imwrite("img_emotion_detected.jpg", frame)
cv2.waitKey(0)
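# The same pipeline also runs on live video. A minimal sketch, assuming a
# default webcam at device index 0 (the device index is an assumption):
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    classify(frame)
    cv2.imshow("frame", frame)
    # press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()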