Dual Face Recognition And Identification

Yash Panchwatkar
4 min read · Jun 24, 2021


AGENDA:

  1. Create a model that recognizes two different faces.
  2. If the first person is detected, send an email with their captured picture attached and a WhatsApp message to the owner of the camera.
  3. If the second person is detected, launch an EC2 instance on AWS, create a 5 GB EBS volume, and attach that volume to the created instance using Terraform.

REQUIREMENTS:

  1. Python3 (recommended version: Python 3.7.9)
  2. Opencv-python (pip3 install opencv-python)
  3. numpy (pip3 install numpy)
  4. pywhatkit (pip3 install pywhatkit)
  5. Terraform ( Refer https://www.terraform.io/downloads.html to install and configure Terraform )

— — — — — — — — — — — — Let's Begin — — — — — — — — — — — — — —

STEP 1: Creating and Training the Model

To train a model, we need roughly 100-1000 images of one particular person's face for each model we want to train. [Note: You can refer to the code below to capture the training images.]

import cv2  # importing the cv2 module

cam = cv2.VideoCapture(0)  # opening the camera for capturing images

# load the Haar cascade model to detect faces
model = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

(width, height) = (80, 80)  # width and height of the face images to save

index = 1  # counter used to stop the loop

while index != 150:  # the loop stops after 149 face images are saved
    ret, photo = cam.read()  # capturing a frame
    gray = cv2.cvtColor(photo, cv2.COLOR_BGR2GRAY)  # converting the frame to grayscale
    faces = model.detectMultiScale(gray)  # using the model to detect faces
    for (x, y, w, h) in faces:
        cv2.rectangle(photo, (x, y), (x + w, y + h), (255, 0, 0), 2)  # marking the detected face
        face = gray[y:y + h, x:x + w]  # cropping the detected face
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite(f'mypic/s{index}.png', face_resize)  # the mypic/ folder must already exist
        index += 1
    cv2.imshow('Your Face', photo)
    if cv2.waitKey(25) == 13:  # 13 is the Enter key
        break

cam.release()
cv2.destroyAllWindows()

After capturing the images, we have to train a model to recognize each face. Run the same capture script again for the second person (saving into a yourpic/ folder) and capture a separate set of images for testing (into testmypic/ and testyourpic/). Then refer to the code below to train the models.

# importing required modules for training the model
import cv2 as cv
import numpy as np

# creating empty lists
train_img1 = []  # first person images for training
train_img2 = []  # second person images for training
test_img1 = []   # first person images for testing
test_img2 = []   # second person images for testing
label_x1 = []    # labels for first person training images
label_x2 = []    # labels for second person training images
label_y1 = []    # labels for first person testing images
label_y2 = []    # labels for second person testing images

for i in range(1, 373):  # replace 373 with the number of training images you have
    img = cv.imread(f'mypic/s{i}.png', cv.IMREAD_GRAYSCALE)     # first person image in grayscale for training
    img2 = cv.imread(f'yourpic/s{i}.png', cv.IMREAD_GRAYSCALE)  # second person image in grayscale for training
    train_img1.append(img)
    train_img2.append(img2)
    label_x1.append(i)
    label_x2.append(i)

for i in range(374, 473):  # replace with the range of your testing images
    img = cv.imread(f'testmypic/s{i}.png', cv.IMREAD_GRAYSCALE)     # first person image in grayscale for testing
    img2 = cv.imread(f'testyourpic/s{i}.png', cv.IMREAD_GRAYSCALE)  # second person image in grayscale for testing
    test_img1.append(img)
    test_img2.append(img2)
    label_y1.append(i)
    label_y2.append(i)

# converting the label lists to numpy arrays (the recognizer expects integer labels)
train_label1 = np.asarray(label_x1, dtype=np.int32)
train_label2 = np.asarray(label_x2, dtype=np.int32)
test_label1 = np.asarray(label_y1, dtype=np.int32)
test_label2 = np.asarray(label_y2, dtype=np.int32)

# loading one LBPH face recognizer per person
first = cv.face.LBPHFaceRecognizer_create()
second = cv.face.LBPHFaceRecognizer_create()

# training each model on its own person's images
first.train(train_img1, train_label1)
second.train(train_img2, train_label2)

# saving the trained models to files
first.save('first_model.pk1')
second.save('second_model.pk1')
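The testing images collected above are not used anywhere else in this flow. As a quick sanity check (not part of the original script), you can run the held-out images through the trained recognizers and count how many come back under a confidence threshold; the threshold of 50 here is an assumption, chosen to match the cutoff used in the detection script in STEP 2.

# a minimal sanity check on the held-out testing images
# (assumed threshold of 50, matching the cutoff used in STEP 2)
def evaluate(recognizer, images, threshold=50):
    hits = 0
    for img in images:
        if img is None:  # skip files that failed to load
            continue
        label, distance = recognizer.predict(img)  # lower distance means a closer match
        if distance < threshold:
            hits += 1
    return hits

print('first model  :', evaluate(first, test_img1), '/', len(test_img1))
print('second model :', evaluate(second, test_img2), '/', len(test_img2))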

STEP 2: Now it's time to create the condition: if the first person is detected by the camera, send the email and the WhatsApp message; if the second person is detected, create the EC2 instance, create the EBS volume, and attach the volume to the created instance.

# whatsapp, Gmail and AWS are helper modules; go through the GitHub repository for them
import cv2, whatsapp, Gmail, AWS
import numpy as np
import os

# placeholder details; fill in your own values before running
num = '+91xxxxxxxxxx'                    # WhatsApp number of the camera owner
msg = 'First person detected by camera'  # WhatsApp message text
fromaddr = 'sender@gmail.com'            # sender email address
toaddr = 'owner@gmail.com'               # receiver email address
subject = 'Alert: first person detected'
body = 'The first person was detected; the captured picture is attached.'

face_classifier = cv2.CascadeClassifier('../haarcascade_frontalface_default.xml')

first_model = cv2.face.LBPHFaceRecognizer_create()
second_model = cv2.face.LBPHFaceRecognizer_create()
first_model.read('../first_model.pk1')
second_model.read('../second_model.pk1')

def face_detector(img, size=0.5):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        roi = img[y:y + h, x:x + w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi

cap = cv2.VideoCapture(0)

count = 0
count2 = 0

while True:  # loop to continuously capture frames and detect faces
    ret, frame = cap.read()
    image, face = face_detector(frame)

    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        results = first_model.predict(face)    # checking whether the captured face is the first person
        results1 = second_model.predict(face)  # checking whether the captured face is the second person

        confidence = 0
        confidence1 = 0
        display_string = ''

        if results[1] < 50:
            confidence = int(100 * (1 - results[1] / 400))
            display_string = str(confidence) + '% Confident it is User'
        elif results1[1] < 50:
            confidence1 = int(100 * (1 - results1[1] / 400))
            display_string = str(confidence1) + '% Confident it is User'

        cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)

        if confidence > 85 and count <= 20 and results[1] < 50:
            count += 1
            cv2.putText(image, 'Hey FirstPerson!', (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow('Face Recognition', image)
            if count == 15:
                # sending the WhatsApp message
                whatsapp.msg(num, msg)

                # sending the mail with the captured picture attached
                Email = Gmail.Send_mail(fromaddr, toaddr, body, subject)
                Email.smtp_session()
                Email.auth()
                Email.capture_img()
                Email.Email_req()
                Email.attach_img()
                Email.send_email()

        elif confidence1 > 85 and count2 <= 20 and results1[1] < 50:
            count2 += 1
            cv2.putText(image, 'Hey Second Person!', (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow('Face Recognition', image)
            if count2 == 15:
                AWS.ec2()     # launching the EC2 instance
                AWS.ebs()     # creating the EBS volume
                AWS.attach()  # attaching the volume to the instance

        else:
            cv2.putText(image, "I don't know who you are", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
            cv2.imshow('Face Recognition', image)

    except Exception:
        cv2.putText(image, 'No Face Found', (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, 'Looking for face', (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
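The whatsapp helper module itself lives in the GitHub repository. A minimal sketch using pywhatkit (already listed in the requirements) could look like this; the module and function names mirror the call whatsapp.msg(num, msg) above, while the body is an assumption rather than the repository's actual implementation.

# whatsapp.py - a minimal sketch of the helper used above, built on pywhatkit.
# This is an assumed implementation, not the one from the repository.
import datetime
import pywhatkit

def msg(num, msg):
    # schedule the message a couple of minutes from now, since
    # pywhatkit.sendwhatmsg needs a future hour/minute to open WhatsApp Web
    send_at = datetime.datetime.now() + datetime.timedelta(minutes=2)
    pywhatkit.sendwhatmsg(num, msg, send_at.hour, send_at.minute)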
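Similarly, the AWS module is only referenced above. One possible sketch is a thin Python wrapper around the Terraform CLI, assuming a local terraform/ directory holds a configuration with an aws_instance, a 5 GiB aws_ebs_volume and an aws_volume_attachment resource; the function names ec2, ebs and attach mirror the calls above, and the resource names in the -target flags are hypothetical.

# AWS.py - a possible sketch of the helper used above. It assumes a terraform/
# directory containing an aws_instance, a 5 GiB aws_ebs_volume and an
# aws_volume_attachment resource; the resource names below are hypothetical.
import subprocess

TF_DIR = 'terraform'  # assumed location of the Terraform configuration

def _terraform(*args):
    # run a Terraform command inside the configuration directory
    subprocess.run(['terraform', *args], cwd=TF_DIR, check=True)

def ec2():
    # initialize the working directory and create only the EC2 instance
    _terraform('init')
    _terraform('apply', '-auto-approve', '-target=aws_instance.myos')

def ebs():
    # create only the 5 GiB EBS volume
    _terraform('apply', '-auto-approve', '-target=aws_ebs_volume.myebs')

def attach():
    # apply the rest of the configuration, which attaches the volume to the instance
    _terraform('apply', '-auto-approve')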

Thank You

