import cv2
import numpy as np
import dlib
from math import hypot
from pynput.mouse import Button, Controller
import time
import pandas as pd
from time import sleep
from threading import Thread

mouse = Controller()
font = cv2.FONT_HERSHEY_SIMPLEX
def shape_to_np(shape, dtype="int"):
    # initialize the array of (x, y)-coordinates
    coords = np.zeros((68, 2), dtype=dtype)
    # loop over the 68 facial landmarks and convert them
    # to 2-tuples of (x, y)-coordinates
    for i in range(0, 68):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    # return the array of (x, y)-coordinates
    return coords
def eye_on_mask(mask, side):
    # fill the polygon spanned by the eye landmarks on the mask
    points = [shape[i] for i in side]  # uses the global landmark array `shape`
    points = np.array(points, dtype=np.int32)
    mask = cv2.fillConvexPoly(mask, points, 255)
    return mask
def contouring(thresh, mid, img, right=False):
    # take the largest contour in the thresholded eye region and use its
    # centroid as the pupil position
    cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    try:
        cnt = max(cnts, key=cv2.contourArea)
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        if right:
            cx += mid  # shift back into full-image coordinates
        cv2.circle(img, (cx, cy), 4, (0, 0, 255), 2)
        cv2.putText(img, str(cx), (50, 150), font, 3, (0, 255, 0))
        cv2.putText(img, str(cy), (50, 200), font, 3, (0, 255, 0))
        return cx, cy
    except (ValueError, ZeroDivisionError):
        # no contour found or degenerate moments
        return None
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
# dlib 68-landmark indices of the left and right eye
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
cap = cv2.VideoCapture(0)
ret, img = cap.read()
thresh = img.copy()
cv2.namedWindow('image')
kernel = np.ones((9, 9), np.uint8)
def nothing(x):
    pass
cv2.createTrackbar('threshold', 'image', 70, 255, nothing)
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    for rect in rects:
        shape = predictor(gray, rect)  # landmark prediction
        shape = shape_to_np(shape)  # 68x2 array of landmark coordinates
        mask = np.zeros(img.shape[:2], dtype=np.uint8)  # blank mask, same size as the frame (480x640)
        mask = eye_on_mask(mask, left)
        mask = eye_on_mask(mask, right)
        mask = cv2.dilate(mask, kernel, 5)
        eyes = cv2.bitwise_and(img, img, mask=mask)
        mask = (eyes == [0, 0, 0]).all(axis=2)
        eyes[mask] = [255, 255, 255]
        mid = (shape[42][0] + shape[39][0]) // 2
        eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
        threshold = cv2.getTrackbarPos('threshold', 'image')
        _, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=4)
        thresh = cv2.medianBlur(thresh, 3)
        thresh = cv2.bitwise_not(thresh)
        cm = contouring(thresh[:, 0:mid], mid, img)  # left-eye pupil centre, or None
        contouring(thresh[:, mid:], mid, img, True)  # right-eye pupil (only drawn)
        # for (x, y) in shape[36:48]:
        #     cv2.circle(img, (x, y), 2, (255, 0, 0), -1)
        if cm is None:
            continue  # no pupil found in this frame, skip the gesture logic
        # debug output: pupil centre and reference landmark coordinates
        print(type(cm))
        print(shape[44][1])
        print(shape[46][1])
        print(shape[36][0])
        ls = list(cm)
        # cv2.putText(img, str(mid), (50, 150), font, 3, (0, 255, 0))
        cv2.circle(img, (mid, 150), 10, (0, 0, 255), 3)
        print(ls[1])
        # horizontal gesture: one of these two branches runs every frame
        if abs(shape[36][0] - ls[0]) >= 22 and abs(shape[42][0] - ls[0]) <= 95:
            cv2.putText(img, "right", (50, 100), font, 2, (0, 0, 255), 3)
            mouse.move(-25, 0)
        else:
            cv2.putText(img, "left", (50, 100), font, 2, (0, 0, 255), 3)
            mouse.move(25, 0)
        # vertical gesture: one of these two branches also runs every frame
        if abs(shape[44][1] - shape[46][1]) >= 12 and abs(ls[1] - shape[46][1]) >= 9:
            cv2.putText(img, "up", (50, 100), font, 2, (0, 0, 255), 3)
            mouse.move(0, -25)
        else:
            cv2.putText(img, "down", (50, 100), font, 2, (0, 0, 255), 3)
            mouse.move(0, 25)
    # show the image with the face detections + facial landmarks
    cv2.imshow('eyes', img)
    cv2.imshow("image", thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
My project behaves inconsistently. Up-down detection works on its own, and left-right detection works on its own, but when both run together they interfere with each other.
Can you help me?
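
To make the problem easier to see, here is what the gesture logic boils down to, as a simplified, self-contained sketch (the function name and the sample landmark values are made up for illustration; the constants 22, 95, 12, 9 and 25 are the ones from my loop above). Each if/else always takes one of its two branches, so mouse.move() receives both a horizontal and a vertical step on every frame; there is no outcome where the cursor holds still on one axis while moving on the other.

def decide_moves(pupil, corner_left_x, corner_right_x, lid_top_y, lid_bottom_y):
    """Return the (dx, dy) step that the loop above would send to mouse.move()."""
    px, py = pupil
    # horizontal decision: always "right" or "left", never "stay"
    if abs(corner_left_x - px) >= 22 and abs(corner_right_x - px) <= 95:
        dx = -25
    else:
        dx = 25
    # vertical decision: always "up" or "down", never "stay"
    if abs(lid_top_y - lid_bottom_y) >= 12 and abs(py - lid_bottom_y) >= 9:
        dy = -25
    else:
        dy = 25
    return dx, dy

# Made-up sample values: the eye is only meant to signal "up" here,
# yet a horizontal step of 25 is produced as well.
print(decide_moves(pupil=(300, 188), corner_left_x=280, corner_right_x=340,
                   lid_top_y=180, lid_bottom_y=200))  # -> (25, -25)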