From 0d32616e6aa9a8c64363f304e5b8241e410601a8 Mon Sep 17 00:00:00 2001
From: vinesmsuic
Date: Wed, 14 Jul 2021 02:22:36 +0800
Subject: [PATCH 1/5] Update README.md

---
 README.md | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index dc4e1e5c..5002d94f 100644
--- a/README.md
+++ b/README.md
@@ -15,21 +15,40 @@ _🚀 Quick note: I'm looking for job opportunities as a software developer, for
 Clone this project:
 
-```
+```shell
 git clone https://github.com/antoinelame/GazeTracking.git
 ```
 
+### For Pip install
 Install these dependencies (NumPy, OpenCV, Dlib):
 
-```
+```shell
 pip install -r requirements.txt
 ```
 
 > The Dlib library has four primary prerequisites: Boost, Boost.Python, CMake and X11/XQuartz. If you don't have them, you can [read this article](https://www.pyimagesearch.com/2017/03/27/how-to-install-dlib/) to learn how to install them easily.
 
-Run the demo:
+### For Anaconda install
+Create a conda environment
+
+```shell
+conda create --name GazeTracking
+conda activate GazeTracking
+```
+
+```shell
+conda install -c anaconda numpy -y
+conda install -c anaconda opencv -y
+conda install -c conda-forge dlib -y
 ```
+
+
+### Verify Installation
+
+Run the demo:
+
+```shell
 python example.py
 ```

From 1993bae8e51d3c408e56eca6561774b2f567173a Mon Sep 17 00:00:00 2001
From: vinesmsuic
Date: Wed, 14 Jul 2021 03:13:48 +0800
Subject: [PATCH 2/5] adding environment.yml

---
 README.md       | 11 +++--------
 environment.yml |  9 +++++++++
 2 files changed, 12 insertions(+), 8 deletions(-)
 create mode 100644 environment.yml

diff --git a/README.md b/README.md
index 5002d94f..9377c5ce 100644
--- a/README.md
+++ b/README.md
@@ -30,19 +30,14 @@ pip install -r requirements.txt
 
 ### For Anaconda install
-Create a conda environment
+Install these dependencies (NumPy, OpenCV, Dlib):
 
 ```shell
-conda create --name GazeTracking
+conda env create --file environment.yml
+# After creating the environment, activate it
 conda activate GazeTracking
 ```
 
-```shell
-conda install -c anaconda numpy -y
-conda install -c anaconda opencv -y
-conda install -c conda-forge dlib -y
-```
-
 
 ### Verify Installation
 
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 00000000..1b9f4931
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,9 @@
+name: GazeTracking
+channels:
+  - conda-forge
+  - anaconda
+  - defaults
+dependencies:
+  - numpy == 1.16.1
+  - opencv == 3.4.*
+  - dlib == 19.17.*
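Once the environment from `environment.yml` has been created and activated, the pinned packages can be sanity-checked before running the demo. This is a minimal sketch, not part of the patch series; the expected versions are simply the pins from `environment.yml`:

```python
# Sanity-check that the packages pinned in environment.yml import correctly.
import numpy
import cv2
import dlib

print("numpy :", numpy.__version__)   # environment.yml pins 1.16.1
print("opencv:", cv2.__version__)     # environment.yml pins 3.4.*
print("dlib  :", dlib.__version__)    # environment.yml pins 19.17.*
```

If any of these imports fails, the conda environment above is probably not active.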
From f613915dcf4c1c970fd82f1384a796a50dc7a0dd Mon Sep 17 00:00:00 2001
From: vinesmsuic
Date: Wed, 14 Jul 2021 05:04:53 +0800
Subject: [PATCH 3/5] Adding Video Inference

---
 README.md        | 17 +++++++++
 example.py       |  4 ++
 example_video.py | 98 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 119 insertions(+)
 create mode 100644 example_video.py

diff --git a/README.md b/README.md
index 9377c5ce..791911a6 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,22 @@ while True:
     break
 ```
 
+### Video Inference
+
+```shell
+usage: example_video.py [-h] --vid VID [--output OUTPUT] [--dont_show]
+
+GazeTracking Video Inference.
+
+optional arguments:
+  -h, --help       show this help message and exit
+  --vid VID        path to video
+  --output OUTPUT  inference video name. Only supports the .avi extension
+                   due to OpenCV 3.4
+  --dont_show      hide imshow window
+```
+
+
 ## Documentation
 
 In the following examples, `gaze` refers to an instance of the `GazeTracking` class.
@@ -166,6 +182,7 @@ Returns the main frame with pupils highlighted.
 
 Your suggestions, bug reports and pull requests are welcome and appreciated. You can also star ⭐️ the project!
 
 If the detection of your pupils is not completely optimal, you can send me a video sample of you looking in different directions. I would use it to improve the algorithm.
+* You can use `example_video.py` to produce a video sample.
 
 ## Licensing

diff --git a/example.py b/example.py
index c91a67a3..63a4a0d9 100644
--- a/example.py
+++ b/example.py
@@ -13,6 +13,10 @@
     # We get a new frame from the webcam
     _, frame = webcam.read()
 
+    # Quit the program if no webcam is found
+    if frame is None:
+        break
+
     # We send this frame to GazeTracking to analyze it
     gaze.refresh(frame)

diff --git a/example_video.py b/example_video.py
new file mode 100644
index 00000000..6c6bc914
--- /dev/null
+++ b/example_video.py
@@ -0,0 +1,98 @@
+"""
+Demonstration of the GazeTracking library for video inference.
+Check the README.md for complete documentation.
+"""
+
+import cv2
+from gaze_tracking import GazeTracking
+import argparse
+import os
+
+# Parser
+def parser():
+    parser = argparse.ArgumentParser(description="GazeTracking Video Inference.")
+    parser.add_argument("--vid", type=str, required=True, help="path to video")
+    parser.add_argument("--output", type=str, default="result.avi", help="inference video name. Only supports the .avi extension due to OpenCV 3.4")
+    parser.add_argument("--dont_show", action='store_true', help="hide imshow window")
+    return parser.parse_args()
+
+def check_arguments_errors(args):
+    if not os.path.isfile(args.vid):
+        raise ValueError("Invalid video path {}".format(os.path.abspath(args.vid)))
+
+# Save Video
+def save_video(input_video, output_video, size):
+    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
+    fps = int(input_video.get(cv2.CAP_PROP_FPS))
+    video = cv2.VideoWriter(output_video, fourcc, fps, size)
+    return video
+
+# Main
+def capture_main():
+    cap = cv2.VideoCapture(args.vid)
+
+    # Getting width and height of the input video
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # Video Saver
+    video = save_video(cap, args.output, (width, height))
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        if frame is not None:
+
+            # Process Inference
+            image = inference(frame)
+
+            if not args.dont_show:
+                cv2.imshow('Inference', image)
+
+            video.write(image)
+
+        if cv2.waitKey(1) == 27:
+            break
+    cap.release()
+    video.release()
+    cv2.destroyAllWindows()
+
+# Drawing
+def inference(frame):
+    # We send this frame to GazeTracking to analyze it
+    gaze.refresh(frame)
+
+    frame = gaze.annotated_frame()
+    text = ""
+
+    if gaze.is_blinking():
+        text = "Blinking"
+    elif gaze.is_right():
+        text = "Looking right"
+    elif gaze.is_left():
+        text = "Looking left"
+    elif gaze.is_center():
+        text = "Looking center"
+
+    frame = cv2.putText(frame, text, (int(frame.shape[1]*0.01), int(frame.shape[0]*0.05)), cv2.FONT_HERSHEY_DUPLEX, 1, (147, 58, 31), 2)
+
+    left_pupil = gaze.pupil_left_coords()
+    right_pupil = gaze.pupil_right_coords()
+    frame = cv2.putText(frame, "Left pupil: " + str(left_pupil), (int(frame.shape[1]*0.01), int(frame.shape[0]*0.1)), cv2.FONT_HERSHEY_DUPLEX, 0.8, (147, 58, 31), 1)
+    frame = cv2.putText(frame, "Right pupil: " + str(right_pupil), (int(frame.shape[1]*0.01), int(frame.shape[0]*0.15)), cv2.FONT_HERSHEY_DUPLEX, 0.8, (147, 58, 31), 1)
+    return frame
+
+
+if __name__ == '__main__':
+    args = parser()
+    check_arguments_errors(args)
+
+    gaze = GazeTracking()
+
+    print('='*30)
+    print('Input Video Loaded: ' + str(args.vid))
+    capture_main()
+    print('Inference Video Saved to: ' + str(args.output))
+    print('='*30)
\ No newline at end of file
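The new `example_video.py` is driven from the command line, e.g. `python example_video.py --vid input.mp4 --output result.avi` (where `input.mp4` stands for any video path). The GazeTracking calls it shares with `example.py` can also be exercised on a single still image rather than a stream; a minimal sketch, assuming a hypothetical input file `face.jpg`:

```python
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
frame = cv2.imread("face.jpg")       # hypothetical input image
gaze.refresh(frame)                  # analyze the frame

annotated = gaze.annotated_frame()   # frame with the pupils highlighted
print("Blinking:", gaze.is_blinking())
print("Left pupil:", gaze.pupil_left_coords())
print("Right pupil:", gaze.pupil_right_coords())
cv2.imwrite("annotated.jpg", annotated)
```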
From dd9f3891b1ef1c127b087dc54aee623a9bdcbe14 Mon Sep 17 00:00:00 2001
From: vinesmsuic
Date: Wed, 14 Jul 2021 12:52:13 +0800
Subject: [PATCH 4/5] Quit the program if no webcam is found

---
 example.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/example.py b/example.py
index 63a4a0d9..6a8e5e7a 100644
--- a/example.py
+++ b/example.py
@@ -11,10 +11,10 @@ while True:
     # We get a new frame from the webcam
-    _, frame = webcam.read()
+    ret, frame = webcam.read()
 
     # Quit the program if no webcam is found
-    if frame is None:
+    if ret is None:
         break

From 56b204828d6fdfcd61bd01cd10a5e190e1db6ea5 Mon Sep 17 00:00:00 2001
From: vinesmsuic
Date: Wed, 14 Jul 2021 12:53:04 +0800
Subject: [PATCH 5/5] Update example.py

---
 example.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/example.py b/example.py
index 6a8e5e7a..9405788d 100644
--- a/example.py
+++ b/example.py
@@ -14,7 +14,7 @@
     ret, frame = webcam.read()
 
     # Quit the program if no webcam is found
-    if ret is None:
+    if ret == False:
         break
 
     # We send this frame to GazeTracking to analyze it
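For reference, this is the webcam loop that the last two patches converge on, written as a standalone sketch. Using `if not ret` is the idiomatic equivalent of the `ret == False` check; that spelling is an editorial suggestion, not part of the patches above:

```python
import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    # We get a new frame from the webcam
    ret, frame = webcam.read()
    if not ret:
        # Quit the program if no webcam/frame is available
        break

    gaze.refresh(frame)
    cv2.imshow("Demo", gaze.annotated_frame())

    if cv2.waitKey(1) == 27:   # Esc quits
        break

webcam.release()
cv2.destroyAllWindows()
```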