-[submodule "lib/dlib"]
- path = lib/dlib
- url = https://github.com/davisking/dlib.git
cmake_minimum_required(VERSION 3.16)
project(MouseTrackerForCubism_project)

# NOTE: find_library(... REQUIRED) only exists in CMake >= 3.18, while this
# project promises CMake >= 3.16 (see README). With 3.16 the word REQUIRED
# would be parsed as a search path and a missing library would only surface
# as a cryptic link error. So check each result explicitly instead.

# libxdo: used to query the mouse cursor position (X11).
find_library(xdo_LIBS NAMES xdo libxdo)
if(NOT xdo_LIBS)
    message(FATAL_ERROR "libxdo not found. Please install libxdo-dev.")
endif()

# PulseAudio (core + "simple" API): used to read microphone volume
# for lip syncing.
find_library(pulse_LIBS NAMES pulse)
if(NOT pulse_LIBS)
    message(FATAL_ERROR "libpulse not found. Please install libpulse-dev.")
endif()
find_library(pulse_simple_LIBS NAMES pulse-simple)
if(NOT pulse_simple_LIBS)
    message(FATAL_ERROR "libpulse-simple not found. Please install libpulse-dev.")
endif()

add_library(
    MouseTrackerForCubism STATIC
    src/mouse_cursor_tracker.cpp
)
set_target_properties(
    MouseTrackerForCubism PROPERTIES PUBLIC_HEADER
    include/mouse_cursor_tracker.h
)

# Target-scoped include path, PUBLIC so that consumers pulling this
# library in via add_subdirectory() can find mouse_cursor_tracker.h
# without any extra include_directories() of their own.
target_include_directories(MouseTrackerForCubism
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include
)

# PUBLIC: this is a static library, so consumers must also link
# against xdo and PulseAudio in the final executable.
target_link_libraries(MouseTrackerForCubism
    PUBLIC ${xdo_LIBS} ${pulse_LIBS} ${pulse_simple_LIBS}
)
-# Facial Landmarks for Cubism
+# Mouse Tracker for Cubism
-A library that extracts facial landmarks from a webcam feed and converts them
-into Live2D® Cubism SDK parameters.
+A library that tracks mouse cursor location and microphone input and
+converts them into Live2D® Cubism SDK parameters.
+
+This is a spin-off project from [Facial Landmarks for Cubism](https://github.com/adrianiainlam/facial-landmarks-for-cubism).
+The objective is to provide similar functionality, but requiring much
+less CPU load, which can be critical if the processor does not support
+AVX instructions. It also does not require the use of a dataset which
+restricts commercial use, and it does not require a webcam.
*Disclaimer: This library is designed for use with the Live2D® Cubism SDK.
It is not part of the SDK itself, and is not affiliated in any way with Live2D
Inc. The Live2D® Cubism SDK belongs solely to Live2D Inc. You will need to
agree to Live2D Inc.'s license agreements to use the Live2D® Cubism SDK.*
-This block diagram shows the intended usage of this library:
-
-![Block diagram showing interaction of this library with other components](block_diagram.png)
-
-Video showing me using the example program:
-<https://youtu.be/SZPEKwEqbdI>
-
+<!-- TODO maybe make a demo video? -->
## Supporting environments
-This library was developed and tested only on Ubuntu 18.04 using GCC 7.5.0.
-However I don't think I've used anything that prevents it from being
-cross-platform compatible -- it should still work as long as you have a
-recent C/C++ compiler. The library should only require C++11. The Cubism
-SDK requires C++14. I have made use of one C++17 library (`<filesystem>`)
+This library is designed for Linux (or other *nix systems) with X11 and
+PulseAudio.
+
+I have made use of one C++17 library (`<filesystem>`)
in the example program, but it should be straightforward to change this
if you don't have C++17 support.
-I have provided some shell scripts for convenience when building. In an
-environment without a `/bin/sh` shell you may have to run the commands
-manually. Hereafter, all build instructions will assume a Linux environment
-where a shell is available.
-
-If your CPU does not support AVX instructions you may want to edit "build.sh"
-and "example/demo.patch" to remove the `-D USE_AVX_INSTRUCTIONS=1` variable
-(or change AVX to SSE4 or SSE2). However there could be a penalty in
-performance.
-
## Build instructions
1. Install dependencies.
You will require a recent C/C++ compiler, `make`, `patch`, CMake >= 3.16,
- and the OpenCV library (I'm using version 4.3.0). To compile the example
+ libxdo, and PulseAudio. To compile the example
program you will also require the OpenGL library (and its dev headers)
among other libraries required for the example program. The libraries I
had to install (this list may not be exhaustive) are:
- libgl1-mesa-dev libxrandr-dev libxinerama-dev libxcursor-dev libxi-dev libglu1-mesa-dev
+ libxdo-dev libpulse-dev libgl1-mesa-dev libxrandr-dev libxinerama-dev libxcursor-dev libxi-dev libglu1-mesa-dev
-2. Clone this repository including its submodule (dlib)
- git clone --recurse-submodules https://github.com/adrianiainlam/facial-landmarks-for-cubism.git
+2. Clone this repository
+ git clone https://github.com/adrianiainlam/mouse-tracker-for-cubism.git
3. To build the library only: (Skip this step if you want to build the example
program. It will be done automatically.)
cd <path of the git repo>
./build.sh
-4. You will require a facial landmark dataset to use with dlib. I have
- downloaded mine from
- <http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2>.
- Extract the file and edit the "config.txt" file to point to the
- path to this file.
-
- Note: The license for this dataset excludes commercial use. If you want
- to use this library in a commercial product you will need to obtain a
- dataset in some other way.
-
To build the example program:
-5. Copy the extracted dlib dataset from step 4 to the "example" folder
- of this repo.
-
-6. Download "Cubism 4 SDK for Native R1" from the Live2D website:
+4. Download "Cubism 4 SDK for Native R1" from the Live2D website:
<https://www.live2d.com/en/download/cubism-sdk/download-native/>.
   Extract the archive -- put the "CubismSdkForNative-4-r.1" folder under
   the "example" folder of this repo.
Note: The Cubism SDK is the property of Live2D and is not part of this
project. You must agree to Live2D's license agreements to use it.
-7. Go into the
+5. Go into the
"example/CubismSdkForNative-4-r.1/Samples/OpenGL/thirdParty/scripts"
directory and run
./setup_glew_glfw
-8. Go back to the "example" directory and run
+6. Go back to the "example" directory and run
./build.sh
-9. Now try running the example program. From the "example" directory:
+7. Now try running the example program. From the "example" directory:
cd ./demo_build/build/make_gcc/bin/Demo/
./Demo
* `--translate-y`, `-y`: Vertical translation of the model within the window
* `--model`, `-m`: Name of the model to be used. This must be located inside
the "Resources" folder.
- * `--config`, `-c`: Path to the configuration file for the Facial Landmarks
+ * `--config`, `-c`: Path to the configuration file for the Mouse Tracker
for Cubism library. See below for more details.
-
## Configuration file
-Due to the differences in hardware and differences in each person's face,
-I have decided to make pretty much every parameter tweakable. The file
+There are fewer tweakable parameters compared to the Facial Landmarks
+library, but I have still kept the configuration file to allow some
+customization. The file
"config.txt" lists and documents all parameters and their default values.
You can change the values there and pass it to the example program using
the `-c` argument. If using the library directly, the path to this file
should be passed to the constructor (or pass an empty string to use
default values).
-## Troubleshooting
-
-1. Example program crashes with SIGILL (Illegal instruction).
-
- Your CPU probably doesn't support AVX instructions which is used by dlib.
- You can confirm this by running
-
- grep avx /proc/cpuinfo
-
- If this is the case, try to find out if your CPU supports SSE4 or SSE2,
- then edit "build.sh" and "example/demo.patch" to change
- `USE_AVX_INSTRUCTIONS=1` to `USE_SSE4_INSTRUCTIONS=1` or
- `USE_SSE2_INSTRUCTIONS=1`.
-
## License
The library itself is provided under the MIT license. By "the library itself"
I refer to the following files that I have provided under this repo:
- * src/facial_landmark_detector.cpp
- * src/math_utils.h
- * include/facial_landmark_detector.h
+ * src/mouse_cursor_tracker.cpp
+ * include/mouse_cursor_tracker.h
* and if you decide to build the binary for the library, the resulting
- binary file (typically build/libFacialLandmarksForCubism.a)
+ binary file (typically build/libMouseTrackerForCubism.a)
The license text can be found in LICENSE-MIT.txt, and also at the top of
the .cpp and .h files.
-The library makes use of the dlib library, provided here as a Git
-submodule, which is used under the Boost Software License, version 1.0.
-The full license text can be found under lib/dlib/dlib/LICENSE.txt.
-
The example program is a patched version of the sample program provided
by Live2D (because there's really no point in reinventing the wheel),
and as such, as per the licensing restrictions by Live2D, is still the
Their licenses can be found here:
<https://www.live2d.com/en/download/cubism-sdk/download-native/>.
-The library requires a facial landmark dataset, and the one provided by
-dlib (which is derived from a dataset owned by Imperial College London)
-has been used in development. The license for this dataset excludes
-commercial use. You must obtain an alternative dataset if you wish to
-use this library commercially.
-
This is not a license requirement, but if you find my library useful,
I'd love to hear from you! Send me an email at spam(at)adrianiainlam.tk --
replacing "spam" with the name of this repo :).
## Contributions
Contributions welcome! This is only a hobby weekend project so I don't
-really have many environments / faces to test it on. Feel free to submit
+really have many environments to test it on. Feel free to submit
issues or pull requests on GitHub, or send questions or patches to me
(see my email address above) if you prefer email. Thanks :)
mkdir -p build && cd build
-cmake .. -D CMAKE_BUILD_TYPE=Release -D USE_AVX_INSTRUCTIONS=1
+cmake .. -D CMAKE_BUILD_TYPE=Release
cmake --build . -j6
-# Config file for FacialLandmarksForCubism
+# All coordinates used here are the ones used by the xdo library.
+# For a sense of how these work, run "xdotool getmouselocation".
-# The path of this config file should be passed to the constructor
-# of the FacialLandmarkDetector.
+# Milliseconds to sleep for between each update
+sleep_ms 5
-# Comments are lines that start with a '#' and are ignored by the parser.
-# Note that a line will be considered as a comment ONLY IF the '#' is the
-# very first character of the line, i.e. without any preceeding whitespace.
-
-
-## Section 1: dlib face detection and webcam parameters
-
-# Path to the dlib shape predictor trained dataset
-predictorPath ./shape_predictor_68_face_landmarks.dat
-
-# Value passed to the cv::VideoCapture() ctor
-cvVideoCaptureId 0
-
-# Number of milliseconds to wait after processing each video frame
-# This value controls the frame rate, but the actual frame period
-# is longer due to the time required to process each frame
-cvWaitKeyMs 5
-
-# If 1, show the webcam captured video on-screen; if 0, don't show
-showWebcamVideo 1
-
-# If 1, draw the detected facial landmarks on-screen; if 0, don't draw
-renderLandmarksOnVideo 1
-
-# If 1, laterally invert the image (create a mirror image); if 0, don't invert
-lateralInversion 1
-
-
-## Section 2: Cubism params calculation control
-#
-# These values control how the facial landmarks are translated into
-# parameters that control the Cubism model, and will vary from person
-# to person. The following values seem to work OK for my face, but
-# your milage may vary.
-
-# Section 2.0: Live2D automatic functionality
-# Set 1 to enable, 0 to disable.
-# If these are set, the automatic functionality in Live2D will be enabled.
-# Note: If you set auto blink, eye control will be disabled.
-autoBlink 0
-autoBreath 0
+# Automatic functionality in Live2D
+autoBlink 1
+autoBreath 1
randomMotion 0
-
-# Section 2.1: Face Y direction angle (head pointing up/down)
-# The Y angle is calculated mainly based on the angle formed
-# by the corners and the tip of the nose (hereafter referred
-# to as the "nose angle").
-
-# This applies an offset (in degrees).
-# If you have a webcam at the top of your monitor, then it is likely
-# that when you look at the centre of your monitor, the captured image
-# will have you looking downwards. This offset shifts the angle upwards,
-# so that the resulting avatar will still be looking straight ahead.
-faceYAngleCorrection 10
-
-# This is the baseline value for the nose angle (in radians) when looking
-# straight ahead...
-faceYAngleZeroValue 1.8
-
-# ... and this is when you are looking up...
-faceYAngleUpThreshold 1.3
-
-# ... and when looking down.
-faceYAngleDownThreshold 2.3
-
-# This is an additional multiplication factor applied per degree of rotation
-# in the X direction (left/right) - since the nose angle reduces when
-# turning your head left/right.
-faceYAngleXRotCorrection 0.15
-
-# This is the multiplication factor to reduce by when smiling or laughing -
-# the nose angle increases in such cases.
-faceYAngleSmileCorrection 0.075
-
-
-# Section 2.2: Eye control
-# This is mainly calculated based on the eye aspect ratio (eye height
-# divided by eye width). Note that currently an average of the values
-# of both eyes is applied - mainly due to two reasons: (1) the dlib
-# dataset I'm using fails to detect winks for me, and (2) if this is
-# not done, I frequently get asynchronous blinks which just looks ugly.
-
-# Maximum eye aspect ratio when the eye is closed
-eyeClosedThreshold 0.2
-
-# Minimum eye aspect ratio when the eye is open
-eyeOpenThreshold 0.25
-
-# Max eye aspect ratio to switch to a closed "smiley eye"
-eyeSmileEyeOpenThreshold 0.6
-
-# Min "mouth form" value to switch to a closed "smiley eye"
-# "Mouth form" is 1 when fully smiling / laughing, and 0 when normal
-eyeSmileMouthFormThreshold 0.75
-
-# Min "mouth open" value to switch to a closed "smiley eye"
-# "Mouth open" is 1 when fully open, and 0 when closed
-eyeSmileMouthOpenThreshold 0.5
-
-
-# Section 2.3: Mouth control
-# Two parameters are passed to Cubism to control the mouth:
-# - mouth form: Controls smiles / laughs
-# - mouth openness: How widely open the mouth is
-# Mouth form is calculated by the ratio between the mouth width
-# and the eye separation (distance between the two eyes).
-# Mouth openness is calculated by the ratio between the lip separation
-# (distance between upper and lower lips) and the mouth width.
-
-# Max mouth-width-to-eye-separation ratio to have a normal resting mouth
-mouthNormalThreshold 0.75
-
-# Min mouth-width-to-eye-separation ratio to have a fully smiling
-# or laughing mouth
-mouthSmileThreshold 1.0
-
-# Max lip-separation-to-mouth-width ratio to have a closed mouth
-mouthClosedThreshold 0.1
-
-# Min lip-separation-to-mouth-width ratio to have a fully opened mouth
-mouthOpenThreshold 0.4
-
-# Additional multiplication factor applied to the mouth openness parameter
-# when the mouth is fully smiling / laughing, since doing so increases
-# the mouth width
-mouthOpenLaughCorrection 0.2
-
-
-## Section 3: Filtering parameters
-# The facial landmark coordinates can be quite noisy, so I've applied
-# a simple moving average filter to reduce noise. More taps would mean
-# more samples to average over, hence smoother movements with less noise,
-# but it will also cause more lag between your movement and the movement
-# of the avatar, and quick movements (e.g. blinks) may be completely missed.
-
-faceXAngleNumTaps 11
-faceYAngleNumTaps 11
-faceZAngleNumTaps 11
-mouthFormNumTaps 3
-mouthOpenNumTaps 3
-leftEyeOpenNumTaps 3
-rightEyeOpenNumTaps 3
-
+useLipSync 1
+
+# Lip sync configurations
+
+# Gain to apply to audio volume.
+# (Linear gain multiplying audio RMS value)
+lipSyncGain 10
+
+# Cut-off volume for lip syncing.
+# If the volume is below this value, it will be forced to zero.
+# This can be useful if you don't have a good quality microphone
+# or are in a slightly noisy environment.
+# Note: this cut-off is applied *after* applying the gain.
+lipSyncCutOff 0.15
+
+# Audio buffer size. This is the window size over which we calculate
+# the "real-time" volume. A higher value will give a smoother
+# response, but will mean a higher latency.
+# The sampling rate is set to 44100 Hz so 4096 samples should
+# still have a low latency.
+audioBufSize 4096
+
+# Mouth form
+# Set this to 1 for a fully smiling mouth, 0 for a normal mouth,
+# and -1 for a frowning mouth, or anything in between.
+mouthForm 0
+
+# Screen number. Currently tracking is supported only in one screen.
+# If you have multiple screens, select the ID of the one you want to track.
+screen 0
+
+# The "middle" position, i.e. the coordinates of the cursor where
+# the Live2D model will be looking straight ahead.
+# For a 1920x1080 screen, {1600, 870} will be somewhere near the
+# bottom right corner.
+middle_x 1600
+middle_y 870
+
+# The bounding box. These are the limits of the coordinates where the
+# Live2D model will be looking 30 degrees to each side.
+top 0
+bottom 1079
+left 0
+right 1919
diff -pruN --exclude build ./demo_clean/CMakeLists.txt ./demo_dev/CMakeLists.txt
---- ./demo_clean/CMakeLists.txt 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/CMakeLists.txt 2020-07-11 22:52:49.099117981 +0100
+--- ./demo_clean/CMakeLists.txt 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/CMakeLists.txt 2020-10-01 23:29:15.530233484 +0100
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.16)
# Set app name.
set(APP_NAME Demo)
# Find opengl libraries.
find_package(OpenGL REQUIRED)
-+# Add FacialLandmarksForCubism
-+add_subdirectory(../.. FacialLandmarksForCubism_build)
++# Add MouseTrackerForCubism
++add_subdirectory(../.. MouseTrackerForCubism_build)
+
# Make executable app.
add_executable(${APP_NAME})
Framework
glfw
${OPENGL_LIBRARIES}
-+ FacialLandmarksForCubism
++ MouseTrackerForCubism
+ stdc++fs
)
# Specify include directories.
# Copy resource directory to build directory.
add_custom_command(
-@@ -86,6 +91,17 @@ add_custom_command(
- copy_directory ${RES_PATH} $<TARGET_FILE_DIR:${APP_NAME}>/Resources
- )
-
-+# Copy shape predictor trained dataset to build directory
-+set(DLIB_SHAPE_PREDICTOR_DATA ${CMAKE_CURRENT_SOURCE_DIR}/../shape_predictor_68_face_landmarks.dat
-+ CACHE FILEPATH "Path to dlib shape predictor trained dataset")
-+add_custom_command(
-+ TARGET ${APP_NAME}
-+ POST_BUILD
-+ COMMAND
-+ ${CMAKE_COMMAND} -E
-+ copy ${DLIB_SHAPE_PREDICTOR_DATA} $<TARGET_FILE_DIR:${APP_NAME}>/
-+)
-+
- # You can change target that renderer draws by enabling following definition.
- #
- # * USE_RENDER_TARGET
diff -pruN --exclude build ./demo_clean/scripts/make_gcc ./demo_dev/scripts/make_gcc
---- ./demo_clean/scripts/make_gcc 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/scripts/make_gcc 2020-07-14 15:33:09.865020790 +0100
-@@ -9,5 +9,6 @@ BUILD_PATH=$SCRIPT_PATH/../build/make_gc
- # Run CMake.
+--- ./demo_clean/scripts/make_gcc 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/scripts/make_gcc 2020-10-01 23:43:42.213875065 +0100
+@@ -10,4 +10,4 @@ BUILD_PATH=$SCRIPT_PATH/../build/make_gc
cmake -S "$CMAKE_PATH" \
-B "$BUILD_PATH" \
-- -D CMAKE_BUILD_TYPE=Release
+ -D CMAKE_BUILD_TYPE=Release
-cd "$BUILD_PATH" && make
-+ -D CMAKE_BUILD_TYPE=Release \
-+ -D USE_AVX_INSTRUCTIONS=1
+cd "$BUILD_PATH" && make -j4
diff -pruN --exclude build ./demo_clean/src/CMakeLists.txt ./demo_dev/src/CMakeLists.txt
---- ./demo_clean/src/CMakeLists.txt 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/CMakeLists.txt 2020-07-11 17:39:18.358435702 +0100
+--- ./demo_clean/src/CMakeLists.txt 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/CMakeLists.txt 2020-10-01 22:47:24.842846271 +0100
@@ -19,6 +19,4 @@ target_sources(${APP_NAME}
${CMAKE_CURRENT_SOURCE_DIR}/LAppView.cpp
${CMAKE_CURRENT_SOURCE_DIR}/LAppView.hpp
- ${CMAKE_CURRENT_SOURCE_DIR}/TouchManager.hpp
)
diff -pruN --exclude build ./demo_clean/src/LAppDelegate.cpp ./demo_dev/src/LAppDelegate.cpp
---- ./demo_clean/src/LAppDelegate.cpp 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/LAppDelegate.cpp 2020-07-11 17:35:02.414902548 +0100
+--- ./demo_clean/src/LAppDelegate.cpp 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/LAppDelegate.cpp 2020-10-01 22:47:24.698848890 +0100
@@ -45,7 +45,8 @@ void LAppDelegate::ReleaseInstance()
s_instance = NULL;
}
Csm::csmVector<string> LAppDelegate::Split(const std::string& baseString, char delimiter)
diff -pruN --exclude build ./demo_clean/src/LAppDelegate.hpp ./demo_dev/src/LAppDelegate.hpp
---- ./demo_clean/src/LAppDelegate.hpp 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/LAppDelegate.hpp 2020-07-11 17:34:40.778602504 +0100
+--- ./demo_clean/src/LAppDelegate.hpp 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/LAppDelegate.hpp 2020-10-01 22:47:24.842846271 +0100
@@ -40,7 +40,8 @@ public:
/**
* @brief APPに必要なものを初期化する。
-
-};
diff -pruN --exclude build ./demo_clean/src/LAppLive2DManager.cpp ./demo_dev/src/LAppLive2DManager.cpp
---- ./demo_clean/src/LAppLive2DManager.cpp 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/LAppLive2DManager.cpp 2020-07-11 23:20:11.548419176 +0100
+--- ./demo_clean/src/LAppLive2DManager.cpp 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/LAppLive2DManager.cpp 2020-10-02 02:00:49.961556700 +0100
@@ -52,9 +52,10 @@ void LAppLive2DManager::ReleaseInstance(
LAppLive2DManager::LAppLive2DManager()
return _models.GetSize();
}
+
-+void LAppLive2DManager::SetFacialLandmarkDetector(FacialLandmarkDetector *detector)
++void LAppLive2DManager::SetTracker(MouseCursorTracker *tracker)
+{
+ for (auto it = _models.Begin(); it != _models.End(); ++it)
+ {
-+ (*it)->SetFacialLandmarkDetector(detector);
++ (*it)->SetTracker(tracker);
+ }
+}
+
+ _translateY = translateY;
+}
diff -pruN --exclude build ./demo_clean/src/LAppLive2DManager.hpp ./demo_dev/src/LAppLive2DManager.hpp
---- ./demo_clean/src/LAppLive2DManager.hpp 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/src/LAppLive2DManager.hpp 2020-07-11 23:21:17.969484538 +0100
+--- ./demo_clean/src/LAppLive2DManager.hpp 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/src/LAppLive2DManager.hpp 2020-10-01 23:36:24.583055381 +0100
@@ -6,12 +6,15 @@
*/
#pragma once
class LAppModel;
-+class FacialLandmarkDetector;
++class MouseCursorTracker;
+
/**
* @brief サンプルアプリケーションにおいてCubismModelを管理するクラス<br>
Csm::csmUint32 GetModelNum() const;
+ /**
-+ * @brief Set the pointer to the FacialLandmarkDetector instance
++ * @brief Set the pointer to the MouseCursorTracker instance
+ *
-+ * @param[in] detector : Pointer to FacialLandmarkDetector instance
++ * @param[in] tracker : Pointer to MouseCursorTracker instance
+ */
-+ void SetFacialLandmarkDetector(FacialLandmarkDetector *detector);
++ void SetTracker(MouseCursorTracker *tracker);
+
+ /**
+ * @brief Set projection scale factor and translation parameters
+ float _translateY;
};
diff -pruN --exclude build ./demo_clean/src/LAppModel.cpp ./demo_dev/src/LAppModel.cpp
---- ./demo_clean/src/LAppModel.cpp 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/src/LAppModel.cpp 2020-09-27 17:40:16.401166244 +0100
+--- ./demo_clean/src/LAppModel.cpp 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/src/LAppModel.cpp 2020-10-01 23:34:43.482626010 +0100
@@ -21,6 +21,8 @@
#include "LAppTextureManager.hpp"
#include "LAppDelegate.hpp"
-+#include "facial_landmark_detector.h"
++#include "mouse_cursor_tracker.h"
+
using namespace Live2D::Cubism::Framework;
using namespace Live2D::Cubism::Framework::DefaultParameterId;
using namespace LAppDefine;
-@@ -128,30 +130,6 @@ void LAppModel::SetupModel(ICubismModelS
+@@ -49,6 +51,7 @@ LAppModel::LAppModel()
+ : CubismUserModel()
+ , _modelSetting(NULL)
+ , _userTimeSeconds(0.0f)
++ , _tracker(nullptr)
+ {
+ if (DebugLogEnable)
+ {
+@@ -128,30 +131,6 @@ void LAppModel::SetupModel(ICubismModelS
DeleteBuffer(buffer, path.GetRawString());
}
//Physics
if (strcmp(_modelSetting->GetPhysicsFileName(), "") != 0)
{
-@@ -214,15 +192,6 @@ void LAppModel::SetupModel(ICubismModelS
- }
- }
-
-- // LipSyncIds
-- {
-- csmInt32 lipSyncIdCount = _modelSetting->GetLipSyncParameterCount();
-- for (csmInt32 i = 0; i < lipSyncIdCount; ++i)
-- {
-- _lipSyncIds.PushBack(_modelSetting->GetLipSyncParameterId(i));
-- }
-- }
--
- //Layout
- csmMap<csmString, csmFloat32> layout;
- _modelSetting->GetLayoutMap(layout);
-@@ -335,59 +304,57 @@ void LAppModel::Update()
+@@ -335,59 +314,72 @@ void LAppModel::Update()
const csmFloat32 deltaTimeSeconds = LAppPal::GetDeltaTime();
_userTimeSeconds += deltaTimeSeconds;
- //-----------------------------------------------------------------
- _model->LoadParameters(); // 前回セーブされた状態をロード
- if (_motionManager->IsFinished())
-- {
++ if (_tracker)
+ {
- // モーションの再生がない場合、待機モーションの中からランダムで再生する
- StartRandomMotion(MotionGroupIdle, PriorityIdle);
- }
- else
-+ if (_detector)
- {
+- {
- motionUpdated = _motionManager->UpdateMotion(_model, deltaTimeSeconds); // モーションを更新
- }
- _model->SaveParameters(); // 状態を保存
- //-----------------------------------------------------------------
+ auto idMan = CubismFramework::GetIdManager();
-+ auto params = _detector->getParams();
++ auto params = _tracker->getParams();
- // まばたき
- if (!motionUpdated)
+ StartRandomMotion(MotionGroupIdle, PriorityIdle);
}
- }
--
++ else
++ {
++ _motionManager->UpdateMotion(_model, deltaTimeSeconds); // モーションを更新
++ }
++ _model->SaveParameters(); // 状態を保存
+
- if (_expressionManager != NULL)
- {
- _expressionManager->UpdateMotion(_model, deltaTimeSeconds); // 表情でパラメータ更新(相対変化)
- }
--
+
- //ドラッグによる変化
- //ドラッグによる顔の向きの調整
- _model->AddParameterValue(_idParamAngleX, _dragX * 30); // -30から30の値を加える
- _model->AddParameterValue(_idParamAngleY, _dragY * 30);
- _model->AddParameterValue(_idParamAngleZ, _dragX * _dragY * -30);
--
-- //ドラッグによる体の向きの調整
-- _model->AddParameterValue(_idParamBodyAngleX, _dragX * 10); // -10から10の値を加える
++ if (params.autoBlink && _eyeBlink)
++ {
++ _eyeBlink->UpdateParameters(_model, deltaTimeSeconds);
++ }
+ else
+ {
-+ _motionManager->UpdateMotion(_model, deltaTimeSeconds); // モーションを更新
++ _model->SetParameterValue(idMan->GetId("ParamEyeLOpen"),
++ params.leftEyeOpenness);
++ _model->SetParameterValue(idMan->GetId("ParamEyeROpen"),
++ params.rightEyeOpenness);
+ }
-+ _model->SaveParameters(); // 状態を保存
+
+- //ドラッグによる体の向きの調整
+- _model->AddParameterValue(_idParamBodyAngleX, _dragX * 10); // -10から10の値を加える
++ _model->SetParameterValue(idMan->GetId("ParamMouthForm"),
++ params.mouthForm);
- //ドラッグによる目の向きの調整
- _model->AddParameterValue(_idParamEyeBallX, _dragX); // -1から1の値を加える
- _model->AddParameterValue(_idParamEyeBallY, _dragY);
++ if (params.useLipSync && _lipSync)
++ {
++ csmFloat32 value = params.lipSyncParam; // 0 to 1
- // 呼吸など
- if (_breath != NULL)
- {
- _breath->UpdateParameters(_model, deltaTimeSeconds);
-+ if (params.autoBlink && _eyeBlink)
-+ {
-+ _eyeBlink->UpdateParameters(_model, deltaTimeSeconds);
++ for (csmUint32 i = 0; i < _lipSyncIds.GetSize(); ++i)
++ {
++ _model->AddParameterValue(_lipSyncIds[i], value, 0.8f);
++ }
+ }
+ else
+ {
-+ _model->SetParameterValue(idMan->GetId("ParamEyeLOpen"),
-+ params.leftEyeOpenness);
-+ _model->SetParameterValue(idMan->GetId("ParamEyeROpen"),
-+ params.rightEyeOpenness);
++ _model->SetParameterValue(idMan->GetId("ParamMouthOpenY"),
++ params.mouthOpenness);
+ }
-+ _model->SetParameterValue(idMan->GetId("ParamMouthForm"),
-+ params.mouthForm);
-+ _model->SetParameterValue(idMan->GetId("ParamMouthOpenY"),
-+ params.mouthOpenness);
++
+ _model->SetParameterValue(idMan->GetId("ParamEyeLSmile"),
+ params.leftEyeSmile);
+ _model->SetParameterValue(idMan->GetId("ParamEyeRSmile"),
}
// 物理演算の設定
-@@ -396,17 +363,6 @@ void LAppModel::Update()
+@@ -396,17 +388,6 @@ void LAppModel::Update()
_physics->Evaluate(_model, deltaTimeSeconds);
}
// ポーズの設定
if (_pose != NULL)
{
-@@ -626,3 +582,9 @@ Csm::Rendering::CubismOffscreenFrame_Ope
+@@ -626,3 +607,9 @@ Csm::Rendering::CubismOffscreenFrame_Ope
{
return _renderBuffer;
}
+
-+void LAppModel::SetFacialLandmarkDetector(FacialLandmarkDetector *detector)
++void LAppModel::SetTracker(MouseCursorTracker *tracker)
+{
-+ _detector = detector;
++ _tracker = tracker;
+}
+
diff -pruN --exclude build ./demo_clean/src/LAppModel.hpp ./demo_dev/src/LAppModel.hpp
---- ./demo_clean/src/LAppModel.hpp 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/LAppModel.hpp 2020-07-11 15:40:18.977286166 +0100
+--- ./demo_clean/src/LAppModel.hpp 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/LAppModel.hpp 2020-10-01 23:35:39.254849094 +0100
@@ -13,6 +13,7 @@
#include <Type/csmRectF.hpp>
#include <Rendering/OpenGL/CubismOffscreenSurface_OpenGLES2.hpp>
-+#include "facial_landmark_detector.h"
++#include "mouse_cursor_tracker.h"
/**
* @brief ユーザーが実際に使用するモデルの実装クラス<br>
Csm::Rendering::CubismOffscreenFrame_OpenGLES2& GetRenderBuffer();
+ /**
-+ * @brief Set the pointer to the FacialLandmarkDetector instance
++ * @brief Set the pointer to the MouseCursorTracker instance
+ *
-+ * @param[in] detector : Pointer to FacialLandmarkDetector instance
++ * @param[in] tracker : Pointer to MouseCursorTracker instance
+ */
-+ void SetFacialLandmarkDetector(FacialLandmarkDetector *detector);
++ void SetTracker(MouseCursorTracker *tracker);
+
protected:
/**
Csm::Rendering::CubismOffscreenFrame_OpenGLES2 _renderBuffer; ///< フレームバッファ以外の描画先
+
-+ FacialLandmarkDetector *_detector;
++ MouseCursorTracker *_tracker;
};
diff -pruN --exclude build ./demo_clean/src/LAppPal.cpp ./demo_dev/src/LAppPal.cpp
---- ./demo_clean/src/LAppPal.cpp 2020-09-27 17:43:12.081477263 +0100
-+++ ./demo_dev/src/LAppPal.cpp 2020-07-11 23:29:09.084910139 +0100
+--- ./demo_clean/src/LAppPal.cpp 2020-10-02 02:01:04.829787750 +0100
++++ ./demo_dev/src/LAppPal.cpp 2020-10-01 22:47:24.722848453 +0100
@@ -6,6 +6,7 @@
*/
}
file.read(buf, size);
diff -pruN --exclude build ./demo_clean/src/LAppTextureManager.cpp ./demo_dev/src/LAppTextureManager.cpp
---- ./demo_clean/src/LAppTextureManager.cpp 2020-09-27 17:43:12.085477268 +0100
-+++ ./demo_dev/src/LAppTextureManager.cpp 2020-07-11 22:22:18.004965003 +0100
+--- ./demo_clean/src/LAppTextureManager.cpp 2020-10-02 02:01:04.833787812 +0100
++++ ./demo_dev/src/LAppTextureManager.cpp 2020-10-01 22:47:24.654849690 +0100
@@ -96,6 +96,46 @@ LAppTextureManager::TextureInfo* LAppTex
}
{
for (Csm::csmUint32 i = 0; i < _textures.GetSize(); i++)
diff -pruN --exclude build ./demo_clean/src/LAppTextureManager.hpp ./demo_dev/src/LAppTextureManager.hpp
---- ./demo_clean/src/LAppTextureManager.hpp 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/src/LAppTextureManager.hpp 2020-07-11 17:36:31.180131039 +0100
+--- ./demo_clean/src/LAppTextureManager.hpp 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/src/LAppTextureManager.hpp 2020-10-01 22:47:24.786847290 +0100
@@ -72,6 +72,8 @@ public:
*/
TextureInfo* CreateTextureFromPngFile(std::string fileName);
* @brief 画像の解放
*
diff -pruN --exclude build ./demo_clean/src/LAppView.cpp ./demo_dev/src/LAppView.cpp
---- ./demo_clean/src/LAppView.cpp 2020-09-27 17:43:12.085477268 +0100
-+++ ./demo_dev/src/LAppView.cpp 2020-07-11 17:38:06.905451955 +0100
+--- ./demo_clean/src/LAppView.cpp 2020-10-02 02:01:04.833787812 +0100
++++ ./demo_dev/src/LAppView.cpp 2020-10-01 22:47:24.602850636 +0100
@@ -13,7 +13,6 @@
#include "LAppLive2DManager.hpp"
#include "LAppTextureManager.hpp"
- }
}
diff -pruN --exclude build ./demo_clean/src/LAppView.hpp ./demo_dev/src/LAppView.hpp
---- ./demo_clean/src/LAppView.hpp 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/src/LAppView.hpp 2020-07-11 17:38:25.541708705 +0100
+--- ./demo_clean/src/LAppView.hpp 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/src/LAppView.hpp 2020-10-01 22:47:24.802846999 +0100
@@ -14,7 +14,6 @@
#include "CubismFramework.hpp"
#include <Rendering/OpenGL/CubismOffscreenSurface_OpenGLES2.hpp>
// レンダリング先を別ターゲットにする方式の場合に使用
LAppSprite* _renderSprite; ///< モードによっては_renderBufferのテクスチャを描画
diff -pruN --exclude build ./demo_clean/src/main.cpp ./demo_dev/src/main.cpp
---- ./demo_clean/src/main.cpp 2020-09-27 17:43:12.069477246 +0100
-+++ ./demo_dev/src/main.cpp 2020-07-12 15:06:29.194034887 +0100
-@@ -5,18 +5,156 @@
+--- ./demo_clean/src/main.cpp 2020-10-02 02:01:04.825787688 +0100
++++ ./demo_dev/src/main.cpp 2020-10-01 23:42:12.845205308 +0100
+@@ -5,18 +5,154 @@
* that can be found at https://www.live2d.com/eula/live2d-open-software-license-agreement_en.html.
*/
+namespace fs = std::experimental::filesystem;
+#endif
+
-+
#include "LAppDelegate.hpp"
+#include "LAppLive2DManager.hpp"
-+#include "facial_landmark_detector.h"
++#include "mouse_cursor_tracker.h"
+
+struct CmdArgs
+{
+ float translateX;
+ float translateY;
+ std::string modelName;
-+ std::string cfgPath; // Path to config file for FacialLandmarkDetector
++ std::string cfgPath; // Path to config file for MouseCursorTracker
+};
+
+CmdArgs parseArgv(int argc, char *argv[])
+ // Set default values
+ cmdArgs.windowWidth = 600;
+ cmdArgs.windowHeight = 600;
-+ cmdArgs.windowTitle = "FacialLandmarksForCubism example";
++ cmdArgs.windowTitle = "MouseTrackerForCubism example";
+ cmdArgs.rootDir = fs::current_path();
+ cmdArgs.scaleFactor = 8.0f;
+ cmdArgs.translateX = 0.0f;
- LAppDelegate::GetInstance()->Run();
+ delegate->SetRootDirectory(cmdArgs.rootDir);
+
-+ FacialLandmarkDetector detector(cmdArgs.cfgPath);
++ MouseCursorTracker tracker(cmdArgs.cfgPath);
+
-+ std::thread detectorThread(&FacialLandmarkDetector::mainLoop,
-+ &detector);
++ std::thread trackerThread(&MouseCursorTracker::mainLoop, &tracker);
+
+ LAppLive2DManager *manager = LAppLive2DManager::GetInstance();
+ manager->SetModel(cmdArgs.modelName);
+ manager->SetProjectionScaleTranslate(cmdArgs.scaleFactor,
+ cmdArgs.translateX,
+ cmdArgs.translateY);
-+ manager->SetFacialLandmarkDetector(&detector);
++ manager->SetTracker(&tracker);
+
+ delegate->Run();
+
-+ detector.stop();
-+ detectorThread.join();
++ tracker.stop();
++ trackerThread.join();
return 0;
}
+++ /dev/null
-// -*- mode: c++ -*-
-
-#ifndef __FACIAL_LANDMARK_DETECTOR_H__
-#define __FACIAL_LANDMARK_DETECTOR_H__
-
-/****
-Copyright (c) 2020 Adrian I. Lam
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-****/
-
-#include <deque>
-#include <string>
-#include <opencv2/opencv.hpp>
-#include <dlib/image_processing/frontal_face_detector.h>
-#include <dlib/image_processing.h>
-#include <dlib/gui_widgets.h>
-
-class FacialLandmarkDetector
-{
-public:
- struct Params
- {
- double leftEyeOpenness;
- double rightEyeOpenness;
- double leftEyeSmile;
- double rightEyeSmile;
- double mouthOpenness;
- double mouthForm;
- double faceXAngle;
- double faceYAngle;
- double faceZAngle;
- bool autoBlink;
- bool autoBreath;
- bool randomMotion;
- // TODO eyebrows currently not supported...
- // I'd like to include them, but the dlib detection is very
- // noisy and inaccurate (at least for my face).
- };
-
- FacialLandmarkDetector(std::string cfgPath);
-
- Params getParams(void) const;
-
- void stop(void);
-
- void mainLoop(void);
-
-private:
- enum LeftRight : bool
- {
- LEFT,
- RIGHT
- };
-
- cv::VideoCapture webcam;
- dlib::image_window win;
- dlib::frontal_face_detector detector;
- dlib::shape_predictor predictor;
- bool m_stop;
-
- double calcEyeAspectRatio(dlib::point& p1, dlib::point& p2,
- dlib::point& p3, dlib::point& p4,
- dlib::point& p5, dlib::point& p6) const;
-
- double calcRightEyeAspectRatio(dlib::full_object_detection& shape) const;
- double calcLeftEyeAspectRatio(dlib::full_object_detection& shape) const;
-
- double calcEyeOpenness(LeftRight eye,
- dlib::full_object_detection& shape,
- double faceYAngle) const;
-
- double calcMouthForm(dlib::full_object_detection& shape) const;
- double calcMouthOpenness(dlib::full_object_detection& shape, double mouthForm) const;
-
- double calcFaceXAngle(dlib::full_object_detection& shape) const;
- double calcFaceYAngle(dlib::full_object_detection& shape, double faceXAngle, double mouthForm) const;
- double calcFaceZAngle(dlib::full_object_detection& shape) const;
-
- void populateDefaultConfig(void);
- void parseConfig(std::string cfgPath);
- void throwConfigError(std::string paramName, std::string expectedType,
- std::string line, unsigned int lineNum);
-
-
- std::deque<double> m_leftEyeOpenness;
- std::deque<double> m_rightEyeOpenness;
-
- std::deque<double> m_mouthOpenness;
- std::deque<double> m_mouthForm;
-
- std::deque<double> m_faceXAngle;
- std::deque<double> m_faceYAngle;
- std::deque<double> m_faceZAngle;
-
- struct Config
- {
- int cvVideoCaptureId;
- std::string predictorPath;
- double faceYAngleCorrection;
- double eyeSmileEyeOpenThreshold;
- double eyeSmileMouthFormThreshold;
- double eyeSmileMouthOpenThreshold;
- bool showWebcamVideo;
- bool renderLandmarksOnVideo;
- bool lateralInversion;
- std::size_t faceXAngleNumTaps;
- std::size_t faceYAngleNumTaps;
- std::size_t faceZAngleNumTaps;
- std::size_t mouthFormNumTaps;
- std::size_t mouthOpenNumTaps;
- std::size_t leftEyeOpenNumTaps;
- std::size_t rightEyeOpenNumTaps;
- int cvWaitKeyMs;
- double eyeClosedThreshold;
- double eyeOpenThreshold;
- double mouthNormalThreshold;
- double mouthSmileThreshold;
- double mouthClosedThreshold;
- double mouthOpenThreshold;
- double mouthOpenLaughCorrection;
- double faceYAngleXRotCorrection;
- double faceYAngleSmileCorrection;
- double faceYAngleZeroValue;
- double faceYAngleUpThreshold;
- double faceYAngleDownThreshold;
- bool autoBlink;
- bool autoBreath;
- bool randomMotion;
- } m_cfg;
-};
-
-#endif
-
--- /dev/null
+// -*- mode: c++ -*-
+
+#ifndef __MOUSE_CURSOR_TRACKER_H__
+#define __MOUSE_CURSOR_TRACKER_H__
+
+/****
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+****/
+
+#include <string>
+#include <map>
+#include <thread>
+extern "C"
+{
+#include <xdo.h>
+#include <pulse/simple.h>
+}
+
+class MouseCursorTracker
+{
+public:
+ MouseCursorTracker(std::string cfgPath);
+ ~MouseCursorTracker();
+
+ struct Params
+ {
+ double leftEyeOpenness;
+ double rightEyeOpenness;
+ double leftEyeSmile;
+ double rightEyeSmile;
+ double mouthOpenness;
+ double mouthForm;
+ double faceXAngle;
+ double faceYAngle;
+ double faceZAngle;
+ bool autoBlink;
+ bool autoBreath;
+ bool randomMotion;
+ bool useLipSync;
+ double lipSyncParam;
+ };
+
+ Params getParams(void) const;
+
+ void stop(void);
+
+ void mainLoop(void);
+
+private:
+ struct Coord
+ {
+ int x;
+ int y;
+ };
+
+ struct Config
+ {
+ int sleepMs;
+ bool autoBlink;
+ bool autoBreath;
+ bool randomMotion;
+ bool useLipSync;
+ double lipSyncGain;
+ double lipSyncCutOff;
+ unsigned int audioBufSize;
+ double mouthForm;
+ int top;
+ int bottom;
+ int left;
+ int right;
+ int screen;
+ Coord middle;
+ } m_cfg;
+
+ bool m_stop;
+
+ Coord m_curPos;
+
+ xdo_t *m_xdo;
+
+ std::thread m_getVolumeThread;
+ void audioLoop(void);
+ double m_currentVol;
+ pa_simple *m_pulse;
+
+ void populateDefaultConfig(void);
+ void parseConfig(std::string cfgPath);
+};
+
+#endif
+++ /dev/null
-Subproject commit 23b9abd07a56f9fef560aa9e263b37f82543a0cc
+++ /dev/null
-/****
-Copyright (c) 2020 Adrian I. Lam
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-****/
-
-#include <stdexcept>
-#include <fstream>
-#include <string>
-#include <sstream>
-#include <cmath>
-
-#include <opencv2/opencv.hpp>
-
-#include <dlib/opencv.h>
-#include <dlib/image_processing/frontal_face_detector.h>
-#include <dlib/image_processing.h>
-#include <dlib/image_processing/render_face_detections.h>
-
-#include "facial_landmark_detector.h"
-#include "math_utils.h"
-
-
-static void filterPush(std::deque<double>& buf, double newval,
- std::size_t numTaps)
-{
- buf.push_back(newval);
- while (buf.size() > numTaps)
- {
- buf.pop_front();
- }
-}
-
-FacialLandmarkDetector::FacialLandmarkDetector(std::string cfgPath)
- : m_stop(false)
-{
- parseConfig(cfgPath);
-
- if (!webcam.open(m_cfg.cvVideoCaptureId))
- {
- throw std::runtime_error("Unable to open webcam");
- }
-
- detector = dlib::get_frontal_face_detector();
- dlib::deserialize(m_cfg.predictorPath) >> predictor;
-}
-
-FacialLandmarkDetector::Params FacialLandmarkDetector::getParams(void) const
-{
- Params params;
-
- params.faceXAngle = avg(m_faceXAngle);
- params.faceYAngle = avg(m_faceYAngle) + m_cfg.faceYAngleCorrection;
- // + 10 correct for angle between computer monitor and webcam
- params.faceZAngle = avg(m_faceZAngle);
- params.mouthOpenness = avg(m_mouthOpenness);
- params.mouthForm = avg(m_mouthForm);
-
- double leftEye = avg(m_leftEyeOpenness, 1);
- double rightEye = avg(m_rightEyeOpenness, 1);
- // Just combine the two to get better synchronized blinks
- // This effectively disables winks, so if we want to
- // support winks in the future (see below) we will need
- // a better way to handle this out-of-sync blinks.
- double bothEyes = (leftEye + rightEye) / 2;
- leftEye = bothEyes;
- rightEye = bothEyes;
- // Detect winks and make them look better
- // Commenting out - winks are difficult to be detected by the
- // dlib data set anyway... maybe in the future we can
- // add a runtime option to enable/disable...
- /*if (right == 0 && left > 0.2)
- {
- left = 1;
- }
- else if (left == 0 && right > 0.2)
- {
- right = 1;
- }
- */
- params.leftEyeOpenness = leftEye;
- params.rightEyeOpenness = rightEye;
-
- if (leftEye <= m_cfg.eyeSmileEyeOpenThreshold &&
- rightEye <= m_cfg.eyeSmileEyeOpenThreshold &&
- params.mouthForm > m_cfg.eyeSmileMouthFormThreshold &&
- params.mouthOpenness > m_cfg.eyeSmileMouthOpenThreshold)
- {
- params.leftEyeSmile = 1;
- params.rightEyeSmile = 1;
- }
- else
- {
- params.leftEyeSmile = 0;
- params.rightEyeSmile = 0;
- }
-
- params.autoBlink = m_cfg.autoBlink;
- params.autoBreath = m_cfg.autoBreath;
- params.randomMotion = m_cfg.randomMotion;
-
- return params;
-}
-
-void FacialLandmarkDetector::stop(void)
-{
- m_stop = true;
-}
-
-void FacialLandmarkDetector::mainLoop(void)
-{
- while (!m_stop)
- {
- cv::Mat frame;
- if (!webcam.read(frame))
- {
- throw std::runtime_error("Unable to read from webcam");
- }
- cv::Mat flipped;
- if (m_cfg.lateralInversion)
- {
- cv::flip(frame, flipped, 1);
- }
- else
- {
- flipped = frame;
- }
- dlib::cv_image<dlib::bgr_pixel> cimg(flipped);
-
- if (m_cfg.showWebcamVideo)
- {
- win.set_image(cimg);
- }
-
- std::vector<dlib::rectangle> faces = detector(cimg);
-
- if (faces.size() > 0)
- {
- dlib::rectangle face = faces[0];
- dlib::full_object_detection shape = predictor(cimg, face);
-
- /* The coordinates seem to be rather noisy in general.
- * We will push everything through some moving average filters
- * to reduce noise. The number of taps is determined empirically
- * until we get something good.
- * An alternative method would be to get some better dataset
- * for dlib - perhaps even to train on a custom data set just for the user.
- */
-
- // Face rotation: X direction (left-right)
- double faceXRot = calcFaceXAngle(shape);
- filterPush(m_faceXAngle, faceXRot, m_cfg.faceXAngleNumTaps);
-
- // Mouth form (smile / laugh) detection
- double mouthForm = calcMouthForm(shape);
- filterPush(m_mouthForm, mouthForm, m_cfg.mouthFormNumTaps);
-
- // Face rotation: Y direction (up-down)
- double faceYRot = calcFaceYAngle(shape, faceXRot, mouthForm);
- filterPush(m_faceYAngle, faceYRot, m_cfg.faceYAngleNumTaps);
-
- // Face rotation: Z direction (head tilt)
- double faceZRot = calcFaceZAngle(shape);
- filterPush(m_faceZAngle, faceZRot, m_cfg.faceZAngleNumTaps);
-
- // Mouth openness
- double mouthOpen = calcMouthOpenness(shape, mouthForm);
- filterPush(m_mouthOpenness, mouthOpen, m_cfg.mouthOpenNumTaps);
-
- // Eye openness
- double eyeLeftOpen = calcEyeOpenness(LEFT, shape, faceYRot);
- filterPush(m_leftEyeOpenness, eyeLeftOpen, m_cfg.leftEyeOpenNumTaps);
- double eyeRightOpen = calcEyeOpenness(RIGHT, shape, faceYRot);
- filterPush(m_rightEyeOpenness, eyeRightOpen, m_cfg.rightEyeOpenNumTaps);
-
- // TODO eyebrows?
-
- if (m_cfg.showWebcamVideo && m_cfg.renderLandmarksOnVideo)
- {
- win.clear_overlay();
- win.add_overlay(dlib::render_face_detections(shape));
- }
- }
- else
- {
- if (m_cfg.showWebcamVideo && m_cfg.renderLandmarksOnVideo)
- {
- win.clear_overlay();
- }
- }
-
- cv::waitKey(m_cfg.cvWaitKeyMs);
- }
-}
-
-double FacialLandmarkDetector::calcEyeAspectRatio(
- dlib::point& p1, dlib::point& p2,
- dlib::point& p3, dlib::point& p4,
- dlib::point& p5, dlib::point& p6) const
-{
- double eyeWidth = dist(p1, p4);
- double eyeHeight1 = dist(p2, p6);
- double eyeHeight2 = dist(p3, p5);
-
- return (eyeHeight1 + eyeHeight2) / (2 * eyeWidth);
-}
-
-double FacialLandmarkDetector::calcEyeOpenness(
- LeftRight eye,
- dlib::full_object_detection& shape,
- double faceYAngle) const
-{
- double eyeAspectRatio;
- if (eye == LEFT)
- {
- eyeAspectRatio = calcEyeAspectRatio(shape.part(42), shape.part(43), shape.part(44),
- shape.part(45), shape.part(46), shape.part(47));
- }
- else
- {
- eyeAspectRatio = calcEyeAspectRatio(shape.part(36), shape.part(37), shape.part(38),
- shape.part(39), shape.part(40), shape.part(41));
- }
-
- // Apply correction due to faceYAngle
- double corrEyeAspRat = eyeAspectRatio / std::cos(degToRad(faceYAngle));
-
- return linearScale01(corrEyeAspRat, m_cfg.eyeClosedThreshold, m_cfg.eyeOpenThreshold);
-}
-
-
-
-double FacialLandmarkDetector::calcMouthForm(dlib::full_object_detection& shape) const
-{
- /* Mouth form parameter: 0 for normal mouth, 1 for fully smiling / laughing.
- * Compare distance between the two corners of the mouth
- * to the distance between the two eyes.
- */
-
- /* An alternative (my initial attempt) was to compare the corners of
- * the mouth to the top of the upper lip - they almost lie on a
- * straight line when smiling / laughing. But that is only true
- * when facing straight at the camera. When looking up / down,
- * the angle changes. So here we'll use the distance approach instead.
- */
-
- auto eye1 = centroid(shape.part(36), shape.part(37), shape.part(38),
- shape.part(39), shape.part(40), shape.part(41));
- auto eye2 = centroid(shape.part(42), shape.part(43), shape.part(44),
- shape.part(45), shape.part(46), shape.part(47));
- double distEyes = dist(eye1, eye2);
- double distMouth = dist(shape.part(48), shape.part(54));
-
- double form = linearScale01(distMouth / distEyes,
- m_cfg.mouthNormalThreshold,
- m_cfg.mouthSmileThreshold);
-
- return form;
-}
-
-double FacialLandmarkDetector::calcMouthOpenness(
- dlib::full_object_detection& shape,
- double mouthForm) const
-{
- // Use points for the bottom of the upper lip, and top of the lower lip
- // We have 3 pairs of points available, which give the mouth height
- // on the left, in the middle, and on the right, resp.
- // First let's try to use an average of all three.
- double heightLeft = dist(shape.part(63), shape.part(65));
- double heightMiddle = dist(shape.part(62), shape.part(66));
- double heightRight = dist(shape.part(61), shape.part(67));
-
- double avgHeight = (heightLeft + heightMiddle + heightRight) / 3;
-
- // Now, normalize it with the width of the mouth.
- double width = dist(shape.part(60), shape.part(64));
-
- double normalized = avgHeight / width;
-
- double scaled = linearScale01(normalized,
- m_cfg.mouthClosedThreshold,
- m_cfg.mouthOpenThreshold,
- true, false);
-
- // Apply correction according to mouthForm
- // Notice that when you smile / laugh, width is increased
- scaled *= (1 + m_cfg.mouthOpenLaughCorrection * mouthForm);
-
- return scaled;
-}
-
-double FacialLandmarkDetector::calcFaceXAngle(dlib::full_object_detection& shape) const
-{
- // This function will be easier to understand if you refer to the
- // diagram in faceXAngle.png
-
- // Construct the y-axis using (1) average of four points on the nose and
- // (2) average of four points on the upper lip.
-
- auto y0 = centroid(shape.part(27), shape.part(28), shape.part(29),
- shape.part(30));
- auto y1 = centroid(shape.part(50), shape.part(51), shape.part(52),
- shape.part(62));
-
- // Now drop a perpedicular from the left and right edges of the face,
- // and calculate the ratio between the lengths of these perpendiculars
-
- auto left = centroid(shape.part(14), shape.part(15), shape.part(16));
- auto right = centroid(shape.part(0), shape.part(1), shape.part(2));
-
- // Constructing a perpendicular:
- // Join the left/right point and the upper lip. The included angle
- // can now be determined using cosine rule.
- // Then sine of this angle is the perpendicular divided by the newly
- // created line.
- double opp = dist(right, y0);
- double adj1 = dist(y0, y1);
- double adj2 = dist(y1, right);
- double angle = solveCosineRuleAngle(opp, adj1, adj2);
- double perpRight = adj2 * std::sin(angle);
-
- opp = dist(left, y0);
- adj2 = dist(y1, left);
- angle = solveCosineRuleAngle(opp, adj1, adj2);
- double perpLeft = adj2 * std::sin(angle);
-
- // Model the head as a sphere and look from above.
- double theta = std::asin((perpRight - perpLeft) / (perpRight + perpLeft));
-
- theta = radToDeg(theta);
- if (theta < -30) theta = -30;
- if (theta > 30) theta = 30;
- return theta;
-}
-
-double FacialLandmarkDetector::calcFaceYAngle(dlib::full_object_detection& shape, double faceXAngle, double mouthForm) const
-{
- // Use the nose
- // angle between the two left/right points and the tip
- double c = dist(shape.part(31), shape.part(35));
- double a = dist(shape.part(30), shape.part(31));
- double b = dist(shape.part(30), shape.part(35));
-
- double angle = solveCosineRuleAngle(c, a, b);
-
- // This probably varies a lot from person to person...
-
- // Best is probably to work out some trigonometry again,
- // but just linear interpolation seems to work ok...
-
- // Correct for X rotation
- double corrAngle = angle * (1 + (std::abs(faceXAngle) / 30
- * m_cfg.faceYAngleXRotCorrection));
-
- // Correct for smiles / laughs - this increases the angle
- corrAngle *= (1 - mouthForm * m_cfg.faceYAngleSmileCorrection);
-
- if (corrAngle >= m_cfg.faceYAngleZeroValue)
- {
- return -30 * linearScale01(corrAngle,
- m_cfg.faceYAngleZeroValue,
- m_cfg.faceYAngleDownThreshold,
- false, false);
- }
- else
- {
- return 30 * (1 - linearScale01(corrAngle,
- m_cfg.faceYAngleUpThreshold,
- m_cfg.faceYAngleZeroValue,
- false, false));
- }
-}
-
-double FacialLandmarkDetector::calcFaceZAngle(dlib::full_object_detection& shape) const
-{
- // Use average of eyes and nose
-
- auto eyeRight = centroid(shape.part(36), shape.part(37), shape.part(38),
- shape.part(39), shape.part(40), shape.part(41));
- auto eyeLeft = centroid(shape.part(42), shape.part(43), shape.part(44),
- shape.part(45), shape.part(46), shape.part(47));
-
- auto noseLeft = shape.part(35);
- auto noseRight = shape.part(31);
-
- double eyeYDiff = eyeRight.y() - eyeLeft.y();
- double eyeXDiff = eyeRight.x() - eyeLeft.x();
-
- double angle1 = std::atan(eyeYDiff / eyeXDiff);
-
- double noseYDiff = noseRight.y() - noseLeft.y();
- double noseXDiff = noseRight.x() - noseLeft.x();
-
- double angle2 = std::atan(noseYDiff / noseXDiff);
-
- return radToDeg((angle1 + angle2) / 2);
-}
-
-void FacialLandmarkDetector::parseConfig(std::string cfgPath)
-{
- populateDefaultConfig();
- if (cfgPath != "")
- {
- std::ifstream file(cfgPath);
-
- if (!file)
- {
- throw std::runtime_error("Failed to open config file");
- }
-
- std::string line;
- unsigned int lineNum = 0;
-
- while (std::getline(file, line))
- {
- lineNum++;
-
- if (line[0] == '#')
- {
- continue;
- }
-
- std::istringstream ss(line);
- std::string paramName;
- if (ss >> paramName)
- {
- if (paramName == "cvVideoCaptureId")
- {
- if (!(ss >> m_cfg.cvVideoCaptureId))
- {
- throwConfigError(paramName, "int",
- line, lineNum);
- }
- }
- else if (paramName == "predictorPath")
- {
- if (!(ss >> m_cfg.predictorPath))
- {
- throwConfigError(paramName, "std::string",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleCorrection")
- {
- if (!(ss >> m_cfg.faceYAngleCorrection))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "eyeSmileEyeOpenThreshold")
- {
- if (!(ss >> m_cfg.eyeSmileEyeOpenThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "eyeSmileMouthFormThreshold")
- {
- if (!(ss >> m_cfg.eyeSmileMouthFormThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "eyeSmileMouthOpenThreshold")
- {
- if (!(ss >> m_cfg.eyeSmileMouthOpenThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "showWebcamVideo")
- {
- if (!(ss >> m_cfg.showWebcamVideo))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else if (paramName == "renderLandmarksOnVideo")
- {
- if (!(ss >> m_cfg.renderLandmarksOnVideo))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else if (paramName == "lateralInversion")
- {
- if (!(ss >> m_cfg.lateralInversion))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else if (paramName == "faceXAngleNumTaps")
- {
- if (!(ss >> m_cfg.faceXAngleNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleNumTaps")
- {
- if (!(ss >> m_cfg.faceYAngleNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "faceZAngleNumTaps")
- {
- if (!(ss >> m_cfg.faceZAngleNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "mouthFormNumTaps")
- {
- if (!(ss >> m_cfg.mouthFormNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "mouthOpenNumTaps")
- {
- if (!(ss >> m_cfg.mouthOpenNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "leftEyeOpenNumTaps")
- {
- if (!(ss >> m_cfg.leftEyeOpenNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "rightEyeOpenNumTaps")
- {
- if (!(ss >> m_cfg.rightEyeOpenNumTaps))
- {
- throwConfigError(paramName, "std::size_t",
- line, lineNum);
- }
- }
- else if (paramName == "cvWaitKeyMs")
- {
- if (!(ss >> m_cfg.cvWaitKeyMs))
- {
- throwConfigError(paramName, "int",
- line, lineNum);
- }
- }
- else if (paramName == "eyeClosedThreshold")
- {
- if (!(ss >> m_cfg.eyeClosedThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "eyeOpenThreshold")
- {
- if (!(ss >> m_cfg.eyeOpenThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "mouthNormalThreshold")
- {
- if (!(ss >> m_cfg.mouthNormalThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "mouthSmileThreshold")
- {
- if (!(ss >> m_cfg.mouthSmileThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "mouthClosedThreshold")
- {
- if (!(ss >> m_cfg.mouthClosedThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "mouthOpenThreshold")
- {
- if (!(ss >> m_cfg.mouthOpenThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "mouthOpenLaughCorrection")
- {
- if (!(ss >> m_cfg.mouthOpenLaughCorrection))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleXRotCorrection")
- {
- if (!(ss >> m_cfg.faceYAngleXRotCorrection))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleSmileCorrection")
- {
- if (!(ss >> m_cfg.faceYAngleSmileCorrection))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleZeroValue")
- {
- if (!(ss >> m_cfg.faceYAngleZeroValue))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleUpThreshold")
- {
- if (!(ss >> m_cfg.faceYAngleUpThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "faceYAngleDownThreshold")
- {
- if (!(ss >> m_cfg.faceYAngleDownThreshold))
- {
- throwConfigError(paramName, "double",
- line, lineNum);
- }
- }
- else if (paramName == "autoBlink")
- {
- if (!(ss >> m_cfg.autoBlink))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else if (paramName == "autoBreath")
- {
- if (!(ss >> m_cfg.autoBreath))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else if (paramName == "randomMotion")
- {
- if (!(ss >> m_cfg.randomMotion))
- {
- throwConfigError(paramName, "bool",
- line, lineNum);
- }
- }
- else
- {
- std::ostringstream oss;
- oss << "Unrecognized parameter name at line " << lineNum
- << ": " << paramName;
- throw std::runtime_error(oss.str());
- }
- }
- }
- }
-}
-
-void FacialLandmarkDetector::populateDefaultConfig(void)
-{
- // These are values that I've personally tested to work OK for my face.
- // Your milage may vary - hence the config file.
-
- m_cfg.cvVideoCaptureId = 0;
- m_cfg.predictorPath = "shape_predictor_68_face_landmarks.dat";
- m_cfg.faceYAngleCorrection = 10;
- m_cfg.eyeSmileEyeOpenThreshold = 0.6;
- m_cfg.eyeSmileMouthFormThreshold = 0.75;
- m_cfg.eyeSmileMouthOpenThreshold = 0.5;
- m_cfg.showWebcamVideo = true;
- m_cfg.renderLandmarksOnVideo = true;
- m_cfg.lateralInversion = true;
- m_cfg.cvWaitKeyMs = 5;
- m_cfg.faceXAngleNumTaps = 11;
- m_cfg.faceYAngleNumTaps = 11;
- m_cfg.faceZAngleNumTaps = 11;
- m_cfg.mouthFormNumTaps = 3;
- m_cfg.mouthOpenNumTaps = 3;
- m_cfg.leftEyeOpenNumTaps = 3;
- m_cfg.rightEyeOpenNumTaps = 3;
- m_cfg.eyeClosedThreshold = 0.2;
- m_cfg.eyeOpenThreshold = 0.25;
- m_cfg.mouthNormalThreshold = 0.75;
- m_cfg.mouthSmileThreshold = 1.0;
- m_cfg.mouthClosedThreshold = 0.1;
- m_cfg.mouthOpenThreshold = 0.4;
- m_cfg.mouthOpenLaughCorrection = 0.2;
- m_cfg.faceYAngleXRotCorrection = 0.15;
- m_cfg.faceYAngleSmileCorrection = 0.075;
- m_cfg.faceYAngleZeroValue = 1.8;
- m_cfg.faceYAngleDownThreshold = 2.3;
- m_cfg.faceYAngleUpThreshold = 1.3;
- m_cfg.autoBlink = false;
- m_cfg.autoBreath = false;
- m_cfg.randomMotion = false;
-}
-
-void FacialLandmarkDetector::throwConfigError(std::string paramName,
- std::string expectedType,
- std::string line,
- unsigned int lineNum)
-{
- std::ostringstream ss;
- ss << "Error parsing config file for parameter " << paramName
- << "\nAt line " << lineNum << ": " << line
- << "\nExpecting value of type " << expectedType;
-
- throw std::runtime_error(ss.str());
-}
-
+++ /dev/null
-// -*- mode: c++ -*-
-
-#ifndef __FACE_DETECTOR_MATH_UTILS_H__
-#define __FACE_DETECTOR_MATH_UTILS_H__
-
-/****
-Copyright (c) 2020 Adrian I. Lam
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-****/
-
-#include <cmath>
-#include <initializer_list>
-#include <dlib/image_processing.h>
-
-static const double PI = 3.14159265358979;
-
-template<class T>
-static double avg(T container, double defaultValue = 0)
-{
- if (container.size() == 0)
- {
- return defaultValue;
- }
-
- double sum = 0;
- for (auto it = container.begin(); it != container.end(); ++it)
- {
- sum += *it;
- }
- return sum / container.size();
-}
-
-template<class... Args>
-static dlib::point centroid(Args&... args)
-{
- std::size_t numArgs = sizeof...(args);
- if (numArgs == 0) return dlib::point(0, 0);
-
- double sumX = 0, sumY = 0;
- for (auto point : {args...})
- {
- sumX += point.x();
- sumY += point.y();
- }
-
- return dlib::point(sumX / numArgs, sumY / numArgs);
-}
-
-static inline double sq(double x)
-{
- return x * x;
-}
-
-static double solveCosineRuleAngle(double opposite,
- double adjacent1,
- double adjacent2)
-{
- // c^2 = a^2 + b^2 - 2 a b cos(C)
- double cosC = (sq(opposite) - sq(adjacent1) - sq(adjacent2)) /
- (-2 * adjacent1 * adjacent2);
- return std::acos(cosC);
-}
-
-static inline double radToDeg(double rad)
-{
- return rad * 180 / PI;
-}
-
-static inline double degToRad(double deg)
-{
- return deg * PI / 180;
-}
-
-double dist(dlib::point& p1, dlib::point& p2)
-{
- double xDist = p1.x() - p2.x();
- double yDist = p1.y() - p2.y();
-
- return std::hypot(xDist, yDist);
-}
-
-/*! Scale linearly from 0 to 1 (both end-points inclusive) */
-double linearScale01(double num, double min, double max,
- bool clipMin = true, bool clipMax = true)
-{
- if (num < min && clipMin) return 0.0;
- if (num > max && clipMax) return 1.0;
- return (num - min) / (max - min);
-}
-
-#endif
--- /dev/null
+/****
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+****/
+
+#include <stdexcept>
+#include <string>
+#include <chrono>
+#include <thread>
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <map>
+#include <functional>
+#include <cstdlib>
+#include <cmath>
+
+extern "C"
+{
+#include <xdo.h>
+#include <pulse/simple.h>
+}
+#include "mouse_cursor_tracker.h"
+
+/*!
+ * Root-mean-square of \p count float samples in \p buf, used as a
+ * crude loudness estimate for lip sync.
+ *
+ * Returns 0 for an empty buffer instead of dividing by zero (which
+ * would propagate NaN into the lip sync parameter).
+ */
+static double rms(float *buf, std::size_t count)
+{
+    if (count == 0)
+    {
+        return 0;
+    }
+
+    double sum = 0;
+    for (std::size_t i = 0; i < count; i++)
+    {
+        sum += buf[i] * buf[i];
+    }
+    return std::sqrt(sum / count);
+}
+
+/*!
+ * \param cfgPath Path to the config file; an empty string keeps the
+ *                built-in defaults.
+ *
+ * Parses the config, creates the xdo context for cursor queries,
+ * opens a PulseAudio capture stream for lip sync, and starts the
+ * audio sampling thread.
+ *
+ * \throw std::runtime_error on config errors or if the xdo context
+ *        or the PulseAudio stream cannot be created.
+ */
+MouseCursorTracker::MouseCursorTracker(std::string cfgPath)
+    : m_stop(false)
+{
+    parseConfig(cfgPath);
+
+    m_xdo = xdo_new(nullptr);
+    if (!m_xdo)
+    {
+        // xdo_new returns NULL on failure; don't continue with a bad handle
+        throw std::runtime_error("Unable to create xdo context");
+    }
+
+    // 32-bit float samples, native endian, stereo @ 44.1 kHz
+    const pa_sample_spec ss =
+    {
+        .format = PA_SAMPLE_FLOAT32NE,
+        .rate = 44100,
+        .channels = 2
+    };
+    m_pulse = pa_simple_new(nullptr, "MouseCursorTracker", PA_STREAM_RECORD,
+                            nullptr, "LipSync", &ss, nullptr, nullptr, nullptr);
+    if (!m_pulse)
+    {
+        // The destructor will not run for a throwing constructor,
+        // so release the xdo context here to avoid a leak.
+        xdo_free(m_xdo);
+        throw std::runtime_error("Unable to create pulse");
+    }
+
+    m_getVolumeThread = std::thread(&MouseCursorTracker::audioLoop, this);
+}
+
+/*!
+ * Audio capture loop, run on m_getVolumeThread.
+ *
+ * Repeatedly reads one buffer of samples from PulseAudio and stores
+ * its RMS in m_currentVol until m_stop is set.
+ *
+ * Must not throw: an exception escaping a std::thread entry point
+ * calls std::terminate().  On a read error the loop stops instead.
+ */
+void MouseCursorTracker::audioLoop(void)
+{
+    // std::vector instead of raw new[]: freed on every exit path
+    std::vector<float> buf(m_cfg.audioBufSize);
+
+    std::size_t audioBufByteSize = buf.size() * sizeof buf[0];
+
+    while (!m_stop)
+    {
+        if (pa_simple_read(m_pulse, buf.data(), audioBufByteSize, nullptr) < 0)
+        {
+            // Throwing here would terminate the whole process
+            // (uncaught exception on a std::thread); shut down instead.
+            m_stop = true;
+            break;
+        }
+        m_currentVol = rms(buf.data(), buf.size());
+    }
+}
+
+/*!
+ * Tear down in a safe order: ask the audio thread to exit (in case
+ * stop() was never called, without which join() would block forever),
+ * join it, then release the PulseAudio stream and the xdo context.
+ */
+MouseCursorTracker::~MouseCursorTracker()
+{
+    m_stop = true;
+    m_getVolumeThread.join();
+    // Free handles only after the thread that reads m_pulse has exited
+    pa_simple_free(m_pulse);
+    xdo_free(m_xdo);
+}
+
+/*!
+ * Signals the audio loop and mainLoop() to exit after their current
+ * iteration.  Does not block; the destructor joins the audio thread.
+ */
+void MouseCursorTracker::stop(void)
+{
+    m_stop = true;
+}
+
+/*!
+ * Compute Cubism parameters from the latest cursor position and
+ * microphone volume.
+ *
+ * Cursor x/y are mapped linearly: m_cfg.middle maps to a face angle
+ * of 0 and the configured left/right/top/bottom edges map to +/-30
+ * degrees (y is negated: cursor below middle tilts the face down).
+ * A zero-sized range now yields a 0 angle instead of dividing by
+ * zero (possible when e.g. middle.x == right in the config).
+ */
+MouseCursorTracker::Params MouseCursorTracker::getParams(void) const
+{
+    Params params = Params();
+
+    int xOffset = m_curPos.x - m_cfg.middle.x;
+    int leftRange = m_cfg.middle.x - m_cfg.left;
+    int rightRange = m_cfg.right - m_cfg.middle.x;
+
+    if (xOffset > 0) // i.e. to the right
+    {
+        params.faceXAngle = rightRange ? 30.0 * xOffset / rightRange : 0;
+    }
+    else // to the left (or exactly at middle)
+    {
+        params.faceXAngle = leftRange ? 30.0 * xOffset / leftRange : 0;
+    }
+
+    int yOffset = m_curPos.y - m_cfg.middle.y;
+    int topRange = m_cfg.middle.y - m_cfg.top;
+    int bottomRange = m_cfg.bottom - m_cfg.middle.y;
+
+    if (yOffset > 0) // downwards
+    {
+        params.faceYAngle = bottomRange ? -30.0 * yOffset / bottomRange : 0;
+    }
+    else // upwards (or exactly at middle)
+    {
+        params.faceYAngle = topRange ? -30.0 * yOffset / topRange : 0;
+    }
+
+    params.faceZAngle = 0;
+
+    params.leftEyeOpenness = 1;
+    params.rightEyeOpenness = 1;
+
+    params.autoBlink = m_cfg.autoBlink;
+    params.autoBreath = m_cfg.autoBreath;
+    params.randomMotion = m_cfg.randomMotion;
+    params.useLipSync = m_cfg.useLipSync;
+
+    params.mouthForm = m_cfg.mouthForm;
+
+    if (m_cfg.useLipSync)
+    {
+        // Gain scales the RMS volume; values below the cut-off count
+        // as silence, and the result is clamped to [0, 1].
+        // NOTE(review): m_currentVol is written by the audio thread
+        // with no synchronization -- consider std::atomic; confirm.
+        params.lipSyncParam = m_currentVol * m_cfg.lipSyncGain;
+        if (params.lipSyncParam < m_cfg.lipSyncCutOff)
+        {
+            params.lipSyncParam = 0;
+        }
+        else if (params.lipSyncParam > 1)
+        {
+            params.lipSyncParam = 1;
+        }
+    }
+
+    // Leave everything else as zero
+
+    return params;
+}
+
+/*!
+ * Poll the cursor position via xdo until stop() is requested.
+ * Only positions reported for the configured screen are stored in
+ * m_curPos; positions on other screens are silently ignored for now.
+ */
+void MouseCursorTracker::mainLoop(void)
+{
+    // m_cfg is fixed after construction, so the interval is loop-invariant
+    const auto pollInterval = std::chrono::milliseconds(m_cfg.sleepMs);
+
+    while (!m_stop)
+    {
+        int mouseX;
+        int mouseY;
+        int mouseScreen;
+
+        xdo_get_mouse_location(m_xdo, &mouseX, &mouseY, &mouseScreen);
+
+        if (mouseScreen == m_cfg.screen)
+        {
+            m_curPos.x = mouseX;
+            m_curPos.y = mouseY;
+        }
+
+        std::this_thread::sleep_for(pollInterval);
+    }
+}
+
+/*!
+ * \param cfgPath Path to the config file; an empty string keeps the
+ *                defaults from populateDefaultConfig().
+ *
+ * File format: one "name value" pair per line; lines whose first
+ * character is '#' are comments.  Unknown names and unparsable
+ * values throw std::runtime_error, as does an unopenable file.
+ *
+ * The per-parameter handler table below replaces the original
+ * 16-branch if/else chain; keys and error messages are unchanged.
+ */
+void MouseCursorTracker::parseConfig(std::string cfgPath)
+{
+    populateDefaultConfig();
+
+    if (cfgPath == "")
+    {
+        return;
+    }
+
+    std::ifstream file(cfgPath);
+    if (!file)
+    {
+        throw std::runtime_error("Failed to open config file");
+    }
+
+    using Stream = std::istringstream;
+    // Each handler extracts its value from the stream and reports
+    // whether extraction succeeded.
+    const std::map<std::string, std::function<bool(Stream&)>> handlers =
+    {
+        {"sleep_ms",      [this](Stream& s) { return !!(s >> m_cfg.sleepMs); }},
+        {"autoBlink",     [this](Stream& s) { return !!(s >> m_cfg.autoBlink); }},
+        {"autoBreath",    [this](Stream& s) { return !!(s >> m_cfg.autoBreath); }},
+        {"randomMotion",  [this](Stream& s) { return !!(s >> m_cfg.randomMotion); }},
+        {"useLipSync",    [this](Stream& s) { return !!(s >> m_cfg.useLipSync); }},
+        {"lipSyncGain",   [this](Stream& s) { return !!(s >> m_cfg.lipSyncGain); }},
+        {"lipSyncCutOff", [this](Stream& s) { return !!(s >> m_cfg.lipSyncCutOff); }},
+        {"audioBufSize",  [this](Stream& s) { return !!(s >> m_cfg.audioBufSize); }},
+        {"mouthForm",     [this](Stream& s) { return !!(s >> m_cfg.mouthForm); }},
+        {"screen",        [this](Stream& s) { return !!(s >> m_cfg.screen); }},
+        {"middle_x",      [this](Stream& s) { return !!(s >> m_cfg.middle.x); }},
+        {"middle_y",      [this](Stream& s) { return !!(s >> m_cfg.middle.y); }},
+        {"top",           [this](Stream& s) { return !!(s >> m_cfg.top); }},
+        {"bottom",        [this](Stream& s) { return !!(s >> m_cfg.bottom); }},
+        {"left",          [this](Stream& s) { return !!(s >> m_cfg.left); }},
+        {"right",         [this](Stream& s) { return !!(s >> m_cfg.right); }},
+    };
+
+    std::string line;
+    while (std::getline(file, line))
+    {
+        // As before, only a '#' in column 0 starts a comment
+        if (line[0] == '#')
+        {
+            continue;
+        }
+
+        std::istringstream ss(line);
+        std::string paramName;
+
+        if (!(ss >> paramName))
+        {
+            continue; // blank line
+        }
+
+        auto it = handlers.find(paramName);
+        if (it == handlers.end())
+        {
+            throw std::runtime_error("Unrecognized config parameter");
+        }
+        if (!it->second(ss))
+        {
+            // Matches the original per-parameter message text
+            throw std::runtime_error("Error parsing " + paramName);
+        }
+    }
+}
+
+/*!
+ * Fill m_cfg with built-in defaults.  Called by parseConfig() before
+ * reading the file, so any parameter absent from the config keeps
+ * the value set here.
+ */
+void MouseCursorTracker::populateDefaultConfig(void)
+{
+    m_cfg.sleepMs = 5;            // cursor poll interval in mainLoop()
+    m_cfg.autoBlink = true;
+    m_cfg.autoBreath = true;
+    m_cfg.randomMotion = false;
+    m_cfg.useLipSync = true;
+    m_cfg.lipSyncGain = 10;       // multiplies the RMS mic volume
+    m_cfg.lipSyncCutOff = 0.15;   // gained volume below this counts as silence
+    m_cfg.audioBufSize = 4096;    // floats per PulseAudio read
+    m_cfg.mouthForm = 0;
+    m_cfg.top = 0;
+    m_cfg.bottom = 1079;
+    m_cfg.left = 0;
+    m_cfg.right = 1919; // These will be the full screen for 1920x1080
+
+    m_cfg.screen = 0;
+    m_cfg.middle = {1600, 870}; // Somewhere near the bottom right
+}
+