Initial commit - it should now be more or less working
author    Adrian Iain Lam <adrianiainlam@users.noreply.github.com>
          Sun, 12 Jul 2020 15:27:24 +0000 (16:27 +0100)
committer Adrian Iain Lam <adrianiainlam@users.noreply.github.com>
          Sun, 12 Jul 2020 15:27:24 +0000 (16:27 +0100)
14 files changed:
CMakeLists.txt [new file with mode: 0644]
LICENSE-MIT.txt [new file with mode: 0644]
LICENSE-WTFPL.txt [new file with mode: 0644]
README.md [new file with mode: 0644]
block_diagram.png [new file with mode: 0644]
build.sh [new file with mode: 0755]
config.txt [new file with mode: 0644]
example/build.sh [new file with mode: 0755]
example/demo.patch [new file with mode: 0644]
example/generate_patch.sh [new file with mode: 0755]
include/facial_landmark_detector.h [new file with mode: 0644]
src/faceXAngle.png [new file with mode: 0644]
src/facial_landmark_detector.cpp [new file with mode: 0644]
src/math_utils.h [new file with mode: 0644]

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644 (file)
index 0000000..eb72aa8
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.16)
+
+project(FacialLandmarksForCubism_project)
+
+add_subdirectory(lib/dlib/dlib dlib_build)
+find_package(OpenCV REQUIRED)
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+add_library(FacialLandmarksForCubism STATIC src/facial_landmark_detector.cpp)
+set_target_properties(FacialLandmarksForCubism PROPERTIES PUBLIC_HEADER include/facial_landmark_detector.h)
+
+target_include_directories(FacialLandmarksForCubism PRIVATE include lib/dlib)
+target_link_libraries(FacialLandmarksForCubism ${OpenCV_LIBS} dlib::dlib)
+
diff --git a/LICENSE-MIT.txt b/LICENSE-MIT.txt
new file mode 100644 (file)
index 0000000..ac4d357
--- /dev/null
+++ b/LICENSE-MIT.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/LICENSE-WTFPL.txt b/LICENSE-WTFPL.txt
new file mode 100644 (file)
index 0000000..ee7d6a5
--- /dev/null
+++ b/LICENSE-WTFPL.txt
@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\r
+                    Version 2, December 2004\r
+\r
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>\r
+\r
+ Everyone is permitted to copy and distribute verbatim or modified\r
+ copies of this license document, and changing it is allowed as long\r
+ as the name is changed.\r
+\r
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\r
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\r
+\r
+  0. You just DO WHAT THE FUCK YOU WANT TO.\r
+\r
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..d59ef66
--- /dev/null
+++ b/README.md
@@ -0,0 +1,187 @@
+# Facial Landmarks for Cubism
+
+A library that extracts facial landmarks from a webcam feed and converts them
+into Live2D® Cubism SDK parameters.
+
+*Disclaimer: This library is designed for use with the Live2D® Cubism SDK.
+It is not part of the SDK itself, and is not affiliated in any way with Live2D
+Inc. The Live2D® Cubism SDK belongs solely to Live2D Inc. You will need to
+agree to Live2D Inc.'s license agreements to use the Live2D® Cubism SDK.*
+
+This block diagram shows the intended usage of this library:
+
+![Block diagram showing interaction of this library with other components](block_diagram.png)
+
+Video showing me using the example program:
+<https://www.youtube.com/watch?v=Ve9GaeyMytc>
+
+
+## Supported environments
+
+This library was developed and tested only on Ubuntu 18.04 using GCC 7.5.0.
+However, I don't think I've used anything that would prevent it from being
+cross-platform compatible -- it should still work as long as you have a
+recent C/C++ compiler. (The library itself should only require C++11. The
+Cubism SDK requires C++14. I have made use of one C++17 library
+(`<filesystem>`) in the example program, but it should be straightforward
+to change this if you don't have C++17 support.)
+
+I have provided some shell scripts for convenience when building. In an
+environment without a `/bin/sh` shell you may have to run the commands
+manually. Hereafter, all build instructions will assume a Linux environment
+where a shell is available.
+
+If your CPU does not support AVX instructions, you may want to edit the
+relevant build scripts to remove the `-D USE_AVX_INSTRUCTIONS=1` variable
+(or change it to SSE3 etc.). However, there could be a performance penalty.
+
+## Build instructions
+
+1. Install dependencies.
+
+   You will require a recent C/C++ compiler, `make`, `patch`, CMake >= 3.16,
+   and the OpenCV library (I'm using version 4.3.0). To compile the example
+   program you will also require the OpenGL library (and its dev headers),
+   among other libraries. The packages I had to install (this list may not
+   be exhaustive) are:
+
+       libgl1-mesa-dev libxrandr-dev libxinerama-dev libxcursor-dev libxi-dev libglu1-mesa-dev
+
+2. Clone this repository including its submodule (dlib)
+
+       git clone --recurse-submodules https://github.com/adrianiainlam/facial-landmarks-for-cubism.git
+
+3. To build the library only:
+
+       cd <path of the git repo>
+       ./build.sh
+
+4. You will require a facial landmark dataset to use with dlib. I have
+   downloaded mine from
+   <http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2>.
+   Extract the file and edit the "config.txt" file so that it points
+   to the path of this file.
+
+   Note: The license for this dataset excludes commercial use. If you want
+   to use this library in a commercial product you will need to obtain a
+   dataset in some other way.
+
+To build the example program:
+
+5. Copy the extracted dlib dataset from step 4 to the "example" folder
+   of this repo.
+
+6. Download "Cubism 4 SDK for Native R1" from the Live2D website:
+   <https://www.live2d.com/en/download/cubism-sdk/download-native/>.
+
+   Extract the archive -- put the "CubismSdkForNative-4-r.1" folder under
+   the "example" folder of this repo.
+
+   Note: The Cubism SDK is the property of Live2D and is not part of this
+   project. You must agree to Live2D's license agreements to use it.
+
+7. Go into the
+   "example/CubismSdkForNative-4-r.1/Samples/OpenGL/thirdParty/scripts"
+   directory and run
+
+       ./setup_glew_glfw
+
+8. Go back to the "example" directory and run
+
+       ./build.sh
+
+9. Now try running the example program. From the "example" directory:
+
+       cd ./demo_build/build/make_gcc/bin/Demo/
+       ./Demo
+
+
+## Command-line arguments for the example program
+
+Most command-line arguments are to control the Cubism side of the program.
+Only one argument (`--config`) is used to specify the configuration file
+for the Facial Landmarks for Cubism library.
+
+ * `--window-width`, `-W`: Specify the window width
+ * `--window-height`, `-H`: Specify the window height
+ * `--window-title`, `-t`: Specify the window title
+ * `--root-dir`, `-d`: The directory in which the "Resources" folder will
+   be found. This is where the model data will be located.
+ * `--scale-factor`, `-f`: How the model should be scaled
+ * `--translate-x`, `-x`: Horizontal translation of the model within the
+   window
+ * `--translate-y`, `-y`: Vertical translation of the model within the window
+ * `--model`, `-m`: Name of the model to be used. This must be located inside
+   the "Resources" folder.
+ * `--config`, `-c`: Path to the configuration file for the Facial Landmarks
+   for Cubism library. See below for more details.
+
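+For example, an illustrative invocation (adjust the paths and values to
+suit your own setup) might be:
+
+       ./Demo -m Haru -f 8 -y -2.8 -c <path to config.txt>
+
+i.e. load the "Haru" model with a scale factor of 8 and a vertical
+translation of -2.8, using the given configuration file.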
+
+## Configuration file
+
+Due to differences in hardware and in each person's face, I have decided
+to make pretty much every parameter tweakable. The file "config.txt"
+lists and documents all parameters and their default values. You can
+change the values there and pass the file to the example program using
+the `-c` argument. If using the library directly, the path to this file
+should be passed to the constructor (or pass an empty string to use
+default values).
+
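+To give an idea of how the library is driven directly, here is a minimal
+sketch based on the example program (see "example/demo.patch" and
+"include/facial_landmark_detector.h" for the full details; error handling
+and the actual rendering are omitted):
+
+    #include <thread>
+    #include "facial_landmark_detector.h"
+
+    int main()
+    {
+        // Pass the path of the config file ("" uses the default values)
+        FacialLandmarkDetector detector("config.txt");
+
+        // Run the webcam capture / dlib detection loop in its own thread
+        std::thread detectorThread(&FacialLandmarkDetector::mainLoop,
+                                   &detector);
+
+        // In your render loop, fetch the latest parameters and feed them
+        // to your Cubism model, e.g. params.mouthOpenness,
+        // params.faceXAngle, params.leftEyeOpenness, ...
+        FacialLandmarkDetector::Params params = detector.getParams();
+
+        // On shutdown, stop the detector and join its thread
+        detector.stop();
+        detectorThread.join();
+        return 0;
+    }
+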
+## License
+
+The library itself is provided under the MIT license. By "the library itself"
+I refer to the following files that I have provided in this repo:
+
+ * src/facial_landmark_detector.cpp
+ * src/math_utils.h
+ * include/facial_landmark_detector.h
+ * and if you decide to build the binary for the library, the resulting
+   binary file (typically build/libFacialLandmarksForCubism.a)
+
+The license text can be found in LICENSE-MIT.txt, and also at the top of
+the .cpp and .h files.
+
+The library makes use of the dlib library, provided here as a Git
+submodule, which is used under the Boost Software License, version 1.0.
+The full license text can be found under lib/dlib/dlib/LICENSE.txt.
+
+The example program is a patched version of the sample program provided
+by Live2D (because there's really no point in reinventing the wheel),
+and as such, as per the licensing restrictions by Live2D, is still the
+property of Live2D.
+
+The patch file (example/demo.patch) contains lines showing additions by
+me, as well as deleted lines and unchanged lines for context. The deleted
+and unchanged lines are obviously still owned by Live2D. For my additions,
+where substantial enough for me to claim ownership, I release them under
+the Do What the Fuck You Want to Public License, version 2. The full license
+text can be found in LICENSE-WTFPL.txt.
+
+All other files not mentioned above that I have provided in this repo
+(i.e. not downloaded and placed here by you), *excluding* the two license
+documents and files generated by Git, are also released under the Do What
+the Fuck You Want to Public License, version 2, whose full license text
+can be found in LICENSE-WTFPL.txt.
+
+In order to use the example program, or in any other way use this library
+with the Live2D® Cubism SDK, you must agree to the license by Live2D Inc.
+Their licenses can be found here:
+<https://www.live2d.com/en/download/cubism-sdk/download-native/>.
+
+The library requires a facial landmark dataset, and the one provided by
+dlib (which is derived from a dataset owned by Imperial College London)
+has been used in development. The license for this dataset excludes
+commercial use. You must obtain an alternative dataset if you wish to
+use this library commercially.
+
+This is not a license requirement, but if you find my library useful,
+I'd love to hear from you! Send me an email at spam(at)adrianiainlam.tk --
+replacing "spam" with the name of this repo :).
+
+## Contributions
+
+Contributions welcome! This is only a hobby weekend project so I don't
+really have many environments / faces to test it on. Feel free to submit
+issues or pull requests on GitHub, or send questions or patches to me
+(see my email address above) if you prefer email. Thanks :)
+
diff --git a/block_diagram.png b/block_diagram.png
new file mode 100644 (file)
index 0000000..b4bfdd4
Binary files /dev/null and b/block_diagram.png differ
diff --git a/build.sh b/build.sh
new file mode 100755 (executable)
index 0000000..ea0a023
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+mkdir -p build && cd build
+
+cmake .. -D CMAKE_BUILD_TYPE=Release -D USE_AVX_INSTRUCTIONS=1
+cmake --build . -j6
diff --git a/config.txt b/config.txt
new file mode 100644 (file)
index 0000000..915485e
--- /dev/null
+++ b/config.txt
@@ -0,0 +1,140 @@
+# Config file for FacialLandmarksForCubism
+
+# The path of this config file should be passed to the constructor
+# of the FacialLandmarkDetector.
+
+# Comments are lines that start with a '#' and are ignored by the parser.
+# Note that a line will be considered as a comment ONLY IF the '#' is the
+# very first character of the line, i.e. without any preceding whitespace.
+
+
+## Section 1: dlib face detection and webcam parameters
+
+# Path to the dlib shape predictor trained dataset
+predictorPath ./shape_predictor_68_face_landmarks.dat
+
+# Value passed to the cv::VideoCapture() ctor
+cvVideoCaptureId 0
+
+# Number of milliseconds to wait after processing each video frame
+# This value controls the frame rate, but the actual frame period
+# is longer due to the time required to process each frame
+cvWaitKeyMs 5
+
+# If 1, show the webcam captured video on-screen; if 0, don't show
+showWebcamVideo 1
+
+# If 1, draw the detected facial landmarks on-screen; if 0, don't draw
+renderLandmarksOnVideo 1
+
+# If 1, laterally invert the image (create a mirror image); if 0, don't invert
+lateralInversion 1
+
+
+## Section 2: Cubism params calculation control
+#
+# These values control how the facial landmarks are translated into
+# parameters that control the Cubism model, and will vary from person
+# to person. The following values seem to work OK for my face, but
+# your mileage may vary.
+
+# Section 2.1: Face Y direction angle (head pointing up/down)
+# The Y angle is calculated mainly based on the angle formed
+# by the corners and the tip of the nose (hereafter referred
+# to as the "nose angle").
+
+# This applies an offset (in degrees).
+# If you have a webcam at the top of your monitor, then it is likely
+# that when you look at the centre of your monitor, the captured image
+# will have you looking downwards. This offset shifts the angle upwards,
+# so that the resulting avatar will still be looking straight ahead.
+faceYAngleCorrection 10
+
+# This is the baseline value for the nose angle (in radians) when looking
+# straight ahead...
+faceYAngleZeroValue 1.8
+
+# ... and this is when you are looking up...
+faceYAngleUpThreshold 1.3
+
+# ... and when looking down.
+faceYAngleDownThreshold 2.3
+
+# This is an additional multiplication factor applied per degree of rotation
+# in the X direction (left/right) - since the nose angle reduces when
+# turning your head left/right.
+faceYAngleXRotCorrection 0.15
+
+# This is the multiplication factor to reduce by when smiling or laughing -
+# the nose angle increases in such cases.
+faceYAngleSmileCorrection 0.075
+
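+# Illustrative example (assuming a roughly linear mapping between the
+# thresholds): a nose angle of about 2.05 rad lies halfway between
+# faceYAngleZeroValue (1.8) and faceYAngleDownThreshold (2.3), so the
+# avatar would point roughly halfway towards its maximum downward angle,
+# before faceYAngleCorrection and the other correction factors are applied.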
+
+# Section 2.2: Eye control
+# This is mainly calculated based on the eye aspect ratio (eye height
+# divided by eye width). Note that currently an average of the values
+# of both eyes is applied - mainly due to two reasons: (1) the dlib
+# dataset I'm using fails to detect winks for me, and (2) if this is
+# not done, I frequently get asynchronous blinks which just look ugly.
+
+# Maximum eye aspect ratio when the eye is closed
+eyeClosedThreshold 0.2
+
+# Minimum eye aspect ratio when the eye is open
+eyeOpenThreshold 0.25
+
+# Max eye aspect ratio to switch to a closed "smiley eye"
+eyeSmileEyeOpenThreshold 0.6
+
+# Min "mouth form" value to switch to a closed "smiley eye"
+# "Mouth form" is 1 when fully smiling / laughing, and 0 when normal
+eyeSmileMouthFormThreshold 0.75
+
+# Min "mouth open" value to switch to a closed "smiley eye"
+# "Mouth open" is 1 when fully open, and 0 when closed
+eyeSmileMouthOpenThreshold 0.5
+
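+# Illustrative example (assuming a linear mapping between the two
+# thresholds): an eye aspect ratio of 0.225 is halfway between
+# eyeClosedThreshold (0.2) and eyeOpenThreshold (0.25), so the eye would
+# be drawn roughly half open; ratios at or below 0.2 give a fully closed
+# eye, and ratios at or above 0.25 a fully open one.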
+
+# Section 2.3: Mouth control
+# Two parameters are passed to Cubism to control the mouth:
+#  - mouth form: Controls smiles / laughs
+#  - mouth openness: How widely open the mouth is
+# Mouth form is calculated by the ratio between the mouth width
+# and the eye separation (distance between the two eyes).
+# Mouth openness is calculated by the ratio between the lip separation
+# (distance between upper and lower lips) and the mouth width.
+
+# Max mouth-width-to-eye-separation ratio to have a normal resting mouth
+mouthNormalThreshold 0.75
+
+# Min mouth-width-to-eye-separation ratio to have a fully smiling
+# or laughing mouth
+mouthSmileThreshold 1.0
+
+# Max lip-separation-to-mouth-width ratio to have a closed mouth
+mouthClosedThreshold 0.1
+
+# Min lip-separation-to-mouth-width ratio to have a fully opened mouth
+mouthOpenThreshold 0.4
+
+# Additional multiplication factor applied to the mouth openness parameter
+# when the mouth is fully smiling / laughing, since doing so increases
+# the mouth width
+mouthOpenLaughCorrection 0.2
+
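+# Illustrative example (assuming a linear mapping between the thresholds):
+# with an eye separation of 100 px and a mouth width of 90 px, the ratio
+# is 0.9, which is 60% of the way from mouthNormalThreshold (0.75) to
+# mouthSmileThreshold (1.0), so the mouth form parameter would be roughly 0.6.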
+
+## Section 3: Filtering parameters
+# The facial landmark coordinates can be quite noisy, so I've applied
+# a simple moving average filter to reduce noise. More taps would mean
+# more samples to average over, hence smoother movements with less noise,
+# but it will also cause more lag between your movement and the movement
+# of the avatar, and quick movements (e.g. blinks) may be completely missed.
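+# For example, with faceYAngleNumTaps 11 the value sent to Cubism is the
+# mean of the last 11 samples, so a sudden head movement is only fully
+# reflected in the avatar after 11 frames, whereas with mouthOpenNumTaps 3
+# the mouth catches up after just 3 frames.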
+
+faceXAngleNumTaps 11
+faceYAngleNumTaps 11
+faceZAngleNumTaps 11
+mouthFormNumTaps 3
+mouthOpenNumTaps 3
+leftEyeOpenNumTaps 3
+rightEyeOpenNumTaps 3
+
diff --git a/example/build.sh b/example/build.sh
new file mode 100755 (executable)
index 0000000..2902456
--- /dev/null
+++ b/example/build.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+mkdir -p demo_build
+cp -r CubismSdkForNative-4-r.1/Samples/OpenGL/Demo/proj.linux.cmake/* ./demo_build/
+patch -d demo_build -p2 < demo.patch
+./demo_build/scripts/make_gcc
diff --git a/example/demo.patch b/example/demo.patch
new file mode 100644 (file)
index 0000000..186b8cb
--- /dev/null
+++ b/example/demo.patch
@@ -0,0 +1,1209 @@
+diff -pruN --exclude build ./demo_clean/CMakeLists.txt ./demo_dev/CMakeLists.txt
+--- ./demo_clean/CMakeLists.txt        2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/CMakeLists.txt  2020-07-11 22:52:49.099117981 +0100
+@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.16)
+ # Set app name.
+ set(APP_NAME Demo)
+ # Set directory paths.
+-set(SDK_ROOT_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../../..)
++set(SDK_ROOT_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../CubismSdkForNative-4-r.1)
+ set(CORE_PATH ${SDK_ROOT_PATH}/Core)
+ set(FRAMEWORK_PATH ${SDK_ROOT_PATH}/Framework)
+ set(THIRD_PARTY_PATH ${SDK_ROOT_PATH}/Samples/OpenGL/thirdParty)
+@@ -32,7 +32,7 @@ set(GLFW_INSTALL OFF CACHE BOOL "" FORCE
+ set(BUILD_UTILS OFF CACHE BOOL "" FORCE)
+ # Specify version of compiler.
+-set(CMAKE_CXX_STANDARD 14)
++set(CMAKE_CXX_STANDARD 17)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+@@ -64,6 +64,9 @@ target_link_libraries(Framework Live2DCu
+ # Find opengl libraries.
+ find_package(OpenGL REQUIRED)
++# Add FacialLandmarksForCubism
++add_subdirectory(../.. FacialLandmarksForCubism_build)
++
+ # Make executable app.
+ add_executable(${APP_NAME})
+ # Add source files.
+@@ -73,9 +76,11 @@ target_link_libraries(${APP_NAME}
+   Framework
+   glfw
+   ${OPENGL_LIBRARIES}
++  FacialLandmarksForCubism
++  stdc++fs
+ )
+ # Specify include directories.
+-target_include_directories(${APP_NAME} PRIVATE ${STB_PATH})
++target_include_directories(${APP_NAME} PRIVATE ${STB_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/../../include)
+ # Copy resource directory to build directory.
+ add_custom_command(
+@@ -86,6 +91,17 @@ add_custom_command(
+       copy_directory ${RES_PATH} $<TARGET_FILE_DIR:${APP_NAME}>/Resources
+ )
++# Copy shape predictor trained dataset to build directory
++set(DLIB_SHAPE_PREDICTOR_DATA ${CMAKE_CURRENT_SOURCE_DIR}/../shape_predictor_68_face_landmarks.dat
++    CACHE FILEPATH "Path to dlib shape predictor trained dataset")
++add_custom_command(
++  TARGET ${APP_NAME}
++  POST_BUILD
++  COMMAND
++    ${CMAKE_COMMAND} -E
++      copy ${DLIB_SHAPE_PREDICTOR_DATA} $<TARGET_FILE_DIR:${APP_NAME}>/
++)
++
+ # You can change target that renderer draws by enabling following definition.
+ #
+ # * USE_RENDER_TARGET
+diff -pruN --exclude build ./demo_clean/scripts/make_gcc ./demo_dev/scripts/make_gcc
+--- ./demo_clean/scripts/make_gcc      2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/scripts/make_gcc        2020-07-11 21:22:23.615043956 +0100
+@@ -9,5 +9,6 @@ BUILD_PATH=$SCRIPT_PATH/../build/make_gc
+ # Run CMake.
+ cmake -S "$CMAKE_PATH" \
+   -B "$BUILD_PATH" \
+-  -D CMAKE_BUILD_TYPE=Release
+-cd "$BUILD_PATH" && make
++  -D CMAKE_BUILD_TYPE=Release \
++  -D USE_AVX_INSTRUCTIONS=1
++cd "$BUILD_PATH" && make -j4
+diff -pruN --exclude build ./demo_clean/src/CMakeLists.txt ./demo_dev/src/CMakeLists.txt
+--- ./demo_clean/src/CMakeLists.txt    2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/CMakeLists.txt      2020-07-11 17:39:18.358435702 +0100
+@@ -19,6 +19,4 @@ target_sources(${APP_NAME}
+     ${CMAKE_CURRENT_SOURCE_DIR}/LAppView.cpp
+     ${CMAKE_CURRENT_SOURCE_DIR}/LAppView.hpp
+     ${CMAKE_CURRENT_SOURCE_DIR}/main.cpp
+-    ${CMAKE_CURRENT_SOURCE_DIR}/TouchManager.cpp
+-    ${CMAKE_CURRENT_SOURCE_DIR}/TouchManager.hpp
+ )
+diff -pruN --exclude build ./demo_clean/src/LAppDelegate.cpp ./demo_dev/src/LAppDelegate.cpp
+--- ./demo_clean/src/LAppDelegate.cpp  2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppDelegate.cpp    2020-07-11 17:35:02.414902548 +0100
+@@ -45,7 +45,8 @@ void LAppDelegate::ReleaseInstance()
+     s_instance = NULL;
+ }
+-bool LAppDelegate::Initialize()
++bool LAppDelegate::Initialize(int initWindowWidth, int initWindowHeight,
++                              const char *windowTitle)
+ {
+     if (DebugLogEnable)
+     {
+@@ -63,7 +64,13 @@ bool LAppDelegate::Initialize()
+     }
+     // Windowの生成_
+-    _window = glfwCreateWindow(RenderTargetWidth, RenderTargetHeight, "SAMPLE", NULL, NULL);
++    _window = glfwCreateWindow(
++        initWindowWidth ? initWindowWidth : RenderTargetWidth,
++        initWindowHeight ? initWindowHeight : RenderTargetHeight,
++        windowTitle ? windowTitle : "SAMPLE",
++        NULL,
++        NULL);
++
+     if (_window == NULL)
+     {
+         if (DebugLogEnable)
+@@ -95,10 +102,6 @@ bool LAppDelegate::Initialize()
+     glEnable(GL_BLEND);
+     glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+-    //コールバック関数の登録
+-    glfwSetMouseButtonCallback(_window, EventHandler::OnMouseCallBack);
+-    glfwSetCursorPosCallback(_window, EventHandler::OnMouseCallBack);
+-
+     // ウィンドウサイズ記憶
+     int width, height;
+     glfwGetWindowSize(LAppDelegate::GetInstance()->GetWindow(), &width, &height);
+@@ -111,8 +114,6 @@ bool LAppDelegate::Initialize()
+     // Cubism3の初期化
+     InitializeCubism();
+-    SetRootDirectory();
+-
+     //load model
+     LAppLive2DManager::GetInstance();
+@@ -214,49 +215,6 @@ void LAppDelegate::InitializeCubism()
+     LAppPal::UpdateTime();
+ }
+-void LAppDelegate::OnMouseCallBack(GLFWwindow* window, int button, int action, int modify)
+-{
+-    if (_view == NULL)
+-    {
+-        return;
+-    }
+-    if (GLFW_MOUSE_BUTTON_LEFT != button)
+-    {
+-        return;
+-    }
+-
+-    if (GLFW_PRESS == action)
+-    {
+-        _captured = true;
+-        _view->OnTouchesBegan(_mouseX, _mouseY);
+-    }
+-    else if (GLFW_RELEASE == action)
+-    {
+-        if (_captured)
+-        {
+-            _captured = false;
+-            _view->OnTouchesEnded(_mouseX, _mouseY);
+-        }
+-    }
+-}
+-
+-void LAppDelegate::OnMouseCallBack(GLFWwindow* window, double x, double y)
+-{
+-    _mouseX = static_cast<float>(x);
+-    _mouseY = static_cast<float>(y);
+-
+-    if (!_captured)
+-    {
+-        return;
+-    }
+-    if (_view == NULL)
+-    {
+-        return;
+-    }
+-
+-    _view->OnTouchesMoved(_mouseX, _mouseY);
+-}
+-
+ GLuint LAppDelegate::CreateShader()
+ {
+     //バーテックスシェーダのコンパイル
+@@ -299,29 +257,9 @@ GLuint LAppDelegate::CreateShader()
+     return programId;
+ }
+-void LAppDelegate::SetRootDirectory()
++void LAppDelegate::SetRootDirectory(std::string rootDir)
+ {
+-    char path[1024];
+-    ssize_t len = readlink("/proc/self/exe", path, 1024 - 1);
+-
+-    if (len != -1)
+-    {
+-        path[len] = '\0';
+-    }
+-
+-    std::string pathString(path);
+-
+-    pathString = pathString.substr(0, pathString.rfind("Demo"));
+-    Csm::csmVector<string> splitStrings = this->Split(pathString, '/');
+-
+-    this->_rootDirectory = "";
+-
+-    for(int i = 0; i < splitStrings.GetSize(); i++)
+-    {
+-        this->_rootDirectory = this->_rootDirectory + "/" +splitStrings[i];
+-    }
+-
+-    this->_rootDirectory += "/";
++    this->_rootDirectory = rootDir + "/";
+ }
+ Csm::csmVector<string> LAppDelegate::Split(const std::string& baseString, char delimiter)
+diff -pruN --exclude build ./demo_clean/src/LAppDelegate.hpp ./demo_dev/src/LAppDelegate.hpp
+--- ./demo_clean/src/LAppDelegate.hpp  2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppDelegate.hpp    2020-07-11 17:34:40.778602504 +0100
+@@ -40,7 +40,8 @@ public:
+     /**
+     * @brief   APPに必要なものを初期化する。
+     */
+-    bool Initialize();
++    bool Initialize(int initWindowWidth = 0, int initWindowHeight = 0,
++                    const char *windowTitle = "SAMPLE");
+     /**
+     * @brief   解放する。
+@@ -53,25 +54,6 @@ public:
+     void Run();
+     /**
+-    * @brief   OpenGL用 glfwSetMouseButtonCallback用関数。
+-    *
+-    * @param[in]       window            コールバックを呼んだWindow情報
+-    * @param[in]       button            ボタン種類
+-    * @param[in]       action            実行結果
+-    * @param[in]       modify
+-    */
+-    void OnMouseCallBack(GLFWwindow* window, int button, int action, int modify);
+-
+-    /**
+-    * @brief   OpenGL用 glfwSetCursorPosCallback用関数。
+-    *
+-    * @param[in]       window            コールバックを呼んだWindow情報
+-    * @param[in]       x                 x座標
+-    * @param[in]       y                 x座標
+-    */
+-    void OnMouseCallBack(GLFWwindow* window, double x, double y);
+-
+-    /**
+     * @brief シェーダーを登録する。
+     */
+     GLuint CreateShader();
+@@ -98,8 +80,10 @@ public:
+     /**
+      * @brief   ルートディレクトリを設定する。
++     *
++     * @param[in] rootDir : The root directory to set to.
+      */
+-    void SetRootDirectory();
++    void SetRootDirectory(std::string rootDir);
+     /**
+      * @brief   ルートディレクトリを取得する。
+@@ -146,24 +130,3 @@ private:
+     int _windowWidth;                            ///< Initialize関数で設定したウィンドウ幅
+     int _windowHeight;                           ///< Initialize関数で設定したウィンドウ高さ
+ };
+-
+-class EventHandler
+-{
+-public:
+-    /**
+-    * @brief   glfwSetMouseButtonCallback用コールバック関数。
+-    */
+-    static void OnMouseCallBack(GLFWwindow* window, int button, int action, int modify)
+-    {
+-        LAppDelegate::GetInstance()->OnMouseCallBack(window, button, action, modify);
+-    }
+-
+-    /**
+-    * @brief   glfwSetCursorPosCallback用コールバック関数。
+-    */
+-    static void OnMouseCallBack(GLFWwindow* window, double x, double y)
+-    {
+-         LAppDelegate::GetInstance()->OnMouseCallBack(window, x, y);
+-    }
+-
+-};
+diff -pruN --exclude build ./demo_clean/src/LAppLive2DManager.cpp ./demo_dev/src/LAppLive2DManager.cpp
+--- ./demo_clean/src/LAppLive2DManager.cpp     2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppLive2DManager.cpp       2020-07-11 23:20:11.548419176 +0100
+@@ -52,9 +52,10 @@ void LAppLive2DManager::ReleaseInstance(
+ LAppLive2DManager::LAppLive2DManager()
+     : _viewMatrix(NULL)
+-    , _sceneIndex(0)
++    , _projScaleFactor(1.0f)
++    , _translateX(0.0f)
++    , _translateY(0.0f)
+ {
+-    ChangeScene(_sceneIndex);
+ }
+ LAppLive2DManager::~LAppLive2DManager()
+@@ -98,26 +99,6 @@ void LAppLive2DManager::OnTap(csmFloat32
+     {
+         LAppPal::PrintLog("[APP]tap point: {x:%.2f y:%.2f}", x, y);
+     }
+-
+-    for (csmUint32 i = 0; i < _models.GetSize(); i++)
+-    {
+-        if (_models[i]->HitTest(HitAreaNameHead, x, y))
+-        {
+-            if (DebugLogEnable)
+-            {
+-                LAppPal::PrintLog("[APP]hit area: [%s]", HitAreaNameHead);
+-            }
+-            _models[i]->SetRandomExpression();
+-        }
+-        else if (_models[i]->HitTest(HitAreaNameBody, x, y))
+-        {
+-            if (DebugLogEnable)
+-            {
+-                LAppPal::PrintLog("[APP]hit area: [%s]", HitAreaNameBody);
+-            }
+-            _models[i]->StartRandomMotion(MotionGroupTapBody, PriorityNormal, FinishedMotion);
+-        }
+-    }
+ }
+ void LAppLive2DManager::OnUpdate() const
+@@ -125,7 +106,9 @@ void LAppLive2DManager::OnUpdate() const
+     CubismMatrix44 projection;
+     int width, height;
+     glfwGetWindowSize(LAppDelegate::GetInstance()->GetWindow(), &width, &height);
+-    projection.Scale(1.0f, static_cast<float>(width) / static_cast<float>(height));
++    projection.Scale(_projScaleFactor,
++                     _projScaleFactor * static_cast<float>(width) / static_cast<float>(height));
++    projection.Translate(_translateX, _translateY);
+     if (_viewMatrix != NULL)
+     {
+@@ -148,26 +131,10 @@ void LAppLive2DManager::OnUpdate() const
+     }
+ }
+-void LAppLive2DManager::NextScene()
+-{
+-    csmInt32 no = (_sceneIndex + 1) % ModelDirSize;
+-    ChangeScene(no);
+-}
+-
+-void LAppLive2DManager::ChangeScene(Csm::csmInt32 index)
++void LAppLive2DManager::SetModel(std::string modelName)
+ {
+-    _sceneIndex = index;
+-    if (DebugLogEnable)
+-    {
+-        LAppPal::PrintLog("[APP]model index: %d", _sceneIndex);
+-    }
+-
+-    // ModelDir[]に保持したディレクトリ名から
+-    // model3.jsonのパスを決定する.
+-    // ディレクトリ名とmodel3.jsonの名前を一致させておくこと.
+-    std::string model = ModelDir[index];
+-    std::string modelPath = LAppDelegate::GetInstance()->GetRootDirectory() + ResourcesPath + model + "/";
+-    std::string modelJsonName = ModelDir[index];
++    std::string modelPath = LAppDelegate::GetInstance()->GetRootDirectory() + ResourcesPath + modelName + "/";
++    std::string modelJsonName = modelName;
+     modelJsonName += ".model3.json";
+     ReleaseAllModel();
+@@ -215,3 +182,20 @@ csmUint32 LAppLive2DManager::GetModelNum
+ {
+     return _models.GetSize();
+ }
++
++void LAppLive2DManager::SetFacialLandmarkDetector(FacialLandmarkDetector *detector)
++{
++    for (auto it = _models.Begin(); it != _models.End(); ++it)
++    {
++        (*it)->SetFacialLandmarkDetector(detector);
++    }
++}
++
++void LAppLive2DManager::SetProjectionScaleTranslate(float scaleFactor,
++                                                    float translateX,
++                                                    float translateY)
++{
++    _projScaleFactor = scaleFactor;
++    _translateX = translateX;
++    _translateY = translateY;
++}
+diff -pruN --exclude build ./demo_clean/src/LAppLive2DManager.hpp ./demo_dev/src/LAppLive2DManager.hpp
+--- ./demo_clean/src/LAppLive2DManager.hpp     2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppLive2DManager.hpp       2020-07-11 23:21:17.969484538 +0100
+@@ -6,12 +6,15 @@
+  */
+ #pragma once
++#include <string>
+ #include <CubismFramework.hpp>
+ #include <Math/CubismMatrix44.hpp>
+ #include <Type/csmVector.hpp>
+ class LAppModel;
++class FacialLandmarkDetector;
++
+ /**
+ * @brief サンプルアプリケーションにおいてCubismModelを管理するクラス<br>
+ *         モデル生成と破棄、タップイベントの処理、モデル切り替えを行う。
+@@ -72,16 +75,12 @@ public:
+     void OnUpdate() const;
+     /**
+-    * @brief   次のシーンに切り替える<br>
+-    *           サンプルアプリケーションではモデルセットの切り替えを行う。
+-    */
+-    void NextScene();
+-
+-    /**
+-    * @brief   シーンを切り替える<br>
+-    *           サンプルアプリケーションではモデルセットの切り替えを行う。
+-    */
+-    void ChangeScene(Csm::csmInt32 index);
++     * @brief Set model data
++     *
++     * @param[in] modelName : Name of model, should be the same for both
++     *                        the directory and the model3.json file
++     */
++    void SetModel(std::string modelName);
+     /**
+      * @brief   モデル個数を得る
+@@ -89,6 +88,24 @@ public:
+      */
+     Csm::csmUint32 GetModelNum() const;
++    /**
++     * @brief Set the pointer to the FacialLandmarkDetector instance
++     *
++     * @param[in] detector : Pointer to FacialLandmarkDetector instance
++     */
++    void SetFacialLandmarkDetector(FacialLandmarkDetector *detector);
++
++    /**
++     * @brief Set projection scale factor and translation parameters
++     *
++     * @param[in] scaleFactor : Scale factor applied in both X and Y directions
++     * @param[in] translateX : Translation in X direction
++     * @param[in] translateY : Translation in Y direction
++     */
++    void SetProjectionScaleTranslate(float scaleFactor,
++                                     float translateX,
++                                     float translateY);
++
+ private:
+     /**
+     * @brief  コンストラクタ
+@@ -102,5 +119,8 @@ private:
+     Csm::CubismMatrix44*        _viewMatrix; ///< モデル描画に用いるView行列
+     Csm::csmVector<LAppModel*>  _models; ///< モデルインスタンスのコンテナ
+-    Csm::csmInt32               _sceneIndex; ///< 表示するシーンのインデックス値
++
++    float _projScaleFactor;
++    float _translateX;
++    float _translateY;
+ };
+diff -pruN --exclude build ./demo_clean/src/LAppModel.cpp ./demo_dev/src/LAppModel.cpp
+--- ./demo_clean/src/LAppModel.cpp     2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppModel.cpp       2020-07-11 15:57:43.784019311 +0100
+@@ -21,6 +21,8 @@
+ #include "LAppTextureManager.hpp"
+ #include "LAppDelegate.hpp"
++#include "facial_landmark_detector.h"
++
+ using namespace Live2D::Cubism::Framework;
+ using namespace Live2D::Cubism::Framework::DefaultParameterId;
+ using namespace LAppDefine;
+@@ -128,30 +130,6 @@ void LAppModel::SetupModel(ICubismModelS
+         DeleteBuffer(buffer, path.GetRawString());
+     }
+-    //Expression
+-    if (_modelSetting->GetExpressionCount() > 0)
+-    {
+-        const csmInt32 count = _modelSetting->GetExpressionCount();
+-        for (csmInt32 i = 0; i < count; i++)
+-        {
+-            csmString name = _modelSetting->GetExpressionName(i);
+-            csmString path = _modelSetting->GetExpressionFileName(i);
+-            path = _modelHomeDir + path;
+-
+-            buffer = CreateBuffer(path.GetRawString(), &size);
+-            ACubismMotion* motion = LoadExpression(buffer, size, name.GetRawString());
+-
+-            if (_expressions[name] != NULL)
+-            {
+-                ACubismMotion::Delete(_expressions[name]);
+-                _expressions[name] = NULL;
+-            }
+-            _expressions[name] = motion;
+-
+-            DeleteBuffer(buffer, path.GetRawString());
+-        }
+-    }
+-
+     //Physics
+     if (strcmp(_modelSetting->GetPhysicsFileName(), "") != 0)
+     {
+@@ -174,27 +152,6 @@ void LAppModel::SetupModel(ICubismModelS
+         DeleteBuffer(buffer, path.GetRawString());
+     }
+-    //EyeBlink
+-    if (_modelSetting->GetEyeBlinkParameterCount() > 0)
+-    {
+-        _eyeBlink = CubismEyeBlink::Create(_modelSetting);
+-    }
+-
+-    //Breath
+-    {
+-        _breath = CubismBreath::Create();
+-
+-        csmVector<CubismBreath::BreathParameterData> breathParameters;
+-
+-        breathParameters.PushBack(CubismBreath::BreathParameterData(_idParamAngleX, 0.0f, 15.0f, 6.5345f, 0.5f));
+-        breathParameters.PushBack(CubismBreath::BreathParameterData(_idParamAngleY, 0.0f, 8.0f, 3.5345f, 0.5f));
+-        breathParameters.PushBack(CubismBreath::BreathParameterData(_idParamAngleZ, 0.0f, 10.0f, 5.5345f, 0.5f));
+-        breathParameters.PushBack(CubismBreath::BreathParameterData(_idParamBodyAngleX, 0.0f, 4.0f, 15.5345f, 0.5f));
+-        breathParameters.PushBack(CubismBreath::BreathParameterData(CubismFramework::GetIdManager()->GetId(ParamBreath), 0.5f, 0.5f, 3.2345f, 0.5f));
+-
+-        _breath->SetParameters(breathParameters);
+-    }
+-
+     //UserData
+     if (strcmp(_modelSetting->GetUserDataFile(), "") != 0)
+     {
+@@ -205,24 +162,6 @@ void LAppModel::SetupModel(ICubismModelS
+         DeleteBuffer(buffer, path.GetRawString());
+     }
+-    // EyeBlinkIds
+-    {
+-        csmInt32 eyeBlinkIdCount = _modelSetting->GetEyeBlinkParameterCount();
+-        for (csmInt32 i = 0; i < eyeBlinkIdCount; ++i)
+-        {
+-            _eyeBlinkIds.PushBack(_modelSetting->GetEyeBlinkParameterId(i));
+-        }
+-    }
+-
+-    // LipSyncIds
+-    {
+-        csmInt32 lipSyncIdCount = _modelSetting->GetLipSyncParameterCount();
+-        for (csmInt32 i = 0; i < lipSyncIdCount; ++i)
+-        {
+-            _lipSyncIds.PushBack(_modelSetting->GetLipSyncParameterId(i));
+-        }
+-    }
+-
+     //Layout
+     csmMap<csmString, csmFloat32> layout;
+     _modelSetting->GetLayoutMap(layout);
+@@ -230,14 +169,6 @@ void LAppModel::SetupModel(ICubismModelS
+     _model->SaveParameters();
+-    for (csmInt32 i = 0; i < _modelSetting->GetMotionGroupCount(); i++)
+-    {
+-        const csmChar* group = _modelSetting->GetMotionGroupName(i);
+-        PreloadMotionGroup(group);
+-    }
+-
+-    _motionManager->StopAllMotions();
+-
+     _updating = false;
+     _initialized = true;
+ }
+@@ -335,59 +266,29 @@ void LAppModel::Update()
+     const csmFloat32 deltaTimeSeconds = LAppPal::GetDeltaTime();
+     _userTimeSeconds += deltaTimeSeconds;
+-    _dragManager->Update(deltaTimeSeconds);
+-    _dragX = _dragManager->GetX();
+-    _dragY = _dragManager->GetY();
+-
+-    // モーションによるパラメータ更新の有無
+-    csmBool motionUpdated = false;
+-
+-    //-----------------------------------------------------------------
+-    _model->LoadParameters(); // 前回セーブされた状態をロード
+-    if (_motionManager->IsFinished())
+-    {
+-        // モーションの再生がない場合、待機モーションの中からランダムで再生する
+-        StartRandomMotion(MotionGroupIdle, PriorityIdle);
+-    }
+-    else
+-    {
+-        motionUpdated = _motionManager->UpdateMotion(_model, deltaTimeSeconds); // モーションを更新
+-    }
+-    _model->SaveParameters(); // 状態を保存
+-    //-----------------------------------------------------------------
+-
+-    // まばたき
+-    if (!motionUpdated)
+-    {
+-        if (_eyeBlink != NULL)
+-        {
+-            // メインモーションの更新がないとき
+-            _eyeBlink->UpdateParameters(_model, deltaTimeSeconds); // 目パチ
+-        }
+-    }
+-
+-    if (_expressionManager != NULL)
++    if (_detector)
+     {
+-        _expressionManager->UpdateMotion(_model, deltaTimeSeconds); // 表情でパラメータ更新(相対変化)
+-    }
+-
+-    //ドラッグによる変化
+-    //ドラッグによる顔の向きの調整
+-    _model->AddParameterValue(_idParamAngleX, _dragX * 30); // -30から30の値を加える
+-    _model->AddParameterValue(_idParamAngleY, _dragY * 30);
+-    _model->AddParameterValue(_idParamAngleZ, _dragX * _dragY * -30);
+-
+-    //ドラッグによる体の向きの調整
+-    _model->AddParameterValue(_idParamBodyAngleX, _dragX * 10); // -10から10の値を加える
+-
+-    //ドラッグによる目の向きの調整
+-    _model->AddParameterValue(_idParamEyeBallX, _dragX); // -1から1の値を加える
+-    _model->AddParameterValue(_idParamEyeBallY, _dragY);
++        auto idMan = CubismFramework::GetIdManager();
++        auto params = _detector->getParams();
+-    // 呼吸など
+-    if (_breath != NULL)
+-    {
+-        _breath->UpdateParameters(_model, deltaTimeSeconds);
++        _model->SetParameterValue(idMan->GetId("ParamEyeLOpen"),
++                                  params.leftEyeOpenness);
++        _model->SetParameterValue(idMan->GetId("ParamEyeROpen"),
++                                  params.rightEyeOpenness);
++        _model->SetParameterValue(idMan->GetId("ParamMouthForm"),
++                                  params.mouthForm);
++        _model->SetParameterValue(idMan->GetId("ParamMouthOpenY"),
++                                  params.mouthOpenness);
++        _model->SetParameterValue(idMan->GetId("ParamEyeLSmile"),
++                                  params.leftEyeSmile);
++        _model->SetParameterValue(idMan->GetId("ParamEyeRSmile"),
++                                  params.rightEyeSmile);
++        _model->SetParameterValue(idMan->GetId("ParamAngleX"),
++                                  params.faceXAngle);
++        _model->SetParameterValue(idMan->GetId("ParamAngleY"),
++                                  params.faceYAngle);
++        _model->SetParameterValue(idMan->GetId("ParamAngleZ"),
++                                  params.faceZAngle);
+     }
+     // 物理演算の設定
+@@ -396,17 +297,6 @@ void LAppModel::Update()
+         _physics->Evaluate(_model, deltaTimeSeconds);
+     }
+-    // リップシンクの設定
+-    if (_lipSync)
+-    {
+-        csmFloat32 value = 0; // リアルタイムでリップシンクを行う場合、システムから音量を取得して0〜1の範囲で値を入力します。
+-
+-        for (csmUint32 i = 0; i < _lipSyncIds.GetSize(); ++i)
+-        {
+-            _model->AddParameterValue(_lipSyncIds[i], value, 0.8f);
+-        }
+-    }
+-
+     // ポーズの設定
+     if (_pose != NULL)
+     {
+@@ -626,3 +516,9 @@ Csm::Rendering::CubismOffscreenFrame_Ope
+ {
+     return _renderBuffer;
+ }
++
++void LAppModel::SetFacialLandmarkDetector(FacialLandmarkDetector *detector)
++{
++    _detector = detector;
++}
++
+diff -pruN --exclude build ./demo_clean/src/LAppModel.hpp ./demo_dev/src/LAppModel.hpp
+--- ./demo_clean/src/LAppModel.hpp     2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppModel.hpp       2020-07-11 15:40:18.977286166 +0100
+@@ -13,6 +13,7 @@
+ #include <Type/csmRectF.hpp>
+ #include <Rendering/OpenGL/CubismOffscreenSurface_OpenGLES2.hpp>
++#include "facial_landmark_detector.h"
+ /**
+  * @brief ユーザーが実際に使用するモデルの実装クラス<br>
+@@ -113,6 +114,13 @@ public:
+      */
+     Csm::Rendering::CubismOffscreenFrame_OpenGLES2& GetRenderBuffer();
++    /**
++     * @brief Set the pointer to the FacialLandmarkDetector instance
++     *
++     * @param[in] detector : Pointer to FacialLandmarkDetector instance
++     */
++    void SetFacialLandmarkDetector(FacialLandmarkDetector *detector);
++
+ protected:
+     /**
+      *  @brief  モデルを描画する処理。モデルを描画する空間のView-Projection行列を渡す。
+@@ -183,6 +191,8 @@ private:
+     const Csm::CubismId* _idParamEyeBallY; ///< パラメータID: ParamEyeBallXY
+     Csm::Rendering::CubismOffscreenFrame_OpenGLES2 _renderBuffer;   ///< フレームバッファ以外の描画先
++
++    FacialLandmarkDetector *_detector;
+ };
+diff -pruN --exclude build ./demo_clean/src/LAppPal.cpp ./demo_dev/src/LAppPal.cpp
+--- ./demo_clean/src/LAppPal.cpp       2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppPal.cpp 2020-07-11 23:29:09.084910139 +0100
+@@ -6,6 +6,7 @@
+  */
+ #include "LAppPal.hpp"
++#include <stdexcept>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
+@@ -45,10 +46,7 @@ csmByte* LAppPal::LoadFileAsBytes(const
+     file.open(path, std::ios::in | std::ios::binary);
+     if (!file.is_open())
+     {
+-        if (DebugLogEnable)
+-        {
+-            PrintLog("file open error");
+-        }
++        throw std::runtime_error("Failed to open file " + filePath);
+         return NULL;
+     }
+     file.read(buf, size);
+diff -pruN --exclude build ./demo_clean/src/LAppTextureManager.cpp ./demo_dev/src/LAppTextureManager.cpp
+--- ./demo_clean/src/LAppTextureManager.cpp    2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppTextureManager.cpp      2020-07-11 22:22:18.004965003 +0100
+@@ -96,6 +96,46 @@ LAppTextureManager::TextureInfo* LAppTex
+ }
++LAppTextureManager::TextureInfo* LAppTextureManager::CreateTextureFromColor(
++    uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha
++)
++{
++    int width = 8, height = 8;
++
++    uint8_t pixels[height][width][4];
++    for (std::size_t h = 0; h < height; h++)
++    {
++        for (std::size_t w = 0; w < width; w++)
++        {
++            pixels[h][w][0] = red;
++            pixels[h][w][1] = green;
++            pixels[h][w][2] = blue;
++            pixels[h][w][3] = alpha;
++        }
++    }
++
++    GLuint textureId;
++    glGenTextures(1, &textureId);
++    glBindTexture(GL_TEXTURE_2D, textureId);
++    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
++
++    glGenerateMipmap(GL_TEXTURE_2D);
++    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
++    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
++    glBindTexture(GL_TEXTURE_2D, 0);
++
++
++    LAppTextureManager::TextureInfo* textureInfo = new LAppTextureManager::TextureInfo();
++    textureInfo->fileName = "";
++    textureInfo->width = width;
++    textureInfo->height = height;
++    textureInfo->id = textureId;
++
++    _textures.PushBack(textureInfo);
++
++    return textureInfo;
++}
++
+ void LAppTextureManager::ReleaseTextures()
+ {
+     for (Csm::csmUint32 i = 0; i < _textures.GetSize(); i++)
+diff -pruN --exclude build ./demo_clean/src/LAppTextureManager.hpp ./demo_dev/src/LAppTextureManager.hpp
+--- ./demo_clean/src/LAppTextureManager.hpp    2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppTextureManager.hpp      2020-07-11 17:36:31.180131039 +0100
+@@ -72,6 +72,8 @@ public:
+     */
+     TextureInfo* CreateTextureFromPngFile(std::string fileName);
++    TextureInfo *CreateTextureFromColor(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha = 255);
++
+     /**
+     * @brief 画像の解放
+     *
+diff -pruN --exclude build ./demo_clean/src/LAppView.cpp ./demo_dev/src/LAppView.cpp
+--- ./demo_clean/src/LAppView.cpp      2020-07-12 16:16:34.003809759 +0100
++++ ./demo_dev/src/LAppView.cpp        2020-07-11 17:38:06.905451955 +0100
+@@ -13,7 +13,6 @@
+ #include "LAppLive2DManager.hpp"
+ #include "LAppTextureManager.hpp"
+ #include "LAppDefine.hpp"
+-#include "TouchManager.hpp"
+ #include "LAppSprite.hpp"
+ #include "LAppModel.hpp"
+@@ -26,8 +25,6 @@ using namespace LAppDefine;
+ LAppView::LAppView():
+     _programId(0),
+     _back(NULL),
+-    _gear(NULL),
+-    _power(NULL),
+     _renderSprite(NULL),
+     _renderTarget(SelectTarget_None)
+ {
+@@ -35,8 +32,6 @@ LAppView::LAppView():
+     _clearColor[1] = 1.0f;
+     _clearColor[2] = 1.0f;
+     _clearColor[3] = 0.0f;
+-    // タッチ関係のイベント管理
+-    _touchManager = new TouchManager();
+     // デバイス座標からスクリーン座標に変換するための
+     _deviceToScreen = new CubismMatrix44();
+@@ -52,10 +47,7 @@ LAppView::~LAppView()
+     delete _viewMatrix;
+     delete _deviceToScreen;
+-    delete _touchManager;
+     delete _back;
+-    delete _gear;
+-    delete _power;
+ }
+ void LAppView::Initialize()
+@@ -97,9 +89,6 @@ void LAppView::Initialize()
+ void LAppView::Render()
+ {
+     _back->Render();
+-    _gear->Render();
+-    _power->Render();
+-
+     LAppLive2DManager* Live2DManager = LAppLive2DManager::GetInstance();
+@@ -139,35 +128,17 @@ void LAppView::InitializeSprite()
+     glfwGetWindowSize(LAppDelegate::GetInstance()->GetWindow(), &width, &height);
+     LAppTextureManager* textureManager = LAppDelegate::GetInstance()->GetTextureManager();
+-    const string resourcesPath = LAppDelegate::GetInstance()->GetRootDirectory() + ResourcesPath;
+-    string imageName = BackImageName;
+-    LAppTextureManager::TextureInfo* backgroundTexture = textureManager->CreateTextureFromPngFile(resourcesPath + imageName);
++
++    LAppTextureManager::TextureInfo* backgroundTexture =
++        textureManager->CreateTextureFromColor(0, 255, 0);
+     float x = width * 0.5f;
+     float y = height * 0.5f;
+-    float fWidth = static_cast<float>(backgroundTexture->width * 2.0f);
+-    float fHeight = static_cast<float>(height) * 0.95f;
++    float fWidth = static_cast<float>(width);
++    float fHeight = static_cast<float>(height);
+     _back = new LAppSprite(x, y, fWidth, fHeight, backgroundTexture->id, _programId);
+-    imageName = GearImageName;
+-    LAppTextureManager::TextureInfo* gearTexture = textureManager->CreateTextureFromPngFile(resourcesPath + imageName);
+-
+-    x = static_cast<float>(width - gearTexture->width * 0.5f);
+-    y = static_cast<float>(height - gearTexture->height * 0.5f);
+-    fWidth = static_cast<float>(gearTexture->width);
+-    fHeight = static_cast<float>(gearTexture->height);
+-    _gear = new LAppSprite(x, y, fWidth, fHeight, gearTexture->id, _programId);
+-
+-    imageName = PowerImageName;
+-    LAppTextureManager::TextureInfo* powerTexture = textureManager->CreateTextureFromPngFile(resourcesPath + imageName);
+-
+-    x = static_cast<float>(width - powerTexture->width * 0.5f);
+-    y = static_cast<float>(powerTexture->height * 0.5f);
+-    fWidth = static_cast<float>(powerTexture->width);
+-    fHeight = static_cast<float>(powerTexture->height);
+-    _power = new LAppSprite(x, y, fWidth, fHeight, powerTexture->id, _programId);
+-
+     // 画面全体を覆うサイズ
+     x = width * 0.5f;
+     y = height * 0.5f;
+@@ -175,52 +146,6 @@ void LAppView::InitializeSprite()
+ }
+-void LAppView::OnTouchesBegan(float px, float py) const
+-{
+-    _touchManager->TouchesBegan(px, py);
+-}
+-
+-void LAppView::OnTouchesMoved(float px, float py) const
+-{
+-    float viewX = this->TransformViewX(_touchManager->GetX());
+-    float viewY = this->TransformViewY(_touchManager->GetY());
+-
+-    _touchManager->TouchesMoved(px, py);
+-
+-    LAppLive2DManager* Live2DManager = LAppLive2DManager::GetInstance();
+-    Live2DManager->OnDrag(viewX, viewY);
+-}
+-
+-void LAppView::OnTouchesEnded(float px, float py) const
+-{
+-    // タッチ終了
+-    LAppLive2DManager* live2DManager = LAppLive2DManager::GetInstance();
+-    live2DManager->OnDrag(0.0f, 0.0f);
+-    {
+-
+-        // シングルタップ
+-        float x = _deviceToScreen->TransformX(_touchManager->GetX()); // 論理座標変換した座標を取得。
+-        float y = _deviceToScreen->TransformY(_touchManager->GetY()); // 論理座標変換した座標を取得。
+-        if (DebugTouchLogEnable)
+-        {
+-            LAppPal::PrintLog("[APP]touchesEnded x:%.2f y:%.2f", x, y);
+-        }
+-        live2DManager->OnTap(x, y);
+-
+-        // 歯車にタップしたか
+-        if (_gear->IsHit(px, py))
+-        {
+-            live2DManager->NextScene();
+-        }
+-
+-        // 電源ボタンにタップしたか
+-        if (_power->IsHit(px, py))
+-        {
+-            LAppDelegate::GetInstance()->AppEnd();
+-        }
+-    }
+-}
+-
+ float LAppView::TransformViewX(float deviceX) const
+ {
+     float screenX = _deviceToScreen->TransformX(deviceX); // 論理座標変換した座標を取得。
+@@ -362,32 +287,4 @@ void LAppView::ResizeSprite()
+             _back->ResetRect(x, y, fWidth, fHeight);
+         }
+     }
+-
+-    if (_power)
+-    {
+-        GLuint id = _power->GetTextureId();
+-        LAppTextureManager::TextureInfo* texInfo = textureManager->GetTextureInfoById(id);
+-        if (texInfo)
+-        {
+-            x = static_cast<float>(width - texInfo->width * 0.5f);
+-            y = static_cast<float>(texInfo->height * 0.5f);
+-            fWidth = static_cast<float>(texInfo->width);
+-            fHeight = static_cast<float>(texInfo->height);
+-            _power->ResetRect(x, y, fWidth, fHeight);
+-        }
+-    }
+-
+-    if (_gear)
+-    {
+-        GLuint id = _gear->GetTextureId();
+-        LAppTextureManager::TextureInfo* texInfo = textureManager->GetTextureInfoById(id);
+-        if (texInfo)
+-        {
+-            x = static_cast<float>(width - texInfo->width * 0.5f);
+-            y = static_cast<float>(height - texInfo->height * 0.5f);
+-            fWidth = static_cast<float>(texInfo->width);
+-            fHeight = static_cast<float>(texInfo->height);
+-            _gear->ResetRect(x, y, fWidth, fHeight);
+-        }
+-    }
+ }
+diff -pruN --exclude build ./demo_clean/src/LAppView.hpp ./demo_dev/src/LAppView.hpp
+--- ./demo_clean/src/LAppView.hpp      2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/LAppView.hpp        2020-07-11 17:38:25.541708705 +0100
+@@ -14,7 +14,6 @@
+ #include "CubismFramework.hpp"
+ #include <Rendering/OpenGL/CubismOffscreenSurface_OpenGLES2.hpp>
+-class TouchManager;
+ class LAppSprite;
+ class LAppModel;
+@@ -66,30 +65,6 @@ public:
+     void ResizeSprite();
+     /**
+-    * @brief タッチされたときに呼ばれる。
+-    *
+-    * @param[in]       pointX            スクリーンX座標
+-    * @param[in]       pointY            スクリーンY座標
+-    */
+-    void OnTouchesBegan(float pointX, float pointY) const;
+-
+-    /**
+-    * @brief タッチしているときにポインタが動いたら呼ばれる。
+-    *
+-    * @param[in]       pointX            スクリーンX座標
+-    * @param[in]       pointY            スクリーンY座標
+-    */
+-    void OnTouchesMoved(float pointX, float pointY) const;
+-
+-    /**
+-    * @brief タッチが終了したら呼ばれる。
+-    *
+-    * @param[in]       pointX            スクリーンX座標
+-    * @param[in]       pointY            スクリーンY座標
+-    */
+-    void OnTouchesEnded(float pointX, float pointY) const;
+-
+-    /**
+     * @brief X座標をView座標に変換する。
+     *
+     * @param[in]       deviceX            デバイスX座標
+@@ -147,13 +122,10 @@ public:
+     void SetRenderTargetClearColor(float r, float g, float b);
+ private:
+-    TouchManager* _touchManager;                 ///< タッチマネージャー
+     Csm::CubismMatrix44* _deviceToScreen;    ///< デバイスからスクリーンへの行列
+     Csm::CubismViewMatrix* _viewMatrix;      ///< viewMatrix
+     GLuint _programId;                       ///< シェーダID
+     LAppSprite* _back;                       ///< 背景画像
+-    LAppSprite* _gear;                       ///< ギア画像
+-    LAppSprite* _power;                      ///< 電源画像
+     // レンダリング先を別ターゲットにする方式の場合に使用
+     LAppSprite* _renderSprite;                                  ///< モードによっては_renderBufferのテクスチャを描画
+diff -pruN --exclude build ./demo_clean/src/main.cpp ./demo_dev/src/main.cpp
+--- ./demo_clean/src/main.cpp  2020-07-12 16:16:33.999809687 +0100
++++ ./demo_dev/src/main.cpp    2020-07-12 15:06:29.194034887 +0100
+@@ -5,18 +5,156 @@
+  * that can be found at https://www.live2d.com/eula/live2d-open-software-license-agreement_en.html.
+  */
++#include <thread>
++#include <stdexcept>
++#include <sstream>
++
++#ifdef __cpp_lib_filesystem
++#include <filesystem>
++namespace fs = std::filesystem;
++#else
++#include <experimental/filesystem>
++namespace fs = std::experimental::filesystem;
++#endif
++
++
+ #include "LAppDelegate.hpp"
++#include "LAppLive2DManager.hpp"
++#include "facial_landmark_detector.h"
++
++struct CmdArgs
++{
++    int windowWidth;
++    int windowHeight;
++    std::string windowTitle;
++    std::string rootDir;
++    float scaleFactor;
++    float translateX;
++    float translateY;
++    std::string modelName;
++    std::string cfgPath; // Path to config file for FacialLandmarkDetector
++};
++
++CmdArgs parseArgv(int argc, char *argv[])
++{
++    // I think the command-line args are simple enough to not justify using a library...
++    CmdArgs cmdArgs;
++    // Set default values
++    cmdArgs.windowWidth = 600;
++    cmdArgs.windowHeight = 600;
++    cmdArgs.windowTitle = "FacialLandmarksForCubism example";
++    cmdArgs.rootDir = fs::current_path();
++    cmdArgs.scaleFactor = 8.0f;
++    cmdArgs.translateX = 0.0f;
++    cmdArgs.translateY = -2.8f;
++    cmdArgs.modelName = "Haru";
++    cmdArgs.cfgPath = "";
++
++    int i = 1;
++    while (i < argc)
++    {
++        std::string arg = argv[i];
++        std::stringstream ss;
++
++        if (arg == "--window-width" || arg == "-W") // capital W for consistency with height
++        {
++            ss << argv[i + 1];
++            if (!(ss >> cmdArgs.windowWidth))
++            {
++                throw std::runtime_error("Invalid argument for window width");
++            }
++        }
++        else if (arg == "--window-height" || arg == "-H") // avoiding "-h", typically for help
++        {
++            ss << argv[i + 1];
++            if (!(ss >> cmdArgs.windowHeight))
++            {
++                throw std::runtime_error("Invalid argument for window height");
++            }
++        }
++        else if (arg == "--window-title" || arg == "-t")
++        {
++            cmdArgs.windowTitle = argv[i + 1];
++        }
++        else if (arg == "--root-dir" || arg == "-d")
++        {
++            cmdArgs.rootDir = argv[i + 1];
++        }
++        else if (arg == "--scale-factor" || arg == "-f")
++        {
++            ss << argv[i + 1];
++            if (!(ss >> cmdArgs.scaleFactor))
++            {
++                throw std::runtime_error("Invalid argument for scale factor");
++            }
++        }
++        else if (arg == "--translate-x" || arg == "-x")
++        {
++            ss << argv[i + 1];
++            if (!(ss >> cmdArgs.translateX))
++            {
++                throw std::runtime_error("Invalid argument for translate X");
++            }
++        }
++        else if (arg == "--translate-y" || arg == "-y")
++        {
++            ss << argv[i + 1];
++            if (!(ss >> cmdArgs.translateY))
++            {
++                throw std::runtime_error("Invalid argument for translate Y");
++            }
++        }
++        else if (arg == "--model" || arg == "-m")
++        {
++            cmdArgs.modelName = argv[i + 1];
++        }
++        else if (arg == "--config" || arg == "-c")
++        {
++            cmdArgs.cfgPath = argv[i + 1];
++        }
++        else
++        {
++            throw std::runtime_error("Unrecognized argument: " + arg);
++        }
++
++        i += 2;
++    }
++
++    return cmdArgs;
++}
+ int main(int argc, char* argv[])
+ {
+-    // create the application instance
+-    if (LAppDelegate::GetInstance()->Initialize() == GL_FALSE)
++    auto cmdArgs = parseArgv(argc, argv);
++
++    LAppDelegate *delegate = LAppDelegate::GetInstance();
++
++    if (!delegate->Initialize(cmdArgs.windowWidth,
++                              cmdArgs.windowHeight,
++                              cmdArgs.windowTitle.c_str()))
+     {
+-        return 1;
++        throw std::runtime_error("Unable to initialize LAppDelegate");
+     }
+-    LAppDelegate::GetInstance()->Run();
++    delegate->SetRootDirectory(cmdArgs.rootDir);
++
++    FacialLandmarkDetector detector(cmdArgs.cfgPath);
++
++    std::thread detectorThread(&FacialLandmarkDetector::mainLoop,
++                               &detector);
++
++    LAppLive2DManager *manager = LAppLive2DManager::GetInstance();
++    manager->SetModel(cmdArgs.modelName);
++
++    manager->SetProjectionScaleTranslate(cmdArgs.scaleFactor,
++                                         cmdArgs.translateX,
++                                         cmdArgs.translateY);
++    manager->SetFacialLandmarkDetector(&detector);
++
++    delegate->Run();
++
++    detector.stop();
++    detectorThread.join();
+     return 0;
+ }
+-
diff --git a/example/generate_patch.sh b/example/generate_patch.sh
new file mode 100755 (executable)
index 0000000..068f4b0
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+mkdir -p demo_clean
+cp -r CubismSdkForNative-4-r.1/Samples/OpenGL/Demo/proj.linux.cmake/* ./demo_clean/
+diff -pruN --exclude build ./demo_clean ./demo_dev > ./demo.patch
diff --git a/include/facial_landmark_detector.h b/include/facial_landmark_detector.h
new file mode 100644 (file)
index 0000000..7c6f639
--- /dev/null
@@ -0,0 +1,144 @@
+// -*- mode: c++ -*-
+
+#ifndef __FACIAL_LANDMARK_DETECTOR_H__
+#define __FACIAL_LANDMARK_DETECTOR_H__
+
+/****
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+****/
+
+#include <deque>
+#include <string>
+#include <opencv2/opencv.hpp>
+#include <dlib/image_processing/frontal_face_detector.h>
+#include <dlib/image_processing.h>
+#include <dlib/gui_widgets.h>
+
+class FacialLandmarkDetector
+{
+public:
+    struct Params
+    {
+        double leftEyeOpenness;
+        double rightEyeOpenness;
+        double leftEyeSmile;
+        double rightEyeSmile;
+        double mouthOpenness;
+        double mouthForm;
+        double faceXAngle;
+        double faceYAngle;
+        double faceZAngle;
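+        // (Nominal ranges, as computed in facial_landmark_detector.cpp:
+        // the openness / smile / form values are on a 0..1 scale and the
+        // face*Angle values are in degrees.)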
+        // TODO eyebrows currently not supported...
+        // I'd like to include them, but the dlib detection is very
+        // noisy and inaccurate (at least for my face).
+    };
+
+    FacialLandmarkDetector(std::string cfgPath);
+
+    Params getParams(void) const;
+
+    void stop(void);
+
+    void mainLoop(void);
+
+private:
+    enum LeftRight : bool
+    {
+        LEFT,
+        RIGHT
+    };
+
+    cv::VideoCapture webcam;
+    dlib::image_window win;
+    dlib::frontal_face_detector detector;
+    dlib::shape_predictor predictor;
+    bool m_stop;
+
+    double calcEyeAspectRatio(dlib::point& p1, dlib::point& p2,
+                              dlib::point& p3, dlib::point& p4,
+                              dlib::point& p5, dlib::point& p6) const;
+
+    double calcRightEyeAspectRatio(dlib::full_object_detection& shape) const;
+    double calcLeftEyeAspectRatio(dlib::full_object_detection& shape) const;
+
+    double calcEyeOpenness(LeftRight eye,
+                           dlib::full_object_detection& shape,
+                           double faceYAngle) const;
+
+    double calcMouthForm(dlib::full_object_detection& shape) const;
+    double calcMouthOpenness(dlib::full_object_detection& shape, double mouthForm) const;
+
+    double calcFaceXAngle(dlib::full_object_detection& shape) const;
+    double calcFaceYAngle(dlib::full_object_detection& shape, double faceXAngle, double mouthForm) const;
+    double calcFaceZAngle(dlib::full_object_detection& shape) const;
+
+    void populateDefaultConfig(void);
+    void parseConfig(std::string cfgPath);
+    void throwConfigError(std::string paramName, std::string expectedType,
+                          std::string line, unsigned int lineNum);
+
+
+    std::deque<double> m_leftEyeOpenness;
+    std::deque<double> m_rightEyeOpenness;
+
+    std::deque<double> m_mouthOpenness;
+    std::deque<double> m_mouthForm;
+
+    std::deque<double> m_faceXAngle;
+    std::deque<double> m_faceYAngle;
+    std::deque<double> m_faceZAngle;
+
+    struct Config
+    {
+        int cvVideoCaptureId;
+        std::string predictorPath;
+        double faceYAngleCorrection;
+        double eyeSmileEyeOpenThreshold;
+        double eyeSmileMouthFormThreshold;
+        double eyeSmileMouthOpenThreshold;
+        bool showWebcamVideo;
+        bool renderLandmarksOnVideo;
+        bool lateralInversion;
+        std::size_t faceXAngleNumTaps;
+        std::size_t faceYAngleNumTaps;
+        std::size_t faceZAngleNumTaps;
+        std::size_t mouthFormNumTaps;
+        std::size_t mouthOpenNumTaps;
+        std::size_t leftEyeOpenNumTaps;
+        std::size_t rightEyeOpenNumTaps;
+        int cvWaitKeyMs;
+        double eyeClosedThreshold;
+        double eyeOpenThreshold;
+        double mouthNormalThreshold;
+        double mouthSmileThreshold;
+        double mouthClosedThreshold;
+        double mouthOpenThreshold;
+        double mouthOpenLaughCorrection;
+        double faceYAngleXRotCorrection;
+        double faceYAngleSmileCorrection;
+        double faceYAngleZeroValue;
+        double faceYAngleUpThreshold;
+        double faceYAngleDownThreshold;
+    } m_cfg;
+};
+
+#endif
+
diff --git a/src/faceXAngle.png b/src/faceXAngle.png
new file mode 100644 (file)
index 0000000..c35e26e
Binary files /dev/null and b/src/faceXAngle.png differ
diff --git a/src/facial_landmark_detector.cpp b/src/facial_landmark_detector.cpp
new file mode 100644 (file)
index 0000000..20ec7c8
--- /dev/null
@@ -0,0 +1,731 @@
+/****
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+****/
+
+#include <stdexcept>
+#include <fstream>
+#include <string>
+#include <sstream>
+#include <cmath>
+
+#include <opencv2/opencv.hpp>
+
+#include <dlib/opencv.h>
+#include <dlib/image_processing/frontal_face_detector.h>
+#include <dlib/image_processing.h>
+#include <dlib/image_processing/render_face_detections.h>
+
+#include "facial_landmark_detector.h"
+#include "math_utils.h"
+
+
+static void filterPush(std::deque<double>& buf, double newval,
+                       std::size_t numTaps)
+{
+    buf.push_back(newval);
+    while (buf.size() > numTaps)
+    {
+        buf.pop_front();
+    }
+}
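+// For example, with numTaps == 3, pushing 1, 2, 3 and then 4 leaves
+// {2, 3, 4} in the buffer, whose avg() is 3 (illustrative values only).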
+
+FacialLandmarkDetector::FacialLandmarkDetector(std::string cfgPath)
+    : m_stop(false)
+{
+    parseConfig(cfgPath);
+
+    if (!webcam.open(m_cfg.cvVideoCaptureId))
+    {
+        throw std::runtime_error("Unable to open webcam");
+    }
+
+    detector = dlib::get_frontal_face_detector();
+    dlib::deserialize(m_cfg.predictorPath) >> predictor;
+}
+
+FacialLandmarkDetector::Params FacialLandmarkDetector::getParams(void) const
+{
+    Params params;
+
+    params.faceXAngle = avg(m_faceXAngle);
+    params.faceYAngle = avg(m_faceYAngle) + m_cfg.faceYAngleCorrection;
+    // faceYAngleCorrection (default 10) compensates for the angle
+    // between the computer monitor and the webcam
+    params.faceZAngle = avg(m_faceZAngle);
+    params.mouthOpenness = avg(m_mouthOpenness);
+    params.mouthForm = avg(m_mouthForm);
+
+    double leftEye = avg(m_leftEyeOpenness, 1);
+    double rightEye = avg(m_rightEyeOpenness, 1);
+    // Just combine the two to get better-synchronized blinks.
+    // This effectively disables winks, so if we want to
+    // support winks in the future (see below) we will need
+    // a better way to handle these out-of-sync blinks.
+    double bothEyes = (leftEye + rightEye) / 2;
+    leftEye = bothEyes;
+    rightEye = bothEyes;
+    // Detect winks and make them look better
+    // Commented out - winks are hard to detect reliably with the
+    // dlib data set anyway... maybe in the future we can
+    // add a runtime option to enable/disable this.
+    /*if (right == 0 && left > 0.2)
+    {
+        left = 1;
+    }
+    else if (left == 0 && right > 0.2)
+    {
+        right = 1;
+    }
+    */
+    params.leftEyeOpenness = leftEye;
+    params.rightEyeOpenness = rightEye;
+
+    if (leftEye <= m_cfg.eyeSmileEyeOpenThreshold &&
+        rightEye <= m_cfg.eyeSmileEyeOpenThreshold &&
+        params.mouthForm > m_cfg.eyeSmileMouthFormThreshold &&
+        params.mouthOpenness > m_cfg.eyeSmileMouthOpenThreshold)
+    {
+        params.leftEyeSmile = 1;
+        params.rightEyeSmile = 1;
+    }
+    else
+    {
+        params.leftEyeSmile = 0;
+        params.rightEyeSmile = 0;
+    }
+
+    return params;
+}
+
+void FacialLandmarkDetector::stop(void)
+{
+    m_stop = true;
+}
+
+void FacialLandmarkDetector::mainLoop(void)
+{
+    while (!m_stop)
+    {
+        cv::Mat frame;
+        if (!webcam.read(frame))
+        {
+            throw std::runtime_error("Unable to read from webcam");
+        }
+        cv::Mat flipped;
+        if (m_cfg.lateralInversion)
+        {
+            cv::flip(frame, flipped, 1);
+        }
+        else
+        {
+            flipped = frame;
+        }
+        dlib::cv_image<dlib::bgr_pixel> cimg(flipped);
+
+        if (m_cfg.showWebcamVideo)
+        {
+            win.set_image(cimg);
+        }
+
+        std::vector<dlib::rectangle> faces = detector(cimg);
+
+        if (faces.size() > 0)
+        {
+            dlib::rectangle face = faces[0];
+            dlib::full_object_detection shape = predictor(cimg, face);
+
+            /* The coordinates seem to be rather noisy in general.
+             * We will push everything through some moving average filters
+             * to reduce noise. The number of taps is determined empirically
+             * until we get something good.
+             * An alternative method would be to get some better dataset
+             * for dlib - perhaps even to train on a custom data set just for the user.
+             */
+
+            // Face rotation: X direction (left-right)
+            double faceXRot = calcFaceXAngle(shape);
+            filterPush(m_faceXAngle, faceXRot, m_cfg.faceXAngleNumTaps);
+
+            // Mouth form (smile / laugh) detection
+            double mouthForm = calcMouthForm(shape);
+            filterPush(m_mouthForm, mouthForm, m_cfg.mouthFormNumTaps);
+
+            // Face rotation: Y direction (up-down)
+            double faceYRot = calcFaceYAngle(shape, faceXRot, mouthForm);
+            filterPush(m_faceYAngle, faceYRot, m_cfg.faceYAngleNumTaps);
+
+            // Face rotation: Z direction (head tilt)
+            double faceZRot = calcFaceZAngle(shape);
+            filterPush(m_faceZAngle, faceZRot, m_cfg.faceZAngleNumTaps);
+
+            // Mouth openness
+            double mouthOpen = calcMouthOpenness(shape, mouthForm);
+            filterPush(m_mouthOpenness, mouthOpen, m_cfg.mouthOpenNumTaps);
+
+            // Eye openness
+            double eyeLeftOpen = calcEyeOpenness(LEFT, shape, faceYRot);
+            filterPush(m_leftEyeOpenness, eyeLeftOpen, m_cfg.leftEyeOpenNumTaps);
+            double eyeRightOpen = calcEyeOpenness(RIGHT, shape, faceYRot);
+            filterPush(m_rightEyeOpenness, eyeRightOpen, m_cfg.rightEyeOpenNumTaps);
+
+            // TODO eyebrows?
+
+            if (m_cfg.showWebcamVideo && m_cfg.renderLandmarksOnVideo)
+            {
+                win.clear_overlay();
+                win.add_overlay(dlib::render_face_detections(shape));
+            }
+        }
+        else
+        {
+            if (m_cfg.showWebcamVideo && m_cfg.renderLandmarksOnVideo)
+            {
+                win.clear_overlay();
+            }
+        }
+
+        cv::waitKey(m_cfg.cvWaitKeyMs);
+    }
+}
+
+double FacialLandmarkDetector::calcEyeAspectRatio(
+    dlib::point& p1, dlib::point& p2,
+    dlib::point& p3, dlib::point& p4,
+    dlib::point& p5, dlib::point& p6) const
+{
+    double eyeWidth = dist(p1, p4);
+    double eyeHeight1 = dist(p2, p6);
+    double eyeHeight2 = dist(p3, p5);
+
+    return (eyeHeight1 + eyeHeight2) / (2 * eyeWidth);
+}
+
+double FacialLandmarkDetector::calcEyeOpenness(
+    LeftRight eye,
+    dlib::full_object_detection& shape,
+    double faceYAngle) const
+{
+    double eyeAspectRatio;
+    if (eye == LEFT)
+    {
+        eyeAspectRatio = calcEyeAspectRatio(shape.part(42), shape.part(43), shape.part(44),
+                                            shape.part(45), shape.part(46), shape.part(47));
+    }
+    else
+    {
+        eyeAspectRatio = calcEyeAspectRatio(shape.part(36), shape.part(37), shape.part(38),
+                                            shape.part(39), shape.part(40), shape.part(41));
+    }
+
+    // Apply correction due to faceYAngle
+    double corrEyeAspRat = eyeAspectRatio / std::cos(degToRad(faceYAngle));
+
+    return linearScale01(corrEyeAspRat, m_cfg.eyeClosedThreshold, m_cfg.eyeOpenThreshold);
+}
+
+
+
+double FacialLandmarkDetector::calcMouthForm(dlib::full_object_detection& shape) const
+{
+    /* Mouth form parameter: 0 for normal mouth, 1 for fully smiling / laughing.
+     * Compare distance between the two corners of the mouth
+     * to the distance between the two eyes.
+     */
+
+    /* An alternative (my initial attempt) was to compare the corners of
+     * the mouth to the top of the upper lip - they almost lie on a
+     * straight line when smiling / laughing. But that is only true
+     * when facing straight at the camera. When looking up / down,
+     * the angle changes. So here we'll use the distance approach instead.
+     */
+
+    auto eye1 = centroid(shape.part(36), shape.part(37), shape.part(38),
+                         shape.part(39), shape.part(40), shape.part(41));
+    auto eye2 = centroid(shape.part(42), shape.part(43), shape.part(44),
+                         shape.part(45), shape.part(46), shape.part(47));
+    double distEyes = dist(eye1, eye2);
+    double distMouth = dist(shape.part(48), shape.part(54));
+
+    double form = linearScale01(distMouth / distEyes,
+                                m_cfg.mouthNormalThreshold,
+                                m_cfg.mouthSmileThreshold);
+
+    return form;
+}
+
+double FacialLandmarkDetector::calcMouthOpenness(
+    dlib::full_object_detection& shape,
+    double mouthForm) const
+{
+    // Use points for the bottom of the upper lip, and top of the lower lip
+    // We have 3 pairs of points available, which give the mouth height
+    // on the left, in the middle, and on the right, resp.
+    // First let's try to use an average of all three.
+    double heightLeft = dist(shape.part(63), shape.part(65));
+    double heightMiddle = dist(shape.part(62), shape.part(66));
+    double heightRight = dist(shape.part(61), shape.part(67));
+
+    double avgHeight = (heightLeft + heightMiddle + heightRight) / 3;
+
+    // Now, normalize it with the width of the mouth.
+    double width = dist(shape.part(60), shape.part(64));
+
+    double normalized = avgHeight / width;
+
+    double scaled = linearScale01(normalized,
+                                  m_cfg.mouthClosedThreshold,
+                                  m_cfg.mouthOpenThreshold,
+                                  true, false);
+
+    // Apply correction according to mouthForm
+    // Notice that when you smile / laugh, width is increased
+    scaled *= (1 + m_cfg.mouthOpenLaughCorrection * mouthForm);
+
+    return scaled;
+}
+
+double FacialLandmarkDetector::calcFaceXAngle(dlib::full_object_detection& shape) const
+{
+    // This function will be easier to understand if you refer to the
+    // diagram in faceXAngle.png
+
+    // Construct the y-axis using (1) average of four points on the nose and
+    // (2) average of four points on the upper lip.
+
+    auto y0 = centroid(shape.part(27), shape.part(28), shape.part(29),
+                       shape.part(30));
+    auto y1 = centroid(shape.part(50), shape.part(51), shape.part(52),
+                       shape.part(62));
+
+    // Now drop a perpendicular from the left and right edges of the face,
+    // and calculate the ratio between the lengths of these perpendiculars
+
+    auto left = centroid(shape.part(14), shape.part(15), shape.part(16));
+    auto right = centroid(shape.part(0), shape.part(1), shape.part(2));
+
+    // Constructing a perpendicular:
+    // Join the left/right point and the upper lip. The included angle
+    // can now be determined using cosine rule.
+    // Then sine of this angle is the perpendicular divided by the newly
+    // created line.
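+    // Concretely (restating the code below): with triangle sides
+    // opp, adj1, adj2, the cosine rule gives
+    //     cos(C) = (adj1^2 + adj2^2 - opp^2) / (2 * adj1 * adj2)
+    // and the perpendicular onto the y-axis is perp = adj2 * sin(C).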
+    double opp = dist(right, y0);
+    double adj1 = dist(y0, y1);
+    double adj2 = dist(y1, right);
+    double angle = solveCosineRuleAngle(opp, adj1, adj2);
+    double perpRight = adj2 * std::sin(angle);
+
+    opp = dist(left, y0);
+    adj2 = dist(y1, left);
+    angle = solveCosineRuleAngle(opp, adj1, adj2);
+    double perpLeft = adj2 * std::sin(angle);
+
+    // Model the head as a sphere and look from above.
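+    // (One way to read this, treating the face edges as the sphere's
+    // silhouette at +/- r and the y-axis as projecting at r * sin(theta):
+    // the two perpendiculars are then r * (1 +/- sin(theta)), so their
+    // difference over their sum recovers sin(theta).)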
+    double theta = std::asin((perpRight - perpLeft) / (perpRight + perpLeft));
+
+    theta = radToDeg(theta);
+    if (theta < -30) theta = -30;
+    if (theta > 30) theta = 30;
+    return theta;
+}
+
+double FacialLandmarkDetector::calcFaceYAngle(dlib::full_object_detection& shape, double faceXAngle, double mouthForm) const
+{
+    // Use the nose
+    // angle between the two left/right points and the tip
+    double c = dist(shape.part(31), shape.part(35));
+    double a = dist(shape.part(30), shape.part(31));
+    double b = dist(shape.part(30), shape.part(35));
+
+    double angle = solveCosineRuleAngle(c, a, b);
+
+    // This probably varies a lot from person to person...
+
+    // Best is probably to work out some trigonometry again,
+    // but just linear interpolation seems to work ok...
+
+    // Correct for X rotation
+    double corrAngle = angle * (1 + (std::abs(faceXAngle) / 30
+                                     * m_cfg.faceYAngleXRotCorrection));
+
+    // Correct for smiles / laughs - this increases the angle
+    corrAngle *= (1 - mouthForm * m_cfg.faceYAngleSmileCorrection);
+
+    if (corrAngle >= m_cfg.faceYAngleZeroValue)
+    {
+        return -30 * linearScale01(corrAngle,
+                                   m_cfg.faceYAngleZeroValue,
+                                   m_cfg.faceYAngleDownThreshold,
+                                   false, false);
+    }
+    else
+    {
+        return 30 * (1 - linearScale01(corrAngle,
+                                       m_cfg.faceYAngleUpThreshold,
+                                       m_cfg.faceYAngleZeroValue,
+                                       false, false));
+    }
+}
+
+double FacialLandmarkDetector::calcFaceZAngle(dlib::full_object_detection& shape) const
+{
+    // Use average of eyes and nose
+
+    auto eyeRight = centroid(shape.part(36), shape.part(37), shape.part(38),
+                             shape.part(39), shape.part(40), shape.part(41));
+    auto eyeLeft = centroid(shape.part(42), shape.part(43), shape.part(44),
+                            shape.part(45), shape.part(46), shape.part(47));
+
+    auto noseLeft = shape.part(35);
+    auto noseRight = shape.part(31);
+
+    double eyeYDiff = eyeRight.y() - eyeLeft.y();
+    double eyeXDiff = eyeRight.x() - eyeLeft.x();
+
+    double angle1 = std::atan(eyeYDiff / eyeXDiff);
+
+    double noseYDiff = noseRight.y() - noseLeft.y();
+    double noseXDiff = noseRight.x() - noseLeft.x();
+
+    double angle2 = std::atan(noseYDiff / noseXDiff);
+
+    return radToDeg((angle1 + angle2) / 2);
+}
+
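+/* Config file format accepted by parseConfig() below: one "paramName value"
+ * pair per line; lines whose first character is '#' are skipped as comments.
+ * A minimal illustrative example (values are examples, not recommendations):
+ *
+ *   # use the second webcam and mirror the image
+ *   cvVideoCaptureId 1
+ *   lateralInversion 1
+ *   predictorPath shape_predictor_68_face_landmarks.dat
+ */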
+void FacialLandmarkDetector::parseConfig(std::string cfgPath)
+{
+    populateDefaultConfig();
+    if (cfgPath != "")
+    {
+        std::ifstream file(cfgPath);
+
+        if (!file)
+        {
+            throw std::runtime_error("Failed to open config file");
+        }
+
+        std::string line;
+        unsigned int lineNum = 0;
+
+        while (std::getline(file, line))
+        {
+            lineNum++;
+
+            if (line[0] == '#')
+            {
+                continue;
+            }
+
+            std::istringstream ss(line);
+            std::string paramName;
+            if (ss >> paramName)
+            {
+                if (paramName == "cvVideoCaptureId")
+                {
+                    if (!(ss >> m_cfg.cvVideoCaptureId))
+                    {
+                        throwConfigError(paramName, "int",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "predictorPath")
+                {
+                    if (!(ss >> m_cfg.predictorPath))
+                    {
+                        throwConfigError(paramName, "std::string",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleCorrection")
+                {
+                    if (!(ss >> m_cfg.faceYAngleCorrection))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "eyeSmileEyeOpenThreshold")
+                {
+                    if (!(ss >> m_cfg.eyeSmileEyeOpenThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "eyeSmileMouthFormThreshold")
+                {
+                    if (!(ss >> m_cfg.eyeSmileMouthFormThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "eyeSmileMouthOpenThreshold")
+                {
+                    if (!(ss >> m_cfg.eyeSmileMouthOpenThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "showWebcamVideo")
+                {
+                    if (!(ss >> m_cfg.showWebcamVideo))
+                    {
+                        throwConfigError(paramName, "bool",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "renderLandmarksOnVideo")
+                {
+                    if (!(ss >> m_cfg.renderLandmarksOnVideo))
+                    {
+                        throwConfigError(paramName, "bool",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "lateralInversion")
+                {
+                    if (!(ss >> m_cfg.lateralInversion))
+                    {
+                        throwConfigError(paramName, "bool",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceXAngleNumTaps")
+                {
+                    if (!(ss >> m_cfg.faceXAngleNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleNumTaps")
+                {
+                    if (!(ss >> m_cfg.faceYAngleNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceZAngleNumTaps")
+                {
+                    if (!(ss >> m_cfg.faceZAngleNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthFormNumTaps")
+                {
+                    if (!(ss >> m_cfg.mouthFormNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthOpenNumTaps")
+                {
+                    if (!(ss >> m_cfg.mouthOpenNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "leftEyeOpenNumTaps")
+                {
+                    if (!(ss >> m_cfg.leftEyeOpenNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "rightEyeOpenNumTaps")
+                {
+                    if (!(ss >> m_cfg.rightEyeOpenNumTaps))
+                    {
+                        throwConfigError(paramName, "std::size_t",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "cvWaitKeyMs")
+                {
+                    if (!(ss >> m_cfg.cvWaitKeyMs))
+                    {
+                        throwConfigError(paramName, "int",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "eyeClosedThreshold")
+                {
+                    if (!(ss >> m_cfg.eyeClosedThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "eyeOpenThreshold")
+                {
+                    if (!(ss >> m_cfg.eyeOpenThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthNormalThreshold")
+                {
+                    if (!(ss >> m_cfg.mouthNormalThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthSmileThreshold")
+                {
+                    if (!(ss >> m_cfg.mouthSmileThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthClosedThreshold")
+                {
+                    if (!(ss >> m_cfg.mouthClosedThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthOpenThreshold")
+                {
+                    if (!(ss >> m_cfg.mouthOpenThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "mouthOpenLaughCorrection")
+                {
+                    if (!(ss >> m_cfg.mouthOpenLaughCorrection))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleXRotCorrection")
+                {
+                    if (!(ss >> m_cfg.faceYAngleXRotCorrection))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleSmileCorrection")
+                {
+                    if (!(ss >> m_cfg.faceYAngleSmileCorrection))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleZeroValue")
+                {
+                    if (!(ss >> m_cfg.faceYAngleZeroValue))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleUpThreshold")
+                {
+                    if (!(ss >> m_cfg.faceYAngleUpThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else if (paramName == "faceYAngleDownThreshold")
+                {
+                    if (!(ss >> m_cfg.faceYAngleDownThreshold))
+                    {
+                        throwConfigError(paramName, "double",
+                                         line, lineNum);
+                    }
+                }
+                else
+                {
+                    std::ostringstream oss;
+                    oss << "Unrecognized parameter name at line " << lineNum
+                        << ": " << paramName;
+                    throw std::runtime_error(oss.str());
+                }
+            }
+        }
+    }
+}
+
+void FacialLandmarkDetector::populateDefaultConfig(void)
+{
+    // These are values that I've personally tested to work OK for my face.
+    // Your mileage may vary - hence the config file.
+
+    m_cfg.cvVideoCaptureId = 0;
+    m_cfg.predictorPath = "shape_predictor_68_face_landmarks.dat";
+    m_cfg.faceYAngleCorrection = 10;
+    m_cfg.eyeSmileEyeOpenThreshold = 0.6;
+    m_cfg.eyeSmileMouthFormThreshold = 0.75;
+    m_cfg.eyeSmileMouthOpenThreshold = 0.5;
+    m_cfg.showWebcamVideo = true;
+    m_cfg.renderLandmarksOnVideo = true;
+    m_cfg.lateralInversion = true;
+    m_cfg.cvWaitKeyMs = 5;
+    m_cfg.faceXAngleNumTaps = 11;
+    m_cfg.faceYAngleNumTaps = 11;
+    m_cfg.faceZAngleNumTaps = 11;
+    m_cfg.mouthFormNumTaps = 3;
+    m_cfg.mouthOpenNumTaps = 3;
+    m_cfg.leftEyeOpenNumTaps = 3;
+    m_cfg.rightEyeOpenNumTaps = 3;
+    m_cfg.eyeClosedThreshold = 0.2;
+    m_cfg.eyeOpenThreshold = 0.25;
+    m_cfg.mouthNormalThreshold = 0.75;
+    m_cfg.mouthSmileThreshold = 1.0;
+    m_cfg.mouthClosedThreshold = 0.1;
+    m_cfg.mouthOpenThreshold = 0.4;
+    m_cfg.mouthOpenLaughCorrection = 0.2;
+    m_cfg.faceYAngleXRotCorrection = 0.15;
+    m_cfg.faceYAngleSmileCorrection = 0.075;
+    m_cfg.faceYAngleZeroValue = 1.8;
+    m_cfg.faceYAngleDownThreshold = 2.3;
+    m_cfg.faceYAngleUpThreshold = 1.3;
+}
+
+void FacialLandmarkDetector::throwConfigError(std::string paramName,
+                                              std::string expectedType,
+                                              std::string line,
+                                              unsigned int lineNum)
+{
+    std::ostringstream ss;
+    ss << "Error parsing config file for parameter " << paramName
+       << "\nAt line " << lineNum << ": " << line
+       << "\nExpecting value of type " << expectedType;
+
+    throw std::runtime_error(ss.str());
+}
+
diff --git a/src/math_utils.h b/src/math_utils.h
new file mode 100644 (file)
index 0000000..d465c87
--- /dev/null
@@ -0,0 +1,108 @@
+// -*- mode: c++ -*-
+
+#ifndef __FACE_DETECTOR_MATH_UTILS_H__
+#define __FACE_DETECTOR_MATH_UTILS_H__
+
+/****
+Copyright (c) 2020 Adrian I. Lam
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+****/
+
+#include <cmath>
+#include <initializer_list>
+#include <dlib/image_processing.h>
+
+static const double PI = 3.14159265358979;
+
+template<class T>
+static double avg(T container, double defaultValue = 0)
+{
+    if (container.size() == 0)
+    {
+        return defaultValue;
+    }
+
+    double sum = 0;
+    for (auto it = container.begin(); it != container.end(); ++it)
+    {
+        sum += *it;
+    }
+    return sum / container.size();
+}
+
+template<class... Args>
+static dlib::point centroid(Args&... args)
+{
+    std::size_t numArgs = sizeof...(args);
+    if (numArgs == 0) return dlib::point(0, 0);
+
+    double sumX = 0, sumY = 0;
+    for (auto point : {args...})
+    {
+        sumX += point.x();
+        sumY += point.y();
+    }
+
+    return dlib::point(sumX / numArgs, sumY / numArgs);
+}
+
+static inline double sq(double x)
+{
+    return x * x;
+}
+
+static double solveCosineRuleAngle(double opposite,
+                                   double adjacent1,
+                                   double adjacent2)
+{
+    // c^2 = a^2 + b^2 - 2 a b cos(C)
+    double cosC = (sq(opposite) - sq(adjacent1) - sq(adjacent2)) /
+                  (-2 * adjacent1 * adjacent2);
+    return std::acos(cosC);
+}
+
+static inline double radToDeg(double rad)
+{
+    return rad * 180 / PI;
+}
+
+static inline double degToRad(double deg)
+{
+    return deg * PI / 180;
+}
+
+static double dist(dlib::point& p1, dlib::point& p2)
+{
+    double xDist = p1.x() - p2.x();
+    double yDist = p1.y() - p2.y();
+
+    return std::hypot(xDist, yDist);
+}
+
+/*! Linearly map num from [min, max] to [0, 1];
+    clipMin / clipMax control clipping below 0 and above 1. */
+static double linearScale01(double num, double min, double max,
+                            bool clipMin = true, bool clipMax = true)
+{
+    if (num < min && clipMin) return 0.0;
+    if (num > max && clipMax) return 1.0;
+    return (num - min) / (max - min);
+}
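+// For example, linearScale01(0.3, 0.2, 0.4) == 0.5, while values below 0.2
+// or above 0.4 clip to 0 and 1 respectively when clipping is enabled
+// (illustrative numbers only).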
+
+#endif