Commit 08f72c52 authored by Thiago Santini

Squashed Grip integration

Removing history because of NDAs.
commit 0b939b0ccfb64ef6c7116190bbb3de5a39ecad63 from grip-onto-master

The eye model is not ready yet, and, from experience, I don't think it ever will be,
because the stable eye center premise does not hold in most practical cases.
Do not count on the model being accurate.
parent 885f6c50
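For context on the caveat above: the single-eye-fitter-style model added in this commit assumes that all unprojected pupil circles lie on one eye sphere with a fixed center in eye-camera coordinates. The snippet below is a minimal illustrative sketch of that premise only, not code from this commit; PupilCircle3d and eyeSphereResidual are hypothetical names. When the headset slips, the true center drifts over time, a residual like the one below grows, and the fitted model degrades.

// Illustrative sketch only (not part of this commit): the fitter assumes every
// unprojected pupil circle center p_i lies on a single sphere with fixed center c
// and radius R, roughly p_i ~= c + R * n_i for all samples i.
#include <cmath>
#include <vector>

struct PupilCircle3d {
    double cx, cy, cz; // unprojected pupil circle center
    double nx, ny, nz; // unit normal (candidate gaze direction)
};

// Mean distance of observed pupil centers from the surface of a fitted eye sphere.
// A stable eye center keeps this small; camera/eye slippage inflates it.
double eyeSphereResidual(const std::vector<PupilCircle3d>& samples,
                         double cx, double cy, double cz, double R)
{
    if (samples.empty())
        return 0.0;
    double sum = 0.0;
    for (const auto& s : samples) {
        const double dx = s.cx - cx;
        const double dy = s.cy - cy;
        const double dz = s.cz - cz;
        sum += std::fabs(std::sqrt(dx * dx + dy * dy + dz * dz) - R);
    }
    return sum / samples.size();
}

In practice this is why the EyeContext introduced here keeps several candidate models and retires those whose agreement degrades.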
@@ -70,7 +70,12 @@ SOURCES +=\
$${TOP}/src/gaze-estimation/GazeVectorBinocularPolyFit.cpp \
$${TOP}/src/post-processing/PostProcessingWidget.cpp \
$${TOP}/src/post-processing/VideoSource.cpp \
$${TOP}/src/gaze-estimation/GazeEstimate.cpp
$${TOP}/src/gaze-estimation/GazeEstimate.cpp \
$${TOP}/src/eye-context/EyeContext.cpp \
$${TOP}/src/eye-context/EyeModel.cpp \
$${TOP}/src/eye-context/EyeModelSampler.cpp \
$${TOP}/src/eye-context/EyeModelUtils.cpp \
$${TOP}/src/eye-context/singleeyefitter/SingleEyeFitter.cpp
HEADERS += \
$${TOP}/src/MainWindow.h\
@@ -122,6 +127,26 @@ HEADERS += \
$${TOP}/src/post-processing/VideoSource.h \
$${TOP}/src/gaze-estimation/GazeEstimate.h \
$${TOP}/src/gaze-estimation/GazeEstimate.h \
$${TOP}/src/MonotonicClock.h \
$${TOP}/src/eye-context/EyeContext.h \
$${TOP}/src/eye-context/EyeModel.h \
$${TOP}/src/eye-context/EyeModelSample.h \
$${TOP}/src/eye-context/EyeModelSampler.h \
$${TOP}/src/eye-context/EyeModelUtils.h \
$${TOP}/src/eye-context/singleeyefitter/Circle.h \
$${TOP}/src/eye-context/singleeyefitter/Conic.h \
$${TOP}/src/eye-context/singleeyefitter/Conicoid.h \
$${TOP}/src/eye-context/singleeyefitter/cvx.h \
$${TOP}/src/eye-context/singleeyefitter/distance.h \
$${TOP}/src/eye-context/singleeyefitter/Ellipse.h \
$${TOP}/src/eye-context/singleeyefitter/fun.h \
$${TOP}/src/eye-context/singleeyefitter/intersect.h \
$${TOP}/src/eye-context/singleeyefitter/math.h \
$${TOP}/src/eye-context/singleeyefitter/projection.h \
$${TOP}/src/eye-context/singleeyefitter/sef-utils.h \
$${TOP}/src/eye-context/singleeyefitter/SingleEyeFitter.h \
$${TOP}/src/eye-context/singleeyefitter/solve.h \
$${TOP}/src/eye-context/singleeyefitter/Sphere.h
$${TOP}/src/MonotonicClock.h
FORMS += \
......
@@ -9,6 +9,7 @@ EyeImageProcessor::EyeImageProcessor(QString id, QObject* parent)
, id(id)
, pupilDetectionMethod(nullptr)
, pupilTrackingMethod(nullptr)
, eyeContextPtr(nullptr)
{
availablePupilDetectionMethods.push_back(std::make_shared<PuRe>());
availablePupilDetectionMethods.push_back(std::make_shared<ElSe>());
@@ -26,6 +27,7 @@ EyeImageProcessor::EyeImageProcessor(QString id, QObject* parent)
updateConfig();
pmIdx = gPerformanceMonitor.enrol(id, "Image Processor");
eyeContextPtr = std::make_shared<EyeContext>(id);
}
void EyeImageProcessor::updateConfig()
@@ -134,6 +136,10 @@ void EyeImageProcessor::process(Timestamp timestamp, const Mat& frame)
}
data.modelData = EyeModelData();
if (eyeContextPtr) {
eyeContextPtr->update(data.timestamp, data.input, data.pupil);
eyeContextPtr->getActiveModelData(data.modelData);
}
data.cameraCalibration = cameraCalibration;
data.processingTimestamp = gTimer.elapsed() - processingStart;
......
@@ -30,6 +30,7 @@
#include "pupil-detection/Swirski.h"
#endif
#include "CameraCalibration.h"
#include "eye-context/EyeContext.h"
#include "globals.h"
#include "ocv_utils.h"
#include "pupil-detection/PupilDetectionMethod.h"
@@ -252,6 +253,7 @@ private:
std::shared_ptr<PupilDetectionMethod> pupilDetectionMethod;
std::shared_ptr<PupilTrackingMethod> pupilTrackingMethod;
std::shared_ptr<EyeContext> eyeContextPtr;
unsigned int pmIdx;
};
......
@@ -20,6 +20,8 @@ GazeEstimation::GazeEstimation(QObject* parent)
//availableGazeEstimationMethods.push_back(make_shared<BinocularPolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX));
//availableGazeEstimationMethods.push_back(make_shared<BinocularPolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXX_YYY));
availableGazeEstimationMethods.push_back(make_shared<GazeVectorBinocularPolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXYY, GazeVectorBinocularPolyFit::Mode::INSTANTANEOUS));
availableGazeEstimationMethods.push_back(make_shared<PolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXYY));
availableGazeEstimationMethods.push_back(make_shared<PolyFit>(PolyFit::POLY_X_Y_XY));
//availableGazeEstimationMethods.push_back(make_shared<PolyFit>(PolyFit::POLY_X_Y_XY_XX_YY));
@@ -28,13 +30,6 @@ GazeEstimation::GazeEstimation(QObject* parent)
//availableGazeEstimationMethods.push_back(make_shared<PolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXX_YYY));
availableGazeEstimationMethods.push_back(make_shared<Homography>());
// Deactivated until the eye context gets integrated
//availableGazeEstimationMethods.push_back(make_shared<GazeVectorBinocularPolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXYY, GazeVectorBinocularPolyFit::Mode::INSTANTANEOUS));
//availableGazeEstimationMethods.push_back(make_shared<GazeVectorBinocularPolyFit>(PolyFit::POLY_X_Y_XY, GazeVectorBinocularPolyFit::Mode::INSTANTANEOUS));
//availableGazeEstimationMethods.push_back(make_shared<GazeVectorBinocularPolyFit>(PolyFit::POLY_X_Y_XY_XX_YY_XYY_YXX_XXYY, GazeVectorBinocularPolyFit::Mode::TEMPORAL));
//availableGazeEstimationMethods.push_back(make_shared<GazeVectorBinocularPolyFit>(PolyFit::POLY_X_Y_XY, GazeVectorBinocularPolyFit::Mode::TEMPORAL));
}
GazeEstimation::~GazeEstimation()
......
@@ -125,7 +125,7 @@ MainWindow::MainWindow(QWidget* parent)
postProcessingWidget = new PostProcessingWidget("Post Processing Widget");
postProcessingWidget->setDefaults(false);
setupWidget(postProcessingWidget, settings, ui->postProcessing);
ui->postProcessing->hide();
//ui->postProcessing->hide();
connect(postProcessingWidget, SIGNAL(setWorkingDirectory(QString)),
this, SLOT(setWorkingDirectory(QString)));
connect(postProcessingWidget, SIGNAL(restorePreviousPwd()),
......
#include "EyeContext.h"
using namespace std;
using namespace cv;
EyeContextParams::EyeContextParams()
: maxEyeModels(3)
, minInterModelPeriodMs(20e3)
, maxNonActiveAgeMs(30e3)
, dbg(false)
{
}
EyeContext::EyeContext(const QString& id, QObject* parent)
: QObject(parent)
, id(id)
, modelIdx(0)
, lastModelCreation(0)
, activeModel(nullptr)
{
for (size_t i = 0; i < params.maxEyeModels; i++)
dbgIds.push(makeModelName(i));
}
QString EyeContext::makeModelName(int idx)
{
QString name;
if (id.contains("left", Qt::CaseInsensitive))
name = "Left-";
else if (id.contains("right", Qt::CaseInsensitive))
name = "Right-";
else
name = "Unknown-";
name.append(QString::number(idx));
return name;
}
void EyeContext::createModel()
{
if (eyeModels.size() > 0 && now - lastModelCreation < params.minInterModelPeriodMs)
return;
if (eyeModels.size() >= params.maxEyeModels)
return;
lastModelCreation = now;
QString name = makeModelName(modelIdx);
eyeModels.push_back(make_shared<EyeModel>(name, focal_length, now, dbgIds.front()));
dbgIds.pop();
modelIdx++;
if (params.dbg)
qDebug() << "Created" << eyeModels.back()->name();
}
std::list<EyeModelPtr>::iterator EyeContext::destroyModel(std::list<EyeModelPtr>::iterator& it)
{
dbgIds.push((*it)->dbgId);
if (params.dbg)
qDebug() << "Destroyed" << (*it)->name();
return eyeModels.erase(it);
}
void EyeContext::setActive(const EyeModelPtr& model)
{
if (activeModel == model)
return;
if (params.dbg) {
auto name = [](const EyeModelPtr& p) { return p ? p->name() : "Null"; };
qDebug() << QString("Active: %1 -> %2").arg(name(activeModel)).arg(name(model));
}
activeModel = model;
}
void EyeContext::update(const Timestamp& t, const Mat& img, const Pupil& pupil)
{
now = t;
focal_length = EyeModelUtils::estimateFocalLength(img);
// make sure we always have one model
if (eyeModels.size() == 0)
createModel();
if (!activeModel) // and set the first one as active asap so we already have unprojections
setActive(eyeModels.front());
// register the current data with all models
for (auto& eyeModelPtr : eyeModels)
eyeModelPtr->registerData(t, img, pupil);
manageModels(t);
return;
}
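// Model lifecycle management: spawn an alternative when the active model degrades,
// promote a better contender when it yields, and destroy stale non-active models.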
void EyeContext::manageModels(const Timestamp& t)
{
if (!activeModel)
return;
if (activeModel->shoulStartAlternative(t))
createModel();
// should replace?
if (activeModel->yieldActive(t)) {
for (auto& model : eyeModels) {
if (model == activeModel)
continue;
if (!model->isContender())
continue;
if (activeModel->confidence() > 0.01) {
if (model->agreementRate.score() < activeModel->agreementRate.score())
continue;
}
setActive(model);
}
}
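// Destroy non-active models that have grown too old without taking over or are no longer viable.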
for (auto it = eyeModels.begin(); it != eyeModels.end();) {
auto& model = (*it);
if (model != activeModel) {
bool shouldRemove = false;
shouldRemove |= model->age(now) > params.maxNonActiveAgeMs; // good, but not good enough to take over
shouldRemove |= model->yieldNotActive(t);
if (shouldRemove) {
it = destroyModel(it);
continue;
}
}
++it;
}
}
#ifndef EYECONTEXT_H
#define EYECONTEXT_H
#include <list>
#include <memory>
#include <queue>
#include <QDebug>
#include <QObject>
#include <opencv2/opencv.hpp>
#include "data/EyeModelData.h"
#include "eye-context/EyeModel.h"
#include "pupil-detection/PupilDetectionMethod.h"
#include "utils.h"
using EyeModelPtr = std::shared_ptr<EyeModel>;
struct EyeContextParams {
EyeContextParams();
const size_t maxEyeModels;
Timestamp minInterModelPeriodMs;
Timestamp maxNonActiveAgeMs;
bool dbg;
};
class EyeContext : public QObject {
Q_OBJECT
public:
EyeContext(const QString& id, QObject* parent = nullptr);
void getActiveModelData(EyeModelData& modelData) const
{
if (activeModel)
activeModel->getData(modelData);
}
public slots:
void update(const Timestamp& t, const cv::Mat& img, const Pupil& pupil);
private:
EyeContextParams params;
QString id;
int modelIdx;
Timestamp now;
Timestamp lastModelCreation;
float focal_length;
// holds all models
std::list<EyeModelPtr> eyeModels;
// current active model
EyeModelPtr activeModel;
std::queue<QString> dbgIds;
void createModel();
std::list<EyeModelPtr>::iterator destroyModel(std::list<EyeModelPtr>::iterator &it);
void setActive(const EyeModelPtr& ptr);
void manageModels(const Timestamp& t);
QString makeModelName(int idx);
};
#endif // EYECONTEXT_H
#include "EyeModel.h"
#include "ocv_utils.h"
using namespace std;
using namespace cv;
EyeModelParams::EyeModelParams()
: dbg(false)
, minSamplesToRecondition(1)
, minMsToRecondition(2e3)
, minModelCoverage(0.15f)
, agreementRateWindowMs(20e3)
, minPupilConfidence(0.66f)
, maxPupilAspectRatio(0.95f)
, minAgreement3d(EyeModelUtils::degree2agreement(4.0f))
, maxExpectedAgreement3d(EyeModelUtils::degree2agreement(1.5f))
{
}
EyeModel::EyeModel(const QString& id, const double focalLength, const Timestamp& birth, const QString& dbgId)
: id(id)
, dbgId(dbgId)
, sefPtr(nullptr)
, reconditioningFitter(false)
, focalLength(focalLength)
, birth(birth)
, imgSize(0, 0)
, lastRecondition(0)
, lastAgreement(0)
, newSamples(0)
, inlierRatio(0)
, fitterFitness(0)
{
agreementRate.maxAgeMs = params.agreementRateWindowMs;
if (params.dbg) {
QMetaObject::invokeMethod(&gCvProxy, "namedWindow", Qt::QueuedConnection, Q_ARG(QString, dbgId), Q_ARG(int, cv::WINDOW_AUTOSIZE));
Point pos;
Size expected = { 400, 240 };
if (dbgId.contains("0"))
pos = { 0, 0 };
else if (dbgId.contains("1"))
pos = {
static_cast<int>(expected.width),
static_cast<int>(0)
};
else if (dbgId.contains("2"))
pos = {
static_cast<int>(2 * expected.width),
static_cast<int>(0)
};
QMetaObject::invokeMethod(&gCvProxy, "moveWindow", Qt::QueuedConnection, Q_ARG(QString, dbgId), Q_ARG(int, pos.x), Q_ARG(int, pos.y));
}
}
void EyeModel::registerData(const Timestamp& t, const Mat& img, const Pupil& pupil)
{
using namespace EyeModelUtils;
// create and initialize sample
// note that if we don't have a fitter yet, sample is not initialised until we create one
currentSample = { t, pupil, makeSefObservation(img, pupil) };
initializeSample(currentSample);
// while the fitter is being updated, we don't touch anything else
if (!finishedReconditioningFitter())
return;
// reset if input size has changed
const Size2f& size = { static_cast<float>(img.cols), static_cast<float>(img.rows) };
if (size != imgSize)
reset(size);
// if pupil is good enough, use it to update the model
if (pupil.valid(params.minPupilConfidence)) {
// if we want to limit to stable unprojections...
//if (pupil.axesRatio() < params.maxPupilAspectRatio)
if (sampler.appendUnique(currentSample))
newSamples++;
// starts the asynchronous update if necessary
startFitterReconditioning(t);
// finally, update the sample and model's status
currentSample.evaluateAgreement();
updateStatus(t);
}
if (params.dbg)
showDebug(t, img);
}
void EyeModel::initializeSample(EyeModelSample& sample) const
{
/*
* TODO:
* For practicality, we use the current model center to disambiguate between the
* two pupil2d unprojections even in the instantaneous case. This works even
* if the eye center scale is completely wrong. The only danger is the eye
* tracker moving enough that the wrong unprojection is chosen.
*/
using namespace EyeModelUtils;
if (sefPtr && sefPtr->eye) {
if (sample.pupil2d.valid(params.minPupilConfidence)) {
sample.pupil3d.observation = sample.observation;
sample.unprojected = sefPtr->unproject_single_observation(sample.pupil3d, sefPtr->eye.radius);
sefPtr->initialise_single_observation(sample.pupil3d);
sample.intersected = sample.pupil3d.circle;
if (sample.intersected)
sample.pupilProjection = projectToImg<double>(imgSize, sample.intersected, focalLength);
}
sample.eyeProjection = projectToImg(imgSize, sefPtr->eye, focalLength);
}
}
void EyeModel::reset(const cv::Size2f& size)
{
imgSize = size;
sefPtr = nullptr;
sampler.clear();
newSamples = 0;
lastRecondition = 0;
}
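// Launches an asynchronous rebuild of the 3D fitter from the accumulated samples;
// once a fitter exists, rebuilds are rate-limited by minMsToRecondition and require
// at least minSamplesToRecondition fresh samples.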
void EyeModel::startFitterReconditioning(const Timestamp& t)
{
if (sefPtr && t - lastRecondition < params.minMsToRecondition)
return;
if (reconditioningFitter)
return;
if (sampler.size() < 2)
return;
if (newSamples >= params.minSamplesToRecondition) {
reconditioningFitter = true;
newSamples = 0;
lastRecondition = t;
future = std::async(std::launch::async, [this]() {
auto ptr = std::make_unique<singleeyefitter::EyeModelFitter>(focalLength, 5, 1.5);
for (auto entry : sampler)
ptr->add_observation(get<EyeModelSample>(entry).observation);
auto inliers = ptr->unproject_observations();
inliersSet = std::set<size_t>(inliers.begin(), inliers.end());
ptr->initialise_model();
#ifdef HAS_CERES
ptr->refine_with_inliers();
#endif
return ptr;
});
}
}
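// Returns true when no rebuild is pending; if a pending rebuild just finished and
// produced a valid eye, adopts the new fitter, re-evaluates all stored samples, and
// refreshes the inlier ratio, fitness, and coverage.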
bool EyeModel::finishedReconditioningFitter()
{
if (!reconditioningFitter)
return true;
if (future.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
auto newSefPtr = future.get();
if (newSefPtr->eye) {
sefPtr = move(newSefPtr);
size_t idx = 0;
vector<float> agreements;
for (auto& entry : sampler) {
auto& s = get<EyeModelSample>(entry);
initializeSample(s);
s.evaluateAgreement();
s.inlier = inliersSet.count(idx) > 0;
if (s.inlier) {
if (s.pupil2d.axesRatio() <= params.maxPupilAspectRatio)
agreements.push_back(s.agreement3d);
}
idx++;
}
inlierRatio = inliersSet.size() / static_cast<float>(sampler.size());
if (agreements.size() > 0) {
// fitterFitness = accumulate(agreements.begin(), agreements.end(), 0.0) / agreements.size();
fitterFitness = median(agreements);
} else
fitterFitness = 0;
// TODO: only replace model if fitness and inlier ratios are ok?
sampler.updateCoverage();
}
reconditioningFitter = false;
return true;
}
return false;
}
void EyeModel::updateStatus(const Timestamp& t)
{
const auto agreed = currentSample.intersected && (currentSample.agreement3d >= params.minAgreement3d || currentSample.pupil2d.axesRatio() >= params.maxPupilAspectRatio);
if (agreed)
lastAgreement = t;
// We penalize the model only up to a point
agreementRate.udpate(t, max(currentSample.agreement3d, params.minAgreement3d), agreed);
}
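// An alternative model is warranted once agreement has lapsed for a few seconds
// or disagreement dominates the recent window.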
bool EyeModel::shoulStartAlternative(const Timestamp t) const
{
if (sinceLastAgreement(t) > 3e3 || agreementRate.disagreement() > 0.5)
return true;
return false;
}
bool EyeModel::yieldActive(const Timestamp& t) const
{
if (agreementRate.disagreement() > 0.25)
return true;
return false;
}
bool EyeModel::yieldNotActive(const Timestamp& t) const
{
if (age(t) < 20e3)
return false;
if (confidence() < 0.1)
return true;
if (agreementRate.disagreement() > 0.25)
return true;
return false;
}
bool EyeModel::isContender() const
{
return sampler.coverage() > params.minModelCoverage && inlierRatio > 0.5;
}
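// Confidence is the windowed agreement score normalized between the minimum
// acceptable and the best expected 3D agreement (capped at the latter).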
float EyeModel::confidence() const
{
return normalize<float>(
std::min<float>(agreementRate.score(), params.maxExpectedAgreement3d),
params.minAgreement3d,
params.maxExpectedAgreement3d);
}
float EyeModel::coverage() const
{
return sampler.coverage();
}
float EyeModel::fitness() const
{
return fitterFitness;
}
void EyeModel::getData(EyeModelData& modelData) const
{
using namespace EyeModelUtils;
if (sefPtr && sefPtr->eye) {
modelData.center = toCv(sefPtr->eye.centre);
modelData.eyeProjection = currentSample.eyeProjection;
modelData.confidence = confidence();
modelData.gaze = GazeVector(currentSample.intersected.normal);
modelData.instantaneousGaze = GazeVector(currentSample.unprojected.normal);