#include "FrameGrabber.h" #include "globals.h" using namespace std; using namespace cv; FrameGrabber::FrameGrabber(QString id, int code, QObject* parent) : QAbstractVideoSurface(parent) , id(id) , code(code) , yuvBuffer(nullptr) , yuvBufferSize(0) , watchdog(new QTimer(this)) , timeoutMs(2e3) { #ifdef TURBOJPEG tjh = tjInitDecompress(); #endif connect(watchdog, SIGNAL(timeout()), this, SIGNAL(timedout())); pmIdx = gPerformanceMonitor.enrol(id, "Frame Grabber"); } FrameGrabber::~FrameGrabber() { watchdog->stop(); watchdog->deleteLater(); #ifdef TURBOJPEG tjDestroy(tjh); #endif } QList FrameGrabber::supportedPixelFormats(QAbstractVideoBuffer::HandleType handleType) const { Q_UNUSED(handleType); return QList() << QVideoFrame::Format_Jpeg << QVideoFrame::Format_RGB32 << QVideoFrame::Format_YUYV << QVideoFrame::Format_RGB24; //<< QVideoFrame::Format_ARGB32_Premultiplied //<< QVideoFrame::Format_RGB565 //<< QVideoFrame::Format_RGB555 //<< QVideoFrame::Format_ARGB8565_Premultiplied //<< QVideoFrame::Format_BGRA32 //<< QVideoFrame::Format_BGRA32_Premultiplied //<< QVideoFrame::Format_BGR24 //<< QVideoFrame::Format_BGR565 //<< QVideoFrame::Format_BGR555 //<< QVideoFrame::Format_BGRA5658_Premultiplied //<< QVideoFrame::Format_AYUV444 //<< QVideoFrame::Format_AYUV444_Premultiplied //<< QVideoFrame::Format_YUV444 //<< QVideoFrame::Format_YUV420P //<< QVideoFrame::Format_YV12 //<< QVideoFrame::Format_UYVY //<< QVideoFrame::Format_YUYV //<< QVideoFrame::Format_NV12 //<< QVideoFrame::Format_NV21 //<< QVideoFrame::Format_IMC1 //<< QVideoFrame::Format_IMC2 //<< QVideoFrame::Format_IMC3 //<< QVideoFrame::Format_IMC4 //<< QVideoFrame::Format_Y8 //<< QVideoFrame::Format_Y16 //<< QVideoFrame::Format_CameraRaw //<< QVideoFrame::Format_AdobeDng; } Timestamp FrameGrabber::getTimestamp(const QVideoFrame& frame) { /* We have three options for timestamps: * 1 EyeRec software timestamp (t) * 2 Frame presentation timestamp (pts) * 3 Frame software timestamp (fts) -- at least for the uvcengine * * However, we can't guarante the source for pts and fts, so we need to keep * track of drift */ auto ts = gTimer.elapsed(); // 1 auto selected = ts; if (frame.startTime() > 0 && frame.endTime() > 0) { // 2 const auto pts = 1e-3 * 0.5 * (frame.endTime() + frame.startTime()); selected = drift.correct(ts, pts); } else { // 3 const auto& metaNow = frame.metaData("steady_clock::now"); if (metaNow.isValid()) { const auto fts = gTimer.elapsed(qvariant_cast(metaNow)); selected = drift.correct(ts, fts); } } return selected; } bool FrameGrabber::present(const QVideoFrame& frame) { /* * IMPORTANT: * * This frame's data lifetime is not guaranteed once we leave this function * so it shouldn't be used outside. 
bool FrameGrabber::present(const QVideoFrame& frame)
{
    /* IMPORTANT:
     *
     * This frame's data lifetime is not guaranteed once we leave this
     * function, so it shouldn't be used outside. If sending the data
     * somewhere else (e.g., for the preview) is necessary, we must copy the
     * data.
     */
    using namespace std::chrono;

    // Get the software timestamp as soon as possible
    auto t = getTimestamp(frame);

    if (!frame.isValid())
        return false;

    QVideoFrame copy(frame);
    Mat cvFrame;
    copy.map(QAbstractVideoBuffer::ReadOnly);
    bool success = false;
    switch (frame.pixelFormat()) {
        case QVideoFrame::Format_Jpeg:
            success = jpeg2bmp(copy, cvFrame);
            break;
        case QVideoFrame::Format_RGB32:
            success = rgb32_2bmp(copy, cvFrame);
            break;
        case QVideoFrame::Format_YUYV:
            success = yuyv_2bmp(copy, cvFrame);
            break;
        default:
            qWarning() << "Unknown pixel format:" << frame.pixelFormat();
            break;
    }
    copy.unmap();

    if (success && !cvFrame.empty()) {
        watchdog->start(timeoutMs);
        emit newFrame(t, cvFrame);
    } else
        gPerformanceMonitor.account(pmIdx);

    return success;
}

void FrameGrabber::setColorCode(int code)
{
    this->code = code;
}

bool FrameGrabber::jpeg2bmp(const QVideoFrame& in, cv::Mat& cvFrame)
{
    auto frame = const_cast<uchar*>(in.bits());
    int len = in.mappedBytes();

#ifdef TURBOJPEG
    int width, height, subsamp, res;

    res = tjDecompressHeader2(tjh, frame, len, &width, &height, &subsamp);
    if (res < 0) {
        qWarning() << QString("Frame drop; invalid header: ").append(tjGetErrorStr());
        return false;
    }

    long unsigned int bufSize = tjBufSizeYUV(width, height, subsamp);
    if (bufSize != yuvBufferSize) {
        //qInfo() << "YUV buffer size changed";
        yuvBufferSize = bufSize;
        delete[] yuvBuffer; // must match the array new below
        yuvBuffer = new unsigned char[yuvBufferSize];
    }

    res = tjDecompressToYUV(tjh, frame, len, yuvBuffer, 0);
    if (res < 0) {
        qWarning() << QString("Frame drop; failed to decompress: ").append(tjGetErrorStr());
        return false;
    }

    cvFrame = Mat(height, width, code);
    int decode = code == CV_8UC3 ? TJPF_BGR : TJPF_GRAY;
    // pad = 4: rows in the YUV buffer are padded to 4-byte boundaries, matching tjBufSizeYUV
    res = tjDecodeYUV(tjh, yuvBuffer, 4, subsamp, cvFrame.data, width, 0, height, decode, 0);
    if (res < 0) {
        qWarning() << QString("Frame drop; failed to decode: ").append(tjGetErrorStr());
        return false;
    }
#else
    std::vector<uchar> data(frame, frame + len);
    if (code == CV_8U)
        cvFrame = imdecode(Mat(data), CV_LOAD_IMAGE_GRAYSCALE);
    else
        cvFrame = imdecode(Mat(data), CV_LOAD_IMAGE_COLOR);
#endif

    return true;
}

bool FrameGrabber::rgb32_2bmp(const QVideoFrame& in, cv::Mat& cvFrame)
{
    // Why abs()? Some cameras report negative frame sizes through DirectShow;
    // I'm looking at you, Grasshopper!
    Mat rgba = Mat(abs(in.height()), abs(in.width()), CV_8UC4, (void*)in.bits());
    if (code == CV_8UC3)
        cvtColor(rgba, cvFrame, CV_BGRA2BGR);
    else
        cvtColor(rgba, cvFrame, CV_BGRA2GRAY);
    return true;
}

bool FrameGrabber::yuyv_2bmp(const QVideoFrame& in, cv::Mat& cvFrame)
{
    // Some of the cheaper cameras tend to mess up the data for the first frames,
    // so reject buffers that are too small for the reported geometry (2 bytes/pixel)
    if (abs(in.height()) * abs(in.width()) * 2 > in.mappedBytes())
        return false;
    Mat yuyv = Mat(abs(in.height()), abs(in.width()), CV_8UC2, (void*)in.bits());
    if (code == CV_8UC3)
        cvtColor(yuyv, cvFrame, CV_YUV2BGR_YUYV);
    else
        cvtColor(yuyv, cvFrame, CV_YUV2GRAY_YUYV);
    return true;
}

// TODO: add support for other frame formats
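/* Sketch for the TODO above -- NOT part of this project's sources. Note that
 * Format_RGB24 is advertised in supportedPixelFormats() but has no case in
 * present(); a converter following the same pattern as rgb32_2bmp() could look
 * like the free function below. It assumes tightly packed R,G,B triplets (a
 * real implementation should honor QVideoFrame::bytesPerLine() for row
 * padding). Wiring it up would mean adding a `case QVideoFrame::Format_RGB24:`
 * to the switch in present() and, to match the other converters, turning it
 * into a member declared in FrameGrabber.h. */
static bool rgb24_2bmp_sketch(int code, const QVideoFrame& in, cv::Mat& cvFrame)
{
    // abs() for the same reason as rgb32_2bmp: some DirectShow cameras report
    // negative frame sizes.
    Mat rgb = Mat(abs(in.height()), abs(in.width()), CV_8UC3, (void*)in.bits());
    if (code == CV_8UC3)
        cvtColor(rgb, cvFrame, CV_RGB2BGR); // OpenCV expects BGR channel order
    else
        cvtColor(rgb, cvFrame, CV_RGB2GRAY);
    return true;
}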