diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8da38b9..cb0046c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -35,6 +35,15 @@ find_package(KF5 ${KF_MIN_VERSION} REQUIRED COMPONENTS Kirigami2 WindowSystem I1
 find_package(Libmpv)
 set_package_properties(Libmpv PROPERTIES TYPE REQUIRED)
 
+find_package(FFmpeg)
+set_package_properties(FFmpeg PROPERTIES TYPE REQUIRED)
+
+find_package(YouTubeDl)
+set_package_properties(YouTubeDl PROPERTIES TYPE RUNTIME)
+
+find_package(Ytdlp)
+set_package_properties(Ytdlp PROPERTIES TYPE RUNTIME)
+
 FIND_PACKAGE(LIBPODOFO)
 IF(NOT LIBPODOFO_FOUND)
 MESSAGE("+++PoDoFo not found... building private copy")
diff --git a/shell.nix b/shell.nix
index d01cb90..5ad3090 100644
--- a/shell.nix
+++ b/shell.nix
@@ -14,6 +14,7 @@ mkShell rec {
     pkg-config
     libsForQt5.wrapQtAppsHook
     makeWrapper
+    # ffmpeg
   ];
 
   buildInputs = [
@@ -34,11 +35,11 @@ mkShell rec {
     libsForQt5.ki18n
     libsForQt5.kcoreaddons
     libsForQt5.kguiaddons
+    # libsForQt5.kconfig
 
     podofo
     mpv
-    # libsForQt5.kconfig
-    # ffmpeg-full
+    ffmpeg-full
     # yt-dlp
 
     # Rust tools
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 84ec714..2d3a949 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -16,6 +16,15 @@ target_sources(presenter
     cpp/presentationsqlmodel.cpp cpp/presentationsqlmodel.h
     cpp/mpv/mpvobject.h cpp/mpv/mpvobject.cpp
     cpp/mpv/qthelper.hpp cpp/mpv/mpvhelpers.h
+    cpp/framedecoder.h cpp/framedecoder.cpp
 )
 
 target_compile_options (presenter PUBLIC -fexceptions)
+
+target_link_libraries(presenter PRIVATE
+    ${AVUTIL_LIBRARIES}
+    ${AVFILTER_LIBRARIES}
+    ${AVFORMAT_LIBRARIES}
+    ${AVCODEC_LIBRARIES}
+    ${SWSCALE_LIBRARIES}
+)
diff --git a/src/cpp/framedecoder.cpp b/src/cpp/framedecoder.cpp
new file mode 100644
index 0000000..96a9af3
--- /dev/null
+++ b/src/cpp/framedecoder.cpp
@@ -0,0 +1,439 @@
+/*
+    SPDX-FileCopyrightText: 2010 Dirk Vanden Boer
+
+    SPDX-License-Identifier: GPL-2.0-or-later
+*/
+
+#include "framedecoder.h"
+
+#include <QDebug>
+#include <QFileInfo>
+#include <QImage>
+
+extern "C" {
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+}
+
+using namespace std;
+
+FrameDecoder::FrameDecoder(const QString &filename, AVFormatContext *pavContext)
+    : m_VideoStream(-1)
+    , m_pFormatContext(pavContext)
+    , m_pVideoCodecContext(nullptr)
+    , m_pVideoCodec(nullptr)
+    , m_pFrame(nullptr)
+    , m_pFrameBuffer(nullptr)
+    , m_pPacket(nullptr)
+    , m_FormatContextWasGiven(pavContext != nullptr)
+    , m_AllowSeek(true)
+    , m_initialized(false)
+    , m_bufferSinkContext(nullptr)
+    , m_bufferSourceContext(nullptr)
+    , m_filterGraph(nullptr)
+    , m_filterFrame(nullptr)
+{
+    initialize(filename);
+}
+
+FrameDecoder::~FrameDecoder()
+{
+    destroy();
+}
+
+void FrameDecoder::initialize(const QString &filename)
+{
+    m_lastWidth = -1;
+    m_lastHeight = -1;
+    m_lastPixfmt = AV_PIX_FMT_NONE;
+
+#if (LIBAVFORMAT_VERSION_MAJOR < 58)
+    av_register_all();
+#endif
+
+    QFileInfo fileInfo(filename);
+
+    if ((!m_FormatContextWasGiven) && avformat_open_input(&m_pFormatContext, fileInfo.absoluteFilePath().toLocal8Bit().data(), nullptr, nullptr) != 0) {
+        qDebug() << "Could not open input file: " << fileInfo.absoluteFilePath();
+        return;
+    }
+
+    if (avformat_find_stream_info(m_pFormatContext, nullptr) < 0) {
+        qDebug() << "Could not find stream information";
+        return;
+    }
+
+    if (!initializeVideo()) {
+        // It already printed a message
+        return;
+    }
+    m_pFrame = av_frame_alloc();
+
+    if (m_pFrame) {
+        m_initialized = true;
+    }
+}
+
+bool FrameDecoder::getInitialized()
+{
+    return m_initialized;
+}
+
+void FrameDecoder::destroy()
+{
+    deleteFilterGraph();
+    if (m_pVideoCodecContext) {
+        avcodec_close(m_pVideoCodecContext);
+        avcodec_free_context(&m_pVideoCodecContext);
+        m_pVideoCodecContext = nullptr;
+    }
+
+    if ((!m_FormatContextWasGiven) && m_pFormatContext) {
+        avformat_close_input(&m_pFormatContext);
+        m_pFormatContext = nullptr;
+    }
+
+    if (m_pPacket) {
+        av_packet_unref(m_pPacket);
+        delete m_pPacket;
+        m_pPacket = nullptr;
+    }
+
+    if (m_pFrame) {
+        av_frame_free(&m_pFrame);
+        m_pFrame = nullptr;
+    }
+
+    if (m_pFrameBuffer) {
+        av_free(m_pFrameBuffer);
+        m_pFrameBuffer = nullptr;
+    }
+}
+
+QString FrameDecoder::getCodec()
+{
+    QString codecName;
+    if (m_pVideoCodec) {
+        codecName = QString::fromLatin1(m_pVideoCodec->name);
+    }
+    return codecName;
+}
+
+bool FrameDecoder::initializeVideo()
+{
+    m_VideoStream = av_find_best_stream(m_pFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &m_pVideoCodec, 0);
+    if (m_VideoStream < 0) {
+        qDebug() << "Could not find video stream";
+        return false;
+    }
+
+    m_pVideoCodecContext = avcodec_alloc_context3(m_pVideoCodec);
+    avcodec_parameters_to_context(m_pVideoCodecContext, m_pFormatContext->streams[m_VideoStream]->codecpar);
+
+    if (m_pVideoCodec == nullptr) {
+        // set to nullptr, otherwise avcodec_close(m_pVideoCodecContext) crashes
+        m_pVideoCodecContext = nullptr;
+        qDebug() << "Video Codec not found";
+        return false;
+    }
+
+    m_pVideoCodecContext->workaround_bugs = 1;
+
+    if (avcodec_open2(m_pVideoCodecContext, m_pVideoCodec, nullptr) < 0) {
+        qDebug() << "Could not open video codec";
+        return false;
+    }
+
+    return true;
+}
+
+int FrameDecoder::getWidth()
+{
+    if (m_pVideoCodecContext) {
+        return m_pVideoCodecContext->width;
+    }
+
+    return -1;
+}
+
+int FrameDecoder::getHeight()
+{
+    if (m_pVideoCodecContext) {
+        return m_pVideoCodecContext->height;
+    }
+
+    return -1;
+}
+
+int FrameDecoder::getDuration()
+{
+    if (m_pFormatContext) {
+        return static_cast<int>(m_pFormatContext->duration / AV_TIME_BASE);
+    }
+
+    return 0;
+}
+
+void FrameDecoder::seek(int timeInSeconds)
+{
+    if (!m_AllowSeek) {
+        return;
+    }
+
+    qint64 timestamp = AV_TIME_BASE * static_cast<qint64>(timeInSeconds);
+
+    if (timestamp < 0) {
+        timestamp = 0;
+    }
+
+    int ret = av_seek_frame(m_pFormatContext, -1, timestamp, 0);
+    if (ret >= 0) {
+        avcodec_flush_buffers(m_pVideoCodecContext);
+    } else {
+        qDebug() << "Seeking in video failed";
+        return;
+    }
+
+    int keyFrameAttempts = 0;
+    bool gotFrame = 0;
+
+    do {
+        int count = 0;
+        gotFrame = 0;
+
+        while (!gotFrame && count < 20) {
+            getVideoPacket();
+            gotFrame = decodeVideoPacket();
+            ++count;
+        }
+
+        ++keyFrameAttempts;
+    } while ((!gotFrame || !m_pFrame->key_frame) && keyFrameAttempts < 200);
+
+    if (gotFrame == 0) {
+        qDebug() << "Seeking in video failed";
+    }
+}
+
+bool FrameDecoder::decodeVideoFrame()
+{
+    bool frameFinished = false;
+
+    while (!frameFinished && getVideoPacket()) {
+        frameFinished = decodeVideoPacket();
+    }
+
+    if (!frameFinished) {
+        qDebug() << "decodeVideoFrame() failed: frame not finished";
+    }
+
+    return frameFinished;
+}
+
+bool FrameDecoder::decodeVideoPacket()
+{
+    if (m_pPacket->stream_index != m_VideoStream) {
+        return false;
+    }
+
+    av_frame_unref(m_pFrame);
+
+    avcodec_send_packet(m_pVideoCodecContext, m_pPacket);
+    int ret = avcodec_receive_frame(m_pVideoCodecContext, m_pFrame);
+    if (ret == AVERROR(EAGAIN)) {
+        return false;
+    }
+
+    return true;
+}
+
+bool FrameDecoder::getVideoPacket()
+{
+    bool framesAvailable = true;
+    bool frameDecoded = false;
+
+    int attempts = 0;
+
+    if (m_pPacket) {
+        av_packet_unref(m_pPacket);
+        delete m_pPacket;
+    }
+
+    m_pPacket = new AVPacket();
+
+    while (framesAvailable && !frameDecoded && (attempts++ < 1000)) {
+        framesAvailable = av_read_frame(m_pFormatContext, m_pPacket) >= 0;
+        if (framesAvailable) {
+            frameDecoded = m_pPacket->stream_index == m_VideoStream;
+            if (!frameDecoded) {
+                av_packet_unref(m_pPacket);
+            }
+        }
+    }
+
+    return frameDecoded;
+}
+
+void FrameDecoder::deleteFilterGraph()
+{
+    if (m_filterGraph) {
+        av_frame_free(&m_filterFrame);
+        avfilter_graph_free(&m_filterGraph);
+        m_filterGraph = nullptr;
+    }
+}
+
+bool FrameDecoder::initFilterGraph(enum AVPixelFormat pixfmt, int width, int height)
+{
+    AVFilterInOut *inputs = nullptr, *outputs = nullptr;
+
+    deleteFilterGraph();
+    m_filterGraph = avfilter_graph_alloc();
+
+    QByteArray arguments("buffer=");
+    arguments += "video_size=" + QByteArray::number(width) + 'x' + QByteArray::number(height) + ':';
+    arguments += "pix_fmt=" + QByteArray::number(pixfmt) + ':';
+    arguments += "time_base=1/1:pixel_aspect=0/1[in];";
+    arguments += "[in]yadif[out];";
+    arguments += "[out]buffersink";
+
+    int ret = avfilter_graph_parse2(m_filterGraph, arguments.constData(), &inputs, &outputs);
+    if (ret < 0) {
+        qWarning() << "Unable to parse filter graph";
+        return false;
+    }
+
+    if (inputs || outputs) {
+        return false;
+    }
+
+    ret = avfilter_graph_config(m_filterGraph, nullptr);
+    if (ret < 0) {
+        qWarning() << "Unable to validate filter graph";
+        return false;
+    }
+
+    m_bufferSourceContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffer_0");
+    m_bufferSinkContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffersink_2");
+    if (!m_bufferSourceContext || !m_bufferSinkContext) {
+        qWarning() << "Unable to get source or sink";
+        return false;
+    }
+    m_filterFrame = av_frame_alloc();
+    m_lastWidth = width;
+    m_lastHeight = height;
+    m_lastPixfmt = pixfmt;
+
+    return true;
+}
+
+bool FrameDecoder::processFilterGraph(AVFrame *dst, const AVFrame *src, enum AVPixelFormat pixfmt, int width, int height)
+{
+    if (!m_filterGraph || width != m_lastWidth || height != m_lastHeight || pixfmt != m_lastPixfmt) {
+        if (!initFilterGraph(pixfmt, width, height)) {
+            return false;
+        }
+    }
+
+    memcpy(m_filterFrame->data, src->data, sizeof(src->data));
+    memcpy(m_filterFrame->linesize, src->linesize, sizeof(src->linesize));
+    m_filterFrame->width = width;
+    m_filterFrame->height = height;
+    m_filterFrame->format = pixfmt;
+
+    int ret = av_buffersrc_add_frame(m_bufferSourceContext, m_filterFrame);
+    if (ret < 0) {
+        return false;
+    }
+
+    ret = av_buffersink_get_frame(m_bufferSinkContext, m_filterFrame);
+    if (ret < 0) {
+        return false;
+    }
+
+    av_image_copy(dst->data, dst->linesize, (const uint8_t **)m_filterFrame->data, m_filterFrame->linesize, pixfmt, width, height);
+    av_frame_unref(m_filterFrame);
+
+    return true;
+}
+
+void FrameDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, QImage &videoFrame)
+{
+    if (m_pFrame->interlaced_frame) {
+        processFilterGraph((AVFrame *)m_pFrame, (AVFrame *)m_pFrame, m_pVideoCodecContext->pix_fmt, m_pVideoCodecContext->width, m_pVideoCodecContext->height);
+    }
+
+    int scaledWidth, scaledHeight;
+    convertAndScaleFrame(AV_PIX_FMT_RGB24, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);
+    // .copy() since QImage otherwise assumes the memory will continue to be available.
+    // We could instead pass a custom deleter, but meh.
+    videoFrame = QImage(m_pFrame->data[0], scaledWidth, scaledHeight, m_pFrame->linesize[0], QImage::Format_RGB888).copy();
+}
+
+void FrameDecoder::convertAndScaleFrame(AVPixelFormat format, int scaledSize, bool maintainAspectRatio, int &scaledWidth, int &scaledHeight)
+{
+    calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);
+    SwsContext *scaleContext = sws_getContext(m_pVideoCodecContext->width,
+                                              m_pVideoCodecContext->height,
+                                              m_pVideoCodecContext->pix_fmt,
+                                              scaledWidth,
+                                              scaledHeight,
+                                              format,
+                                              SWS_BICUBIC,
+                                              nullptr,
+                                              nullptr,
+                                              nullptr);
+
+    if (nullptr == scaleContext) {
+        qDebug() << "Failed to create resize context";
+        return;
+    }
+
+    AVFrame *convertedFrame = nullptr;
+    uint8_t *convertedFrameBuffer = nullptr;
+
+    createAVFrame(&convertedFrame, &convertedFrameBuffer, scaledWidth, scaledHeight, format);
+
+    sws_scale(scaleContext, m_pFrame->data, m_pFrame->linesize, 0, m_pVideoCodecContext->height, convertedFrame->data, convertedFrame->linesize);
+    sws_freeContext(scaleContext);
+
+    av_frame_free(&m_pFrame);
+    av_free(m_pFrameBuffer);
+
+    m_pFrame = convertedFrame;
+    m_pFrameBuffer = convertedFrameBuffer;
+}
+
+void FrameDecoder::calculateDimensions(int squareSize, bool maintainAspectRatio, int &destWidth, int &destHeight)
+{
+    if (!maintainAspectRatio) {
+        destWidth = squareSize;
+        destHeight = squareSize;
+    } else {
+        int srcWidth = m_pVideoCodecContext->width;
+        int srcHeight = m_pVideoCodecContext->height;
+        int ascpectNominator = m_pVideoCodecContext->sample_aspect_ratio.num;
+        int ascpectDenominator = m_pVideoCodecContext->sample_aspect_ratio.den;
+
+        if (ascpectNominator != 0 && ascpectDenominator != 0) {
+            srcWidth = srcWidth * ascpectNominator / ascpectDenominator;
+        }
+
+        if (srcWidth > srcHeight) {
+            destWidth = squareSize;
+            destHeight = int(float(squareSize) / srcWidth * srcHeight);
+        } else {
+            destWidth = int(float(squareSize) / srcHeight * srcWidth);
+            destHeight = squareSize;
+        }
+    }
+}
+
+void FrameDecoder::createAVFrame(AVFrame **avFrame, quint8 **frameBuffer, int width, int height, AVPixelFormat format)
+{
+    *avFrame = av_frame_alloc();
+
+    int numBytes = av_image_get_buffer_size(format, width + 1, height + 1, 16);
+    *frameBuffer = reinterpret_cast<quint8 *>(av_malloc(numBytes));
+    av_image_fill_arrays((*avFrame)->data, (*avFrame)->linesize, *frameBuffer, format, width, height, 1);
+}
diff --git a/src/cpp/framedecoder.h b/src/cpp/framedecoder.h
new file mode 100644
index 0000000..8530ada
--- /dev/null
+++ b/src/cpp/framedecoder.h
@@ -0,0 +1,78 @@
+/*
+    SPDX-FileCopyrightText: 2010 Dirk Vanden Boer
+
+    SPDX-License-Identifier: GPL-2.0-or-later
+*/
+
+#ifndef MOVIEDECODER_H
+#define MOVIEDECODER_H
+
+#include <QString>
+
+class QImage;
+
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavfilter/avfilter.h>
+#include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
+#include <libswscale/swscale.h>
+}
+
+class FrameDecoder
+{
+public:
+    explicit FrameDecoder(const QString &filename, AVFormatContext *pavContext = nullptr);
+    ~FrameDecoder();
+
+    QString getCodec();
+    void seek(int timeInSeconds);
+    bool decodeVideoFrame();
+    void getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, QImage &videoFrame);
+
+    int getWidth();
+    int getHeight();
+    int getDuration();
+
+    void initialize(const QString &filename);
+    void destroy();
+    bool getInitialized();
+
+private:
+    bool initializeVideo();
+
+    bool decodeVideoPacket();
+    bool getVideoPacket();
+    void convertAndScaleFrame(AVPixelFormat format, int scaledSize, bool maintainAspectRatio, int &scaledWidth, int &scaledHeight);
+    void createAVFrame(AVFrame **avFrame, quint8 **frameBuffer, int width, int height, AVPixelFormat format);
+    void calculateDimensions(int squareSize, bool maintainAspectRatio, int &destWidth, int &destHeight);
+
+    void deleteFilterGraph();
+    bool initFilterGraph(enum AVPixelFormat pixfmt, int width, int height);
+    bool processFilterGraph(AVFrame *dst, const AVFrame *src, enum AVPixelFormat pixfmt, int width, int height);
+
+private:
+    int m_VideoStream;
+    AVFormatContext *m_pFormatContext;
+    AVCodecContext *m_pVideoCodecContext;
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(59, 0, 100)
+    AVCodec *m_pVideoCodec;
+#else
+    const AVCodec *m_pVideoCodec;
+#endif
+    AVFrame *m_pFrame;
+    quint8 *m_pFrameBuffer;
+    AVPacket *m_pPacket;
+    bool m_FormatContextWasGiven;
+    bool m_AllowSeek;
+    bool m_initialized;
+    AVFilterContext *m_bufferSinkContext;
+    AVFilterContext *m_bufferSourceContext;
+    AVFilterGraph *m_filterGraph;
+    AVFrame *m_filterFrame;
+    int m_lastWidth;
+    int m_lastHeight;
+    enum AVPixelFormat m_lastPixfmt;
+};
+
+#endif
diff --git a/src/cpp/slide.cpp b/src/cpp/slide.cpp
index ac23dfc..2e43d80 100644
--- a/src/cpp/slide.cpp
+++ b/src/cpp/slide.cpp
@@ -44,6 +44,11 @@ QString Slide::imageBackground() const
     return m_imageBackground;
 }
 
+QString Slide::vidThumbnail() const
+{
+    return m_vidThumbnail;
+}
+
 QString Slide::videoBackground() const
 {
     return m_videoBackground;
@@ -137,6 +142,16 @@ void Slide::setImageBackground(QString imageBackground)
     emit imageBackgroundChanged(m_imageBackground);
 }
 
+void Slide::setVidThumbnail(QString vidThumbnail)
+{
+    if (m_vidThumbnail == vidThumbnail)
+        return;
+
+    qDebug() << "####changing video thumbnail to: " << vidThumbnail;
+    m_vidThumbnail = vidThumbnail;
+    emit vidThumbnailChanged(m_vidThumbnail);
+}
+
 void Slide::setVideoBackground(QString videoBackground)
 {
     if (m_videoBackground == videoBackground)
diff --git a/src/cpp/slide.h b/src/cpp/slide.h
index f26149a..8197a13 100644
--- a/src/cpp/slide.h
+++ b/src/cpp/slide.h
@@ -28,6 +28,8 @@ class Slide : public QObject
     Q_PROPERTY(int slideIndex READ slideIndex WRITE setSlideIndex NOTIFY slideIndexChanged)
     Q_PROPERTY(bool active READ active WRITE setActive NOTIFY activeChanged)
     Q_PROPERTY(bool selected READ selected WRITE setSelected NOTIFY selectedChanged)
+    Q_PROPERTY(QString vidThumbnail READ vidThumbnail WRITE setVidThumbnail
+               NOTIFY vidThumbnailChanged)
     // QML_ELEMENT
 
 public:
@@ -53,6 +55,7 @@ public:
     int serviceItemId() const;
     bool active() const;
     bool selected() const;
+    QString vidThumbnail() const;
 
     Q_INVOKABLE void setText(QString text);
     Q_INVOKABLE void setType(QString type);
@@ -68,6 +71,7 @@ public:
     Q_INVOKABLE void setSlideIndex(int slideIndex);
     Q_INVOKABLE void setActive(bool active);
     Q_INVOKABLE void setSelected(bool selected);
+    Q_INVOKABLE void setVidThumbnail(QString vidThumbnail);
 
 signals:
     Q_INVOKABLE void textChanged(QString text);
@@ -84,6 +88,7 @@ signals:
     Q_INVOKABLE void slideIndexChanged(int slideIndex);
     Q_INVOKABLE void activeChanged(bool active);
     Q_INVOKABLE void selectedChanged(bool selected);
+    Q_INVOKABLE void vidThumbnailChanged(QString vidThumbnail);
 
 private:
     int m_id;
@@ -101,6 +106,7 @@ private:
     int m_slideIndex;
     bool m_active;
     bool m_selected;
+    QString m_vidThumbnail;
 };
 
 #endif //SLIDE_H
diff --git a/src/cpp/slidemodel.cpp b/src/cpp/slidemodel.cpp
index 0a88cb8..5b9c08d 100644
--- a/src/cpp/slidemodel.cpp
+++ b/src/cpp/slidemodel.cpp
@@ -2,6 +2,8 @@
 #include "mpv/mpvobject.h"
 #include "serviceitem.h"
 #include "slide.h"
+#include "framedecoder.h"
+
 #include 
 #include 
 #include 
@@ -83,6 +85,8 @@ QVariant SlideModel::data(const QModelIndex &index, int role) const {
         return item->active();
     case SelectedRole:
         return item->selected();
+    case VidThumbnailRole:
+        return item->vidThumbnail();
     default:
         return QVariant();
     }
@@ -102,7 +106,8 @@ QHash<int, QByteArray> SlideModel::roleNames() const {
         {VerticalTextAlignmentRole, "verticalTextAlignment"},
         {SlideIndexRole, "slideIndex"},
         {ActiveRole, "active"},
-        {SelectedRole, "selected"}
+        {SelectedRole, "selected"},
+        {VidThumbnailRole, "vidThumbnail"}
     };
 
     return mapping;
@@ -645,24 +650,15 @@ void SlideModel::moveRowFromService(const int &fromIndex,
     }
 }
 
-QString SlideModel::thumbnailVideo(QString video, int serviceItemId) {
-
-    qDebug() << "dir location " << writeDir.absolutePath();
-    if (!writeDir.mkpath(".")) {
-        qFatal("Failed to create writable location at %s", qPrintable(writeDir.absolutePath()));
-    }
+QString SlideModel::thumbnailVideo(QString video, int serviceItemId, int index) {
     QDir dir = writeDir.absolutePath() + "/librepresenter/thumbnails";
-    qDebug() << "thumbnails dir: " << dir;
     QDir absDir = writeDir.absolutePath() + "/librepresenter";
     if (!dir.exists()) {
         qDebug() << dir.path() << "does not exist";
         absDir.mkdir("thumbnails");
     }
-    qDebug() << "@@@@@@@@@@@@@@@@@@@@@";
-    qDebug() << dir.path();
-    qDebug() << "@@@@@@@@@@@@@@@@@@@@@";
-
+
     QFileInfo vid(video);
     QString id;
     id.setNum(serviceItemId);
@@ -670,18 +666,63 @@ QString SlideModel::thumbnailVideo(QString video, int serviceItemId) {
     qDebug() << vidName;
     QString thumbnail = dir.path() + "/" + vidName + ".webp";
     QFileInfo thumbnailInfo(dir.path() + "/" + vidName + ".jpg");
-    qDebug() << thumbnailInfo.filePath();
-    // if (thumbnail.open(QIODevice::ReadOnly)) {
-    //     qDebug() << "@@@@@@@@@@@@@@@@@@@@@";
-    //     qDebug() << thumbnailInfo.filePath();
-    //     qDebug() << "@@@@@@@@@@@@@@@@@@@@@";
-    // }
+    qDebug() << thumbnailInfo.filePath() << "FOR" << index;
+    if (thumbnailInfo.exists()) {
+        for (int i = 0; i < rowCount(); i++) {
+            if (m_items[i]->serviceItemId() == serviceItemId) {
+                m_items[i]->setVidThumbnail("file://" + thumbnailInfo.absoluteFilePath());
+            }
+        }
+        return thumbnailInfo.filePath();
+    }
 
-    MpvObject *mpv;
-    mpv->loadFile(video);
-    mpv->seek(5);
-    mpv->screenshotToFile(thumbnail);
+
+    QImage image;
+    QString filename = video.right(video.size() - 7);
+    image = frameToImage(filename, 576);
+    if (image.isNull()) {
+        qDebug() << QStringLiteral("Failed to create thumbnail for file: %1").arg(video);
+        return "failed";
+    }
+
+    qDebug() << "dir location " << writeDir.absolutePath();
+    if (!writeDir.mkpath(".")) {
+        qFatal("Failed to create writable location at %s", qPrintable(writeDir.absolutePath()));
+    }
+
+    if (!image.save(thumbnailInfo.filePath())) {
+        qDebug() << QStringLiteral("Failed to save thumbnail for file: %1").arg(video);
+    }
+
+    for (int i = 0; i < rowCount(); i++) {
+        if (m_items[i]->serviceItemId() == serviceItemId) {
+            m_items[i]->setVidThumbnail("file://" + thumbnailInfo.absoluteFilePath());
+        }
+    }
+    // MpvObject mpv;
+    // mpv.loadFile(video);
+    // mpv.seek(5);
+    // mpv.screenshotToFile(thumbnail);
     // mpv.quit();
 
     return thumbnailInfo.filePath();
 }
+
+QImage SlideModel::frameToImage(const QString &video, int width)
+{
+    QImage image;
+    FrameDecoder frameDecoder(video, nullptr);
+    if (!frameDecoder.getInitialized()) {
+        return image;
+    }
+    // before seeking, a frame has to be decoded
+    if (!frameDecoder.decodeVideoFrame()) {
+        return image;
+    }
+
+    int secondToSeekTo = frameDecoder.getDuration() * 20 / 100;
+    frameDecoder.seek(secondToSeekTo);
+    frameDecoder.getScaledVideoFrame(width, true, image);
+
+    return image;
+}
diff --git a/src/cpp/slidemodel.h b/src/cpp/slidemodel.h
index 49a42c8..70aec9b 100644
--- a/src/cpp/slidemodel.h
+++ b/src/cpp/slidemodel.h
@@ -29,7 +29,8 @@ public:
         SlideIndexRole,
         ImageCountRole,
         ActiveRole,
-        SelectedRole
+        SelectedRole,
+        VidThumbnailRole
     };
 
     // Basic functionality:
@@ -79,7 +80,8 @@ public:
     Q_INVOKABLE QVariantMap getItem(int index) const;
     Q_INVOKABLE QVariantList getItems();
     Q_INVOKABLE int findSlideIdFromServItm(int index);
-    Q_INVOKABLE QString thumbnailVideo(QString video, int serviceItemId);
+    Q_INVOKABLE QString thumbnailVideo(QString video, int serviceItemId, int index);
+    QImage frameToImage(const QString &video, int width);
 
 public slots:
diff --git a/src/qml/presenter/Presentation.qml b/src/qml/presenter/Presentation.qml
index 0765c4f..1b3d5ee 100644
--- a/src/qml/presenter/Presentation.qml
+++ b/src/qml/presenter/Presentation.qml
@@ -245,8 +245,8 @@ FocusScope {
             cellWidth: Kirigami.Units.gridUnit * 11
             cellHeight: Kirigami.Units.gridUnit * 8
             /* spacing: Kirigami.Units.smallSpacing * 2 */
-            cacheBuffer: 200
-            reuseItems: false
+            cacheBuffer: 800
+            reuseItems: true
             clip: true
             model: SlideModel
             delegate: Presenter.PreviewSlideListDelegate { showVidBG: false }
diff --git a/src/qml/presenter/PreviewSlideListDelegate.qml b/src/qml/presenter/PreviewSlideListDelegate.qml
index edc893d..e9aed71 100644
--- a/src/qml/presenter/PreviewSlideListDelegate.qml
+++ b/src/qml/presenter/PreviewSlideListDelegate.qml
@@ -10,6 +10,11 @@ Item {
     implicitHeight: Kirigami.Units.gridUnit * 6.5
     implicitWidth: Kirigami.Units.gridUnit * 10
     property bool showVidBG
+    Component.onCompleted: {
+        if (model.videoBackground != "")
+            SlideModel.thumbnailVideo(model.videoBackground, model.serviceItemId, index);
+    }
+
     Rectangle {
         id: previewHighlight
         anchors.centerIn: parent
@@ -24,21 +29,17 @@ Item {
             Kirigami.Theme.backgroundColor
     }
 
-    Presenter.Slide {
+    Presenter.PreviewSlide {
         id: previewSlideItem
         anchors.centerIn: parent
         implicitWidth: height / 9 * 16
         implicitHeight: parent.height - Kirigami.Units.smallSpacing * 2
         textSize: model.fontSize
         itemType: model.type
-        imageSource: model.imageBackground
-        videoSource: showVidBG ? model.videoBackground : ""
-        audioSource: ""
+        imageSource: model.videoBackground != "" ? model.vidThumbnail : model.imageBackground
         chosenFont: model.font
        text: model.text
        pdfIndex: slideIndex
-        preview: true
-        editMode: true
    }
 }
@@ -59,8 +60,8 @@ Item {
        anchors.fill: parent
        hoverEnabled: true
        onClicked: {
-            /* changeSlide(index); */
-            showPassiveNotification(SlideModel.thumbnailVideo(model.videoBackground, model.serviceItemId));
+            changeSlide(index);
+            /* showPassiveNotification(SlideModel.thumbnailVideo(model.videoBackground, model.serviceItemId, index)); */
        }
        cursorShape: Qt.PointingHandCursor
        propagateComposedEvents: true
@@ -69,7 +70,7 @@ Item {
 
     Connections {
        target: SlideModel
-        onDataChanged: {
+        function onDataChanged() {
            if (active) {
                previewSlidesList.currentIndex = index;
                previewSlidesList.positionViewAtIndex(index, ListView.Contain);