feat(*): rewrite the project in C/C++ (#1172)

This commit is contained in:
K4YT3X
2024-10-07 19:29:00 -07:00
committed by GitHub
parent 721de8cbce
commit a7952fc493
80 changed files with 6664 additions and 5734 deletions

202
src/conversions.cpp Normal file
View File

@@ -0,0 +1,202 @@
#include <cstdio>
// FFmpeg includes
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
// ncnn includes
#include <mat.h>
#include "conversions.h"
// Convert an AVFrame to the specified pixel format.
//
// Allocates and returns a new AVFrame in `pix_fmt` with the same dimensions
// as `src_frame`; frame properties (pts, color metadata, etc.) are copied
// from the source. The caller owns the returned frame and must release it
// with av_frame_free(). Returns nullptr on any failure.
AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
    AVFrame *dst_frame = av_frame_alloc();
    if (dst_frame == nullptr) {
        fprintf(stderr, "Failed to allocate destination AVFrame.\n");
        return nullptr;
    }

    dst_frame->format = pix_fmt;
    dst_frame->width = src_frame->width;
    dst_frame->height = src_frame->height;

    // Allocate memory for the converted frame (32-byte alignment for SIMD)
    if (av_frame_get_buffer(dst_frame, 32) < 0) {
        fprintf(stderr, "Failed to allocate memory for AVFrame.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }

    // Preserve timestamps and other metadata from the source frame
    av_frame_copy_props(dst_frame, src_frame);

    // Create a SwsContext for pixel format conversion
    SwsContext *sws_ctx = sws_getContext(
        src_frame->width,
        src_frame->height,
        static_cast<AVPixelFormat>(src_frame->format),
        dst_frame->width,
        dst_frame->height,
        pix_fmt,
        SWS_BILINEAR,
        nullptr,
        nullptr,
        nullptr
    );
    if (sws_ctx == nullptr) {
        fprintf(stderr, "Failed to initialize swscale context.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }

    // Perform the conversion; sws_scale returns the height of the output
    // slice, so anything other than the full frame height is a failure
    int scaled_rows = sws_scale(
        sws_ctx,
        src_frame->data,
        src_frame->linesize,
        0,
        src_frame->height,
        dst_frame->data,
        dst_frame->linesize
    );

    // Clean up
    sws_freeContext(sws_ctx);

    if (scaled_rows != dst_frame->height) {
        fprintf(stderr, "Failed to convert AVFrame pixel format.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }
    return dst_frame;
}
// Convert AVFrame to ncnn::Mat by copying the data.
//
// The input is converted to BGR24 first if it is in any other pixel format;
// the temporary converted frame is freed before returning. Returns an empty
// ncnn::Mat on conversion failure. The input `frame` itself is never freed.
ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
    AVFrame *converted_frame = nullptr;

    // Convert to BGR24 format if necessary
    if (frame->format != AV_PIX_FMT_BGR24) {
        converted_frame = convert_avframe_pix_fmt(frame, AV_PIX_FMT_BGR24);
        if (!converted_frame) {
            fprintf(stderr, "Failed to convert AVFrame to BGR24.\n");
            return ncnn::Mat(); // Return an empty ncnn::Mat on failure
        }
    } else {
        converted_frame = frame; // If the frame is already in BGR24, use it directly
    }

    // Allocate a new ncnn::Mat and copy the data
    int width = converted_frame->width;
    int height = converted_frame->height;
    // NOTE(review): these arguments resolve to the Mat(w, h, c, elemsize)
    // constructor with c = 3 and elemsize = 3 bytes, so row(y) advances by
    // w * 3 bytes — which is what the interleaved-BGR row copies below rely
    // on. Confirm this matches what downstream consumers expect, since ncnn
    // 3D mats are conventionally planar (CHW), not interleaved.
    ncnn::Mat ncnn_image = ncnn::Mat(width, height, (size_t)3, 3); // BGR has 3 channels

    // Manually copy the pixel data from AVFrame to the new ncnn::Mat,
    // honoring the AVFrame's row stride (linesize may exceed width * 3)
    const uint8_t *src_data = converted_frame->data[0];
    for (int y = 0; y < height; y++) {
        uint8_t *dst_row = ncnn_image.row<uint8_t>(y);
        const uint8_t *src_row = src_data + y * converted_frame->linesize[0];
        memcpy(dst_row, src_row, width * 3); // Copy 3 channels (BGR) per pixel
    }

    // If we allocated a converted frame, free it
    if (converted_frame != frame) {
        av_frame_free(&converted_frame);
    }
    return ncnn_image;
}
// Convert ncnn::Mat to AVFrame with a specified pixel format (this part is unchanged).
//
// Allocates and returns a new AVFrame in `pix_fmt`; the caller owns it and
// must release it with av_frame_free(). Returns nullptr on failure. The
// returned frame's pts is left unset — callers are expected to assign it.
AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
    int ret;

    // Step 1: Allocate a destination AVFrame for the specified pixel format
    AVFrame *dst_frame = av_frame_alloc();
    if (!dst_frame) {
        fprintf(stderr, "Failed to allocate destination AVFrame.\n");
        return nullptr;
    }

    dst_frame->format = pix_fmt;
    dst_frame->width = mat.w;
    dst_frame->height = mat.h;

    // Allocate memory for the frame buffer
    if (av_frame_get_buffer(dst_frame, 32) < 0) {
        fprintf(stderr, "Failed to allocate memory for destination AVFrame.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }

    // Step 2: Convert ncnn::Mat to BGR AVFrame (intermediate hop, since
    // swscale converts between AVFrame pixel formats, not from ncnn mats)
    AVFrame *bgr_frame = av_frame_alloc();
    if (!bgr_frame) {
        fprintf(stderr, "Failed to allocate intermediate BGR AVFrame.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }

    bgr_frame->format = AV_PIX_FMT_BGR24;
    bgr_frame->width = mat.w;
    bgr_frame->height = mat.h;

    // Allocate memory for the intermediate BGR frame
    if (av_frame_get_buffer(bgr_frame, 32) < 0) {
        fprintf(stderr, "Failed to allocate memory for BGR AVFrame.\n");
        av_frame_free(&dst_frame);
        av_frame_free(&bgr_frame);
        return nullptr;
    }

    // Copy data from ncnn::Mat to the BGR AVFrame
    // mat.to_pixels(bgr_frame->data[0], ncnn::Mat::PIXEL_BGR);

    // Manually copy the pixel data from ncnn::Mat to the BGR AVFrame.
    // NOTE(review): mat.row() is treated here as yielding w*3 bytes of
    // interleaved BGR per row — the mirror of the layout assumption made in
    // avframe_to_ncnn_mat(); confirm both stay in sync.
    for (int y = 0; y < mat.h; y++) {
        uint8_t *dst_row = bgr_frame->data[0] + y * bgr_frame->linesize[0];
        const uint8_t *src_row = mat.row<const uint8_t>(y);
        memcpy(dst_row, src_row, mat.w * 3); // Copy 3 channels (BGR) per pixel
    }

    // Step 3: Convert the BGR frame to the desired pixel format
    SwsContext *sws_ctx = sws_getContext(
        bgr_frame->width,
        bgr_frame->height,
        AV_PIX_FMT_BGR24,
        dst_frame->width,
        dst_frame->height,
        pix_fmt,
        SWS_BILINEAR,
        nullptr,
        nullptr,
        nullptr
    );
    if (sws_ctx == nullptr) {
        fprintf(stderr, "Failed to initialize swscale context.\n");
        av_frame_free(&bgr_frame);
        av_frame_free(&dst_frame);
        return nullptr;
    }

    // Perform the conversion; sws_scale returns the number of output rows
    ret = sws_scale(
        sws_ctx,
        bgr_frame->data,
        bgr_frame->linesize,
        0,
        bgr_frame->height,
        dst_frame->data,
        dst_frame->linesize
    );

    // Clean up
    sws_freeContext(sws_ctx);
    av_frame_free(&bgr_frame);

    // A partial conversion (fewer rows than the frame height) is an error
    if (ret != dst_frame->height) {
        fprintf(stderr, "Failed to convert BGR AVFrame to destination pixel format.\n");
        av_frame_free(&dst_frame);
        return nullptr;
    }

    return dst_frame;
}

79
src/decoder.cpp Normal file
View File

@@ -0,0 +1,79 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/rational.h>
}
int init_decoder(
const char *input_filename,
AVFormatContext **fmt_ctx,
AVCodecContext **dec_ctx,
int *video_stream_index
) {
AVFormatContext *ifmt_ctx = NULL;
AVCodecContext *codec_ctx = NULL;
int ret;
if ((ret = avformat_open_input(&ifmt_ctx, input_filename, NULL, NULL)) < 0) {
fprintf(stderr, "Could not open input file '%s'\n", input_filename);
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information\n");
return ret;
}
// Find the first video stream
ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find video stream in the input, aborting\n");
return ret;
}
int stream_index = ret;
AVStream *video_stream = ifmt_ctx->streams[stream_index];
// Set up the decoder
const AVCodec *dec = avcodec_find_decoder(video_stream->codecpar->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find decoder for stream #%u\n", stream_index);
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(dec);
if (!codec_ctx) {
fprintf(stderr, "Failed to allocate the decoder context\n");
return AVERROR(ENOMEM);
}
if ((ret = avcodec_parameters_to_context(codec_ctx, video_stream->codecpar)) < 0) {
fprintf(stderr, "Failed to copy decoder parameters to input decoder context\n");
return ret;
}
// Set decoder time base and frame rate
codec_ctx->time_base = video_stream->time_base;
codec_ctx->pkt_timebase = video_stream->time_base;
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video_stream, NULL);
if ((ret = avcodec_open2(codec_ctx, dec, NULL)) < 0) {
fprintf(stderr, "Failed to open decoder for stream #%u\n", stream_index);
return ret;
}
*fmt_ctx = ifmt_ctx;
*dec_ctx = codec_ctx;
*video_stream_index = stream_index;
return 0;
}

206
src/encoder.cpp Normal file
View File

@@ -0,0 +1,206 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/codec.h>
#include <libavcodec/codec_id.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/rational.h>
}
#include "conversions.h"
#include "libvideo2x.h"
int init_encoder(
const char *output_filename,
AVFormatContext **ofmt_ctx,
AVCodecContext **enc_ctx,
AVCodecContext *dec_ctx,
EncoderConfig *encoder_config
) {
AVFormatContext *fmt_ctx = NULL;
AVCodecContext *codec_ctx = NULL;
int ret;
avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, output_filename);
if (!fmt_ctx) {
fprintf(stderr, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
// Create a new video stream
const AVCodec *enc = avcodec_find_encoder(encoder_config->codec);
if (!enc) {
fprintf(stderr, "Necessary encoder not found\n");
return AVERROR_ENCODER_NOT_FOUND;
}
AVStream *out_stream = avformat_new_stream(fmt_ctx, NULL);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
codec_ctx = avcodec_alloc_context3(enc);
if (!codec_ctx) {
fprintf(stderr, "Failed to allocate the encoder context\n");
return AVERROR(ENOMEM);
}
// Set encoding parameters
codec_ctx->height = encoder_config->output_height;
codec_ctx->width = encoder_config->output_width;
codec_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
codec_ctx->pix_fmt = encoder_config->pix_fmt;
codec_ctx->time_base = av_inv_q(dec_ctx->framerate);
if (codec_ctx->time_base.num == 0 || codec_ctx->time_base.den == 0) {
codec_ctx->time_base = av_inv_q(av_guess_frame_rate(fmt_ctx, out_stream, NULL));
}
// Set the bit rate and other encoder parameters if needed
codec_ctx->bit_rate = encoder_config->bit_rate;
codec_ctx->gop_size = 60; // Keyframe interval
codec_ctx->max_b_frames = 3; // B-frames
codec_ctx->keyint_min = 60; // Maximum GOP size
char crf_str[16];
snprintf(crf_str, sizeof(crf_str), "%.f", encoder_config->crf);
if (encoder_config->codec == AV_CODEC_ID_H264 || encoder_config->codec == AV_CODEC_ID_HEVC) {
av_opt_set(codec_ctx->priv_data, "crf", crf_str, 0);
av_opt_set(codec_ctx->priv_data, "preset", encoder_config->preset, 0);
}
if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if ((ret = avcodec_open2(codec_ctx, enc, NULL)) < 0) {
fprintf(stderr, "Cannot open video encoder\n");
return ret;
}
ret = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
if (ret < 0) {
fprintf(stderr, "Failed to copy encoder parameters to output stream\n");
return ret;
}
out_stream->time_base = codec_ctx->time_base;
// Open the output file
if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&fmt_ctx->pb, output_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'\n", output_filename);
return ret;
}
}
*ofmt_ctx = fmt_ctx;
*enc_ctx = codec_ctx;
return 0;
}
// Encode `frame` and write all resulting packets to the muxer.
//
// If the frame's pixel format differs from the encoder's, it is converted
// first; the temporary converted frame is always released before returning
// (the previous version leaked it once per converted frame). Returns 0 on
// success or a negative AVERROR code.
int encode_and_write_frame(AVFrame *frame, AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx) {
    int ret;
    AVFrame *converted_frame = nullptr;

    // Convert the frame to the encoder's pixel format if needed
    if (frame->format != enc_ctx->pix_fmt) {
        converted_frame = convert_avframe_pix_fmt(frame, enc_ctx->pix_fmt);
        if (!converted_frame) {
            fprintf(stderr, "Error converting frame to encoder's pixel format\n");
            return AVERROR_EXTERNAL;
        }
        converted_frame->pts = frame->pts;
        frame = converted_frame;
    }

    AVPacket *enc_pkt = av_packet_alloc();
    if (!enc_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        av_frame_free(&converted_frame); // no-op when no conversion happened
        return AVERROR(ENOMEM);
    }

    ret = avcodec_send_frame(enc_ctx, frame);
    // The encoder has taken its own reference to the frame data, so the
    // temporary converted frame (if any) can be released immediately
    av_frame_free(&converted_frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending frame to encoder\n");
        av_packet_free(&enc_pkt);
        return ret;
    }

    // Drain every packet the encoder can produce for this frame
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, enc_pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_packet_unref(enc_pkt);
            break;
        } else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            av_packet_free(&enc_pkt);
            return ret;
        }

        // Rescale packet timestamps from the encoder to the stream time base
        av_packet_rescale_ts(enc_pkt, enc_ctx->time_base, ofmt_ctx->streams[0]->time_base);
        enc_pkt->stream_index = ofmt_ctx->streams[0]->index;

        // Write the packet
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
        av_packet_unref(enc_pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            av_packet_free(&enc_pkt);
            return ret;
        }
    }

    av_packet_free(&enc_pkt);
    return 0;
}
// Flush the encoder: send the EOF frame and drain all buffered packets
// into the muxer. Returns 0 on success or a negative AVERROR code.
int flush_encoder(AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx) {
    int ret;
    AVPacket *enc_pkt = av_packet_alloc();
    if (!enc_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        return AVERROR(ENOMEM);
    }

    // NULL frame signals end-of-stream to the encoder. The previous version
    // silently returned 0 when this call failed; AVERROR_EOF (already
    // flushed) is not treated as an error.
    ret = avcodec_send_frame(enc_ctx, NULL);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error sending flush frame to encoder\n");
        av_packet_free(&enc_pkt);
        return ret;
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, enc_pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_packet_unref(enc_pkt);
            break;
        } else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            av_packet_free(&enc_pkt);
            return ret;
        }

        // Rescale packet timestamps from the encoder to the stream time base
        av_packet_rescale_ts(enc_pkt, enc_ctx->time_base, ofmt_ctx->streams[0]->time_base);
        enc_pkt->stream_index = ofmt_ctx->streams[0]->index;

        // Write the packet
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
        av_packet_unref(enc_pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            av_packet_free(&enc_pkt);
            return ret;
        }
    }

    av_packet_free(&enc_pkt);
    return 0;
}

94
src/fsutils.cpp Normal file
View File

@@ -0,0 +1,94 @@
#include <cstdio>
#include <filesystem>
#include <vector>

#if _WIN32
#include <windows.h>

#include <cwchar>
#else
#include <unistd.h>

#include <cstring>
#endif

#include "fsutils.h"
#if _WIN32
// Return the directory containing the running executable, or an empty path
// on failure. Windows implementation: queries the module file name, growing
// the buffer until the full path fits.
std::filesystem::path get_executable_directory() {
    std::vector<wchar_t> filepath(MAX_PATH);

    // Get the executable path, expanding the buffer if necessary
    DWORD size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
    if (size == 0) {
        fprintf(stderr, "Error getting executable path: %lu\n", GetLastError());
        return std::filesystem::path();
    }

    // GetModuleFileNameW returns a value >= the buffer size when the path
    // was truncated; double the buffer and retry until the path fits
    while (size >= filepath.size()) {
        filepath.resize(filepath.size() * 2);
        size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
        if (size == 0) {
            fprintf(stderr, "Error getting executable path: %lu\n", GetLastError());
            return std::filesystem::path();
        }
    }

    // Create a std::filesystem::path from the filepath and return its parent path
    std::filesystem::path execpath(filepath.data());
    return execpath.parent_path();
}
#else // _WIN32
// Non-Windows implementation: resolve the /proc/self/exe symlink.
// NOTE(review): /proc/self/exe is Linux-specific; this returns an empty
// path on POSIX systems without procfs (e.g. macOS) — confirm intended.
std::filesystem::path get_executable_directory() {
    std::error_code ec;
    std::filesystem::path filepath = std::filesystem::read_symlink("/proc/self/exe", ec);
    if (ec) {
        fprintf(stderr, "Error reading /proc/self/exe: %s\n", ec.message().c_str());
        return std::filesystem::path();
    }
    return filepath.parent_path();
}
#endif // _WIN32
// Check whether the file at `path` can be opened for binary reading.
// Readability is determined solely by whether the open call succeeds.
bool filepath_is_readable(const std::filesystem::path &path) {
#if _WIN32
    FILE *handle = _wfopen(path.c_str(), L"rb");
#else // _WIN32
    FILE *handle = fopen(path.c_str(), "rb");
#endif // _WIN32
    const bool readable = (handle != nullptr);
    if (readable) {
        fclose(handle);
    }
    return readable;
}
// Locate a resource file, probing in order: (1) the path as given,
// (2) the system-wide /usr/share/video2x/ prefix, and (3) the directory
// of the running executable as the final fallback (returned unchecked).
std::filesystem::path find_resource_file(const std::filesystem::path &path) {
    if (filepath_is_readable(path)) {
        return path;
    }
    const std::filesystem::path system_prefix = std::filesystem::path("/usr/share/video2x/");
    if (filepath_is_readable(system_prefix / path)) {
        return system_prefix / path;
    }
    return get_executable_directory() / path;
}
// Convert a filesystem path to a narrow string. On Windows the native wide
// path is transcoded to UTF-8 via WideCharToMultiByte; elsewhere the
// native narrow representation is returned directly.
std::string path_to_string(const std::filesystem::path &path) {
#if _WIN32
    const std::wstring wide_path = path.wstring();
    // First call with a null buffer computes the required size (incl. NUL)
    const int buffer_size =
        WideCharToMultiByte(CP_UTF8, 0, wide_path.c_str(), -1, nullptr, 0, nullptr, nullptr);
    if (buffer_size == 0) {
        return std::string();
    }
    std::vector<char> utf8_buffer(buffer_size);
    WideCharToMultiByte(
        CP_UTF8, 0, wide_path.c_str(), -1, utf8_buffer.data(), buffer_size, nullptr, nullptr
    );
    return std::string(utf8_buffer.data());
#else
    return path.string();
#endif
}

249
src/getopt.c Normal file
View File

@@ -0,0 +1,249 @@
/*
* Copyright (c) 1987, 1993, 1994, 1996
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
* IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "getopt.h"
/*
extern int opterr;
extern int optind;
extern int optopt;
extern int optreset;
extern char *optarg;
*/
int opterr = 1;      /* if error message should be printed */
int optind = 1;      /* index into parent argv vector */
int optopt = 0;      /* character checked for validity */
int optreset = 0;    /* reset getopt */
char *optarg = NULL; /* argument associated with option */

/* __P() wraps prototype argument lists for pre-ANSI compilers */
#ifndef __P
#define __P(x) x
#endif
#define _DIAGASSERT(x) assert(x)

/* forward declarations */
static char *__progname __P((char *));
int getopt_internal __P((int, char *const *, const char *));
/*
 * Return the program's basename: the component of nargv0 after the last
 * '/', or nargv0 itself when it contains no slash.
 */
static char *__progname(char *nargv0) {
    assert(nargv0 != NULL);
    char *last_slash = strrchr(nargv0, '/');
    return last_slash != NULL ? last_slash + 1 : nargv0;
}
#define BADCH (int)'?'  /* returned for an unrecognized option */
#define BADARG (int)':' /* returned for a missing required argument */
#define EMSG ""         /* empty string: no option text left to scan */

/*
 * getopt --
 *	Parse argc/argv argument vector.
 *
 * Returns the next short option character, BADCH for an unknown option,
 * BADARG for a missing required argument, -1 when scanning is finished,
 * and -2 when a "--" long option is found (consumed by getopt_long()).
 * State is carried between calls in the globals optind/optarg/optopt and
 * the static `place` pointer.
 */
int getopt_internal(int nargc, char *const *nargv, const char *ostr) {
    static char *place = EMSG; /* option letter processing */
    char *oli;                 /* option letter list index */

    _DIAGASSERT(nargv != NULL);
    _DIAGASSERT(ostr != NULL);

    if (optreset || !*place) { /* update scanning pointer */
        optreset = 0;
        /* stop at end of argv or at the first non-option argument */
        if (optind >= nargc || *(place = nargv[optind]) != '-') {
            place = EMSG;
            return (-1);
        }
        if (place[1] && *++place == '-') { /* found "--" */
            /* ++optind; */
            place = EMSG;
            return (-2);
        }
    } /* option letter okay? */
    if ((optopt = (int)*place++) == (int)':' || !(oli = strchr(ostr, optopt))) {
        /*
         * if the user didn't specify '-' as an option,
         * assume it means -1.
         */
        if (optopt == (int)'-') {
            return (-1);
        }
        if (!*place) {
            ++optind;
        }
        if (opterr && *ostr != ':') {
            (void)fprintf(stderr, "%s: illegal option -- %c\n", __progname(nargv[0]), optopt);
        }
        return (BADCH);
    }
    if (*++oli != ':') { /* don't need argument */
        optarg = NULL;
        if (!*place) {
            ++optind;
        }
    } else {          /* need an argument */
        if (*place) { /* no white space: argument is attached, e.g. -ofile */
            optarg = place;
        } else if (nargc <= ++optind) { /* no arg */
            place = EMSG;
            if ((opterr) && (*ostr != ':')) {
                (void)fprintf(
                    stderr, "%s: option requires an argument -- %c\n", __progname(nargv[0]), optopt
                );
            }
            return (BADARG);
        } else { /* white space: argument is the next argv entry */
            optarg = nargv[optind];
        }
        place = EMSG;
        ++optind;
    }
    return (optopt); /* dump back option letter */
}
#if 0
/*
 * getopt --
 *	Parse argc/argv argument vector.
 *
 * NOTE: legacy K&R-style public wrapper around getopt_internal();
 * compiled out via "#if 0" and kept for reference only.
 */
int
getopt2(nargc, nargv, ostr)
	int nargc;
	char * const *nargv;
	const char *ostr;
{
	int retval;

	if ((retval = getopt_internal(nargc, nargv, ostr)) == -2) {
		retval = -1;
		++optind;
	}
	return(retval);
}
#endif
/*
 * getopt_long --
 *	Parse argc/argv argument vector, recognizing short options (via
 *	getopt_internal()) and GNU-style "--name[=value]" long options.
 *
 * NOTE(review): when the supplied name is a prefix of several long
 * options, the first partial match wins; genuinely ambiguous prefixes
 * are not diagnosed as errors.
 */
int getopt_long(
    int nargc,
    char **nargv,
    const char *options,
    const struct option *long_options,
    int *index
) {
    int retval;

    _DIAGASSERT(nargv != NULL);
    _DIAGASSERT(options != NULL);
    _DIAGASSERT(long_options != NULL);
    /* index may be NULL */

    /* -2 from getopt_internal() signals that a "--" option was seen */
    if ((retval = getopt_internal(nargc, nargv, options)) == -2) {
        char *current_argv = nargv[optind++] + 2, *has_equal;
        int i, match = -1;
        size_t current_argv_len;

        /* a bare "--" terminates option scanning */
        if (*current_argv == '\0') {
            return (-1);
        }
        /* split "--name=value" into name and value */
        if ((has_equal = strchr(current_argv, '=')) != NULL) {
            current_argv_len = has_equal - current_argv;
            has_equal++;
        } else {
            current_argv_len = strlen(current_argv);
        }

        for (i = 0; long_options[i].name; i++) {
            if (strncmp(current_argv, long_options[i].name, current_argv_len)) {
                continue;
            }
            if (strlen(long_options[i].name) == current_argv_len) {
                match = i; /* exact match wins immediately */
                break;
            }
            if (match == -1) {
                match = i; /* remember the first partial match */
            }
        }
        if (match != -1) {
            if (long_options[match].has_arg == required_argument ||
                long_options[match].has_arg == optional_argument) {
                if (has_equal) {
                    optarg = has_equal;
                } else {
                    /* nargv[nargc] is argv's NULL terminator, so running
                       off the end yields optarg == NULL (checked below) */
                    optarg = nargv[optind++];
                }
            }
            if ((long_options[match].has_arg == required_argument) && (optarg == NULL)) {
                /*
                 * Missing argument, leading :
                 * indicates no error should be generated
                 */
                if ((opterr) && (*options != ':')) {
                    (void)fprintf(
                        stderr,
                        "%s: option requires an argument -- %s\n",
                        __progname(nargv[0]),
                        current_argv
                    );
                }
                return (BADARG);
            }
        } else { /* No matching argument */
            if ((opterr) && (*options != ':')) {
                (void)fprintf(stderr, "%s: illegal option -- %s\n", __progname(nargv[0]), current_argv);
            }
            return (BADCH);
        }
        if (long_options[match].flag) {
            /* flag-style option: store val through the pointer, return 0 */
            *long_options[match].flag = long_options[match].val;
            retval = 0;
        } else {
            retval = long_options[match].val;
        }
        if (index) {
            *index = match;
        }
    }
    return (retval);
}

161
src/libplacebo.cpp Normal file
View File

@@ -0,0 +1,161 @@
#include <stdio.h>
#include <stdlib.h>
#include <filesystem>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavutil/buffer.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/rational.h>
#include <libswscale/swscale.h>
}
#include "fsutils.h"
// Build a Vulkan-backed libplacebo filter graph:
//   buffer (src) -> libplacebo (scale + custom shader) -> buffersink
//
// On success the configured graph, the source/sink filter contexts, and the
// Vulkan device reference are returned through the output parameters and
// owned by the caller. On failure a negative AVERROR code is returned and
// everything allocated here is released.
int init_libplacebo(
    AVFilterGraph **filter_graph,
    AVFilterContext **buffersrc_ctx,
    AVFilterContext **buffersink_ctx,
    AVBufferRef **device_ctx,
    AVCodecContext *dec_ctx,
    int output_width,
    int output_height,
    const std::filesystem::path &shader_path
) {
    char args[512];
    int ret;

    // Initialize the Vulkan hardware device. av_hwdevice_ctx_create()
    // allocates the context itself, so the previous extra
    // av_hwdevice_ctx_alloc() call only leaked its result.
    AVBufferRef *hw_device_ctx = nullptr;
    ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Unable to initialize Vulkan device\n");
        return ret;
    }

    AVFilterGraph *graph = avfilter_graph_alloc();
    if (!graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        av_buffer_unref(&hw_device_ctx);
        return AVERROR(ENOMEM);
    }

    // Create buffer source describing the decoded input stream
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    snprintf(
        args,
        sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:frame_rate=%d/%d:"
        "pixel_aspect=%d/%d:colorspace=%d",
        dec_ctx->width,
        dec_ctx->height,
        dec_ctx->pix_fmt,
        dec_ctx->time_base.num,
        dec_ctx->time_base.den,
        dec_ctx->framerate.num,
        dec_ctx->framerate.den,
        dec_ctx->sample_aspect_ratio.num,
        dec_ctx->sample_aspect_ratio.den,
        dec_ctx->colorspace
    );

    ret = avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in", args, NULL, graph);
    if (ret < 0) {
        fprintf(stderr, "Cannot create buffer source\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }

    AVFilterContext *last_filter = *buffersrc_ctx;

    // Create the libplacebo filter
    const AVFilter *libplacebo_filter = avfilter_get_by_name("libplacebo");
    if (!libplacebo_filter) {
        fprintf(stderr, "Filter 'libplacebo' not found\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return AVERROR_FILTER_NOT_FOUND;
    }

    // Convert the shader path to a string since filter args is const char *
    std::string shader_path_string = path_to_string(shader_path);
#ifdef _WIN32
    // libplacebo does not recognize the Windows '\\' path separator
    std::replace(shader_path_string.begin(), shader_path_string.end(), '\\', '/');
#endif

    // Prepare the filter arguments
    char filter_args[512];
    snprintf(
        filter_args,
        sizeof(filter_args),
        "w=%d:h=%d:upscaler=ewa_lanczos:custom_shader_path=%s",
        output_width,
        output_height,
        shader_path_string.c_str()
    );

    AVFilterContext *libplacebo_ctx;
    ret = avfilter_graph_create_filter(
        &libplacebo_ctx, libplacebo_filter, "libplacebo", filter_args, NULL, graph
    );
    if (ret < 0) {
        fprintf(stderr, "Cannot create libplacebo filter\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }

    // Give the filter its own reference to the Vulkan device context
    libplacebo_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    // Link buffersrc to libplacebo
    ret = avfilter_link(last_filter, 0, libplacebo_ctx, 0);
    if (ret < 0) {
        fprintf(stderr, "Error connecting buffersrc to libplacebo filter\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }
    last_filter = libplacebo_ctx;

    // Create buffer sink
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, graph);
    if (ret < 0) {
        fprintf(stderr, "Cannot create buffer sink\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }

    // Link libplacebo to buffersink
    ret = avfilter_link(last_filter, 0, *buffersink_ctx, 0);
    if (ret < 0) {
        fprintf(stderr, "Error connecting libplacebo filter to buffersink\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }

    // Configure the filter graph
    ret = avfilter_graph_config(graph, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error configuring the filter graph\n");
        av_buffer_unref(&hw_device_ctx);
        avfilter_graph_free(&graph);
        return ret;
    }

    // Transfer ownership of the graph and device reference to the caller
    *filter_graph = graph;
    *device_ctx = hw_device_ctx;
    return 0;
}

139
src/libplacebo_filter.cpp Normal file
View File

@@ -0,0 +1,139 @@
#include <cstdio>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/buffer.h>
}
#include "fsutils.h"
#include "libplacebo.h"
#include "libplacebo_filter.h"
// Construct a libplacebo filter wrapper.
//
// `width`/`height` are the output dimensions; `shader_path` names the GLSL
// shader to apply (resolved to a full on-disk path later, in init()).
LibplaceboFilter::LibplaceboFilter(int width, int height, const std::filesystem::path &shader_path)
    : filter_graph(nullptr),
      buffersrc_ctx(nullptr),
      buffersink_ctx(nullptr),
      device_ctx(nullptr),
      output_width(width),
      output_height(height),
      // std::move() on a const reference silently degrades to a copy, so the
      // previous std::move(shader_path) was misleading; copy explicitly.
      shader_path(shader_path) {}
// Release the filter graph and the Vulkan device reference.
//
// avfilter_graph_free() releases every filter context still attached to the
// graph, so the source/sink contexts must not (and need not) be freed
// individually as the previous version did.
LibplaceboFilter::~LibplaceboFilter() {
    if (filter_graph) {
        avfilter_graph_free(&filter_graph);
    }
    // The contexts were owned by the graph; just clear the dangling pointers
    buffersrc_ctx = nullptr;
    buffersink_ctx = nullptr;
    filter_graph = nullptr;

    if (device_ctx) {
        av_buffer_unref(&device_ctx);
        device_ctx = nullptr;
    }
}
// Initialize the underlying libplacebo filter graph.
//
// Resolves the shader location first: a directly readable path wins;
// otherwise fall back to the bundled "models/<name>.glsl" resource. The
// encoder time base is recorded for PTS rescaling in process_frame().
int LibplaceboFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx) {
    std::filesystem::path shader_full_path = shader_path;
    if (!filepath_is_readable(shader_path)) {
        // Not directly readable: look the shader up among bundled resources
        shader_full_path =
            find_resource_file(std::filesystem::path("models") / (shader_path.string() + ".glsl"));
    }

    // Save the output time base
    output_time_base = enc_ctx->time_base;

    return init_libplacebo(
        &filter_graph,
        &buffersrc_ctx,
        &buffersink_ctx,
        &device_ctx,
        dec_ctx,
        output_width,
        output_height,
        shader_full_path
    );
}
// Run one frame through the filter graph.
//
// Returns the filtered frame (owned by the caller), nullptr on error, or
// the sentinel (AVFrame *)-1 when the graph accepted the input but has no
// output ready yet (EAGAIN/EOF).
AVFrame *LibplaceboFilter::process_frame(AVFrame *input_frame) {
    int ret;

    // Feed the frame to the filter graph
    ret = av_buffersrc_add_frame(buffersrc_ctx, input_frame);
    if (ret < 0) {
        fprintf(stderr, "Error while feeding the filter graph\n");
        return nullptr;
    }

    // Allocate a frame to receive the filtered output. (This used to be
    // allocated before the buffersrc call and leaked on its error path.)
    AVFrame *output_frame = av_frame_alloc();
    if (output_frame == nullptr) {
        fprintf(stderr, "Failed to allocate output frame\n");
        return nullptr;
    }

    ret = av_buffersink_get_frame(buffersink_ctx, output_frame);
    if (ret < 0) {
        av_frame_free(&output_frame);
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            char errbuf[AV_ERROR_MAX_STRING_SIZE];
            av_strerror(ret, errbuf, sizeof(errbuf));
            fprintf(stderr, "Error getting frame from filter graph: %s\n", errbuf);
            return nullptr;
        }
        // No output available yet: signal "try again" with the sentinel
        return (AVFrame *)-1;
    }

    // Rescale PTS to encoder's time base
    output_frame->pts =
        av_rescale_q(output_frame->pts, buffersink_ctx->inputs[0]->time_base, output_time_base);

    // Return the processed frame to the caller
    return output_frame;
}
// Drain the filter graph at end of stream.
//
// Signals EOF to the buffer source, then collects every remaining frame
// from the sink into `processed_frames` (PTS rescaled to the encoder time
// base; ownership of the frames passes to the caller). Returns 0 on
// success or a negative AVERROR code.
int LibplaceboFilter::flush(std::vector<AVFrame *> &processed_frames) {
    // A null frame signals EOF to the filter graph
    int ret = av_buffersrc_add_frame(buffersrc_ctx, nullptr);
    if (ret < 0) {
        fprintf(stderr, "Error while flushing filter graph\n");
        return ret;
    }

    // Retrieve all remaining frames from the filter graph
    for (;;) {
        AVFrame *drained_frame = av_frame_alloc();
        if (drained_frame == nullptr) {
            return AVERROR(ENOMEM);
        }

        ret = av_buffersink_get_frame(buffersink_ctx, drained_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // Graph fully drained
            av_frame_free(&drained_frame);
            break;
        }
        if (ret < 0) {
            av_frame_free(&drained_frame);
            return ret;
        }

        // Rescale PTS to the encoder's time base before handing it over
        drained_frame->pts =
            av_rescale_q(drained_frame->pts, buffersink_ctx->inputs[0]->time_base, output_time_base);
        processed_frames.push_back(drained_frame);
    }

    return 0;
}

327
src/libvideo2x.cpp Normal file
View File

@@ -0,0 +1,327 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cstdint>
// FFmpeg headers
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include "decoder.h"
#include "encoder.h"
#include "filter.h"
#include "libplacebo_filter.h"
#include "libvideo2x.h"
#include "realesrgan_filter.h"
// Function to process frames using the selected filter (same as before).
//
// Reads packets from `fmt_ctx`, decodes those on `video_stream_index` with
// `dec_ctx`, runs each decoded frame through `filter`, and encodes/muxes
// the results via `enc_ctx`/`ofmt_ctx`. Progress is reported through
// `status`. Returns the final av_read_frame status (negative at EOF) on
// the happy path or a negative AVERROR code on failure.
int process_frames(
    ProcessingStatus *status,
    AVFormatContext *fmt_ctx,
    AVFormatContext *ofmt_ctx,
    AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx,
    Filter *filter,
    int video_stream_index
) {
    int ret;
    AVPacket packet;
    std::vector<AVFrame *> flushed_frames;
    char errbuf[AV_ERROR_MAX_STRING_SIZE];

    // Get the total number of frames in the video
    AVStream *video_stream = fmt_ctx->streams[video_stream_index];
    status->total_frames = video_stream->nb_frames;

    // If nb_frames is not set, calculate total frames using duration and frame rate
    if (status->total_frames == 0) {
        int64_t duration = video_stream->duration;
        AVRational frame_rate = video_stream->avg_frame_rate;
        if (duration != AV_NOPTS_VALUE && frame_rate.num != 0 && frame_rate.den != 0) {
            status->total_frames = duration * frame_rate.num / frame_rate.den;
        }
    }
    // NOTE(review): if total_frames is still 0 here, the progress printf
    // below divides by zero when computing the percentage.

    // Get start time
    status->start_time = time(NULL);
    if (status->start_time == -1) {
        perror("time");
    }

    AVFrame *frame = av_frame_alloc();
    if (frame == nullptr) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    // Read frames from the input file
    while (1) {
        ret = av_read_frame(fmt_ctx, &packet);
        if (ret < 0) {
            break; // End of file or error
        }

        if (packet.stream_index == video_stream_index) {
            // Send the packet to the decoder
            ret = avcodec_send_packet(dec_ctx, &packet);
            if (ret < 0) {
                av_strerror(ret, errbuf, sizeof(errbuf));
                fprintf(stderr, "Error sending packet to decoder: %s\n", errbuf);
                av_packet_unref(&packet);
                goto end;
            }

            // Receive and process frames from the decoder
            while (1) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_strerror(ret, errbuf, sizeof(errbuf));
                    fprintf(stderr, "Error decoding video frame: %s\n", errbuf);
                    goto end;
                }

                // Process the frame using the selected filter. The filter
                // contract: nullptr = error, (AVFrame *)-1 = no output yet.
                AVFrame *processed_frame = filter->process_frame(frame);
                if (processed_frame != nullptr && processed_frame != (AVFrame *)-1) {
                    // Encode and write the processed frame
                    ret = encode_and_write_frame(processed_frame, enc_ctx, ofmt_ctx);
                    if (ret < 0) {
                        av_strerror(ret, errbuf, sizeof(errbuf));
                        fprintf(stderr, "Error encoding/writing frame: %s\n", errbuf);
                        av_frame_free(&processed_frame);
                        goto end;
                    }
                    av_frame_free(&processed_frame);
                    status->processed_frames++;
                } else if (processed_frame != (AVFrame *)-1) {
                    fprintf(stderr, "Error processing frame\n");
                    goto end;
                }

                av_frame_unref(frame);

                // Print the processing status.
                // NOTE(review): "%ld" assumes int64_t == long; use PRId64
                // for portability (e.g. 64-bit Windows, where long is 32-bit).
                printf(
                    "\r[Video2X] Processing frame %ld/%ld (%.2f%%); time elapsed: %lds",
                    status->processed_frames,
                    status->total_frames,
                    status->processed_frames * 100.0 / status->total_frames,
                    time(NULL) - status->start_time
                );
                fflush(stdout);
            }
        }
        av_packet_unref(&packet);
    }

    // Print a newline after processing all frames
    printf("\n");

    // Flush the filter to retrieve any frames it is still holding
    ret = filter->flush(flushed_frames);
    if (ret < 0) {
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "Error flushing filter: %s\n", errbuf);
        goto end;
    }

    // Encode and write all flushed frames (each entry is nulled as it is
    // freed so the cleanup loop below does not double-free them)
    for (AVFrame *&flushed_frame : flushed_frames) {
        ret = encode_and_write_frame(flushed_frame, enc_ctx, ofmt_ctx);
        if (ret < 0) {
            av_strerror(ret, errbuf, sizeof(errbuf));
            fprintf(stderr, "Error encoding/writing flushed frame: %s\n", errbuf);
            av_frame_free(&flushed_frame);
            flushed_frame = nullptr;
            goto end;
        }
        av_frame_free(&flushed_frame);
        flushed_frame = nullptr;
    }

    // Flush the encoder to drain any buffered packets
    ret = flush_encoder(enc_ctx, ofmt_ctx);
    if (ret < 0) {
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "Error flushing encoder: %s\n", errbuf);
        goto end;
    }

end:
    av_frame_free(&frame);
    // Free any flushed frames not yet freed
    for (AVFrame *flushed_frame : flushed_frames) {
        if (flushed_frame) {
            av_frame_free(&flushed_frame);
        }
    }
    return ret;
}
// Release every context and the filter instance created during processing.
// Any argument may be nullptr; such arguments are skipped.
void cleanup(
    AVFormatContext *fmt_ctx,
    AVFormatContext *ofmt_ctx,
    AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx,
    Filter *filter
) {
    // `delete` on a null pointer is a no-op, so no guard is required
    delete filter;

    if (dec_ctx != nullptr) {
        avcodec_free_context(&dec_ctx);
    }
    if (enc_ctx != nullptr) {
        avcodec_free_context(&enc_ctx);
    }
    if (fmt_ctx != nullptr) {
        avformat_close_input(&fmt_ctx);
    }
    if (ofmt_ctx != nullptr) {
        // Close the output AVIO handle first, unless the muxer owns its own I/O
        if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            avio_closep(&ofmt_ctx->pb);
        }
        avformat_free_context(ofmt_ctx);
    }
}
// Main function to process the video end-to-end:
// decode -> filter (libplacebo or RealESRGAN) -> encode -> mux.
//
// Parameters:
//   input_filename  - path of the input video file
//   output_filename - path of the output video file
//   filter_config   - filter selection and its parameters
//   encoder_config  - encoder settings; output_width/output_height are
//                     filled in by this function before encoder init
//   status          - progress information updated during processing
//
// Returns 0 on success and 1 on failure.
extern "C" int process_video(
    const char *input_filename,
    const char *output_filename,
    const FilterConfig *filter_config,
    EncoderConfig *encoder_config,
    ProcessingStatus *status
) {
    AVFormatContext *fmt_ctx = nullptr;
    AVFormatContext *ofmt_ctx = nullptr;
    AVCodecContext *dec_ctx = nullptr;
    AVCodecContext *enc_ctx = nullptr;
    Filter *filter = nullptr;
    int video_stream_index = -1;
    int ret = 0; // Initialize ret with 0 to assume success

    // Initialize input decoder
    if (init_decoder(input_filename, &fmt_ctx, &dec_ctx, &video_stream_index) < 0) {
        fprintf(stderr, "Failed to initialize decoder\n");
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Determine the output dimensions based on the selected filter
    int output_width = 0, output_height = 0;
    switch (filter_config->filter_type) {
        case FILTER_LIBPLACEBO:
            output_width = filter_config->config.libplacebo.output_width;
            output_height = filter_config->config.libplacebo.output_height;
            break;
        case FILTER_REALESRGAN:
            // Calculate the output dimensions based on the scaling factor
            output_width = dec_ctx->width * filter_config->config.realesrgan.scaling_factor;
            output_height = dec_ctx->height * filter_config->config.realesrgan.scaling_factor;
            break;
        default:
            // Fail early instead of initializing the encoder with 0x0 dimensions
            fprintf(stderr, "Unknown filter type\n");
            cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
            return 1;
    }

    // Initialize output encoder
    encoder_config->output_width = output_width;
    encoder_config->output_height = output_height;
    if (init_encoder(output_filename, &ofmt_ctx, &enc_ctx, dec_ctx, encoder_config) < 0) {
        fprintf(stderr, "Failed to initialize encoder\n");
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Write the output file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Create and initialize the appropriate filter
    switch (filter_config->filter_type) {
        case FILTER_LIBPLACEBO: {
            const auto &config = filter_config->config.libplacebo;
            // Validate shader path
            if (!config.shader_path) {
                fprintf(stderr, "Shader path must be provided for the libplacebo filter\n");
                cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
                return 1;
            }
            // Validate output dimensions
            if (config.output_width <= 0 || config.output_height <= 0) {
                fprintf(stderr, "Output dimensions must be provided for the libplacebo filter\n");
                cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
                return 1;
            }
            filter = new LibplaceboFilter(
                config.output_width, config.output_height, std::filesystem::path(config.shader_path)
            );
            break;
        }
        case FILTER_REALESRGAN: {
            const auto &config = filter_config->config.realesrgan;
            // Validate model name
            if (!config.model) {
                fprintf(stderr, "Model name must be provided for the RealESRGAN filter\n");
                cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
                return 1;
            }
            // Validate scaling factor
            if (config.scaling_factor <= 0) {
                fprintf(stderr, "Scaling factor must be provided for the RealESRGAN filter\n");
                cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
                return 1;
            }
            filter = new RealesrganFilter(
                config.gpuid, config.tta_mode, config.scaling_factor, config.model
            );
            break;
        }
        default:
            fprintf(stderr, "Unknown filter type\n");
            cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
            return 1;
    }

    // Initialize the filter
    if (filter->init(dec_ctx, enc_ctx) < 0) {
        fprintf(stderr, "Failed to initialize filter\n");
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Process frames; AVERROR_EOF only signals that the input was fully
    // consumed, so it is not treated as a failure
    ret = process_frames(status, fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter, video_stream_index);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error processing frames\n");
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Write the output file trailer; a failure here may leave a truncated file
    ret = av_write_trailer(ofmt_ctx);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "Error writing output trailer: %s\n", errbuf);
        cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
        return 1;
    }

    // Cleanup before returning
    cleanup(fmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, filter);
    return 0;
}

129
src/realesrgan_filter.cpp Normal file
View File

@@ -0,0 +1,129 @@
#include <cstdint>
#include <cstdio>
#include <filesystem>
#include <string>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
}
#include "conversions.h"
#include "fsutils.h"
#include "realesrgan.h"
#include "realesrgan_filter.h"
// Construct a RealESRGAN filter instance.
//
// Either a bundled model name or a pair of custom model file paths must be
// supplied; the choice between them is resolved later in init().
//
// Parameters:
//   gpuid                   - Vulkan GPU index used for inference
//   tta_mode                - enable test-time augmentation (slower, higher quality)
//   scaling_factor          - upscaling factor (e.g. 2, 3, or 4)
//   model                   - bundled model name, or nullptr to use custom paths
//   custom_model_param_path - path to a custom .param file (may be empty)
//   custom_model_bin_path   - path to a custom .bin file (may be empty)
RealesrganFilter::RealesrganFilter(
    int gpuid,
    bool tta_mode,
    int scaling_factor,
    const char *model,
    std::filesystem::path custom_model_param_path,
    std::filesystem::path custom_model_bin_path
)
    : realesrgan(nullptr),
      gpuid(gpuid),
      tta_mode(tta_mode),
      scaling_factor(scaling_factor),
      model(model),
      // The path parameters are taken by non-const value so these moves can
      // actually steal the buffers; std::move on a const object degrades to a
      // copy. Dropping the top-level const does not change the signature.
      custom_model_param_path(std::move(custom_model_param_path)),
      custom_model_bin_path(std::move(custom_model_bin_path)) {}
// Destroy the filter, releasing the RealESRGAN engine if one was created.
RealesrganFilter::~RealesrganFilter() {
    // delete handles nullptr safely; null the pointer defensively afterwards
    delete realesrgan;
    realesrgan = nullptr;
}
// Initialize the RealESRGAN engine: resolve the model files, load them into a
// new RealESRGAN instance, and configure the inference parameters.
//
// Parameters:
//   dec_ctx - decoder context; supplies the input stream time base
//   enc_ctx - encoder context; supplies the output time base and pixel format
//
// Returns 0 on success, -1 on failure.
int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx) {
    // Construct the model paths using std::filesystem
    std::filesystem::path model_param_path;
    std::filesystem::path model_bin_path;
    if (model) {
        // Find the model paths by model name if provided, e.g.
        // "models/<model>-x<scale>.param" and "models/<model>-x<scale>.bin"
        model_param_path = std::filesystem::path("models") /
                           (std::string(model) + "-x" + std::to_string(scaling_factor) + ".param");
        model_bin_path = std::filesystem::path("models") /
                         (std::string(model) + "-x" + std::to_string(scaling_factor) + ".bin");
    } else if (!custom_model_param_path.empty() && !custom_model_bin_path.empty()) {
        // Use the custom model paths if provided
        model_param_path = custom_model_param_path;
        model_bin_path = custom_model_bin_path;
    } else {
        // Neither model name nor custom model paths provided
        fprintf(stderr, "Model or model paths must be provided for RealESRGAN filter\n");
        return -1;
    }

    // Get the full paths using a function that possibly modifies or validates the path
    std::filesystem::path model_param_full_path = find_resource_file(model_param_path);
    std::filesystem::path model_bin_full_path = find_resource_file(model_bin_path);

    // Validate the GPU ID before dereferencing the device handle
    // (ncnn::get_gpu_device returns nullptr for an invalid index)
    auto *gpu_device = ncnn::get_gpu_device(gpuid);
    if (gpu_device == nullptr) {
        fprintf(stderr, "Invalid GPU ID %d for the RealESRGAN filter\n", gpuid);
        return -1;
    }

    // Replace any engine left over from a previous init() call to avoid a leak
    delete realesrgan;
    realesrgan = new RealESRGAN(gpuid, tta_mode);

    // Store the time bases and output pixel format for process_frame()
    input_time_base = dec_ctx->time_base;
    output_time_base = enc_ctx->time_base;
    output_pix_fmt = enc_ctx->pix_fmt;

    // Load the model
    if (realesrgan->load(model_param_full_path, model_bin_full_path) != 0) {
        fprintf(stderr, "Failed to load RealESRGAN model\n");
        return -1;
    }

    // Set RealESRGAN parameters
    realesrgan->scale = scaling_factor;
    realesrgan->prepadding = 10;

    // Calculate tilesize based on GPU heap budget (MB); larger tiles are
    // faster but require more VRAM
    uint32_t heap_budget = gpu_device->get_heap_budget();
    if (heap_budget > 1900) {
        realesrgan->tilesize = 200;
    } else if (heap_budget > 550) {
        realesrgan->tilesize = 100;
    } else if (heap_budget > 190) {
        realesrgan->tilesize = 64;
    } else {
        realesrgan->tilesize = 32;
    }

    return 0;
}
// Upscale a single frame with RealESRGAN.
//
// Parameters:
//   input_frame - decoded frame to process (not consumed or freed)
//
// Returns a newly-allocated upscaled AVFrame with its PTS rescaled to the
// encoder time base, or nullptr on failure.
AVFrame *RealesrganFilter::process_frame(AVFrame *input_frame) {
    // Convert the input frame into a packed ncnn::Mat
    ncnn::Mat input_mat = avframe_to_ncnn_mat(input_frame);
    if (input_mat.empty()) {
        fprintf(stderr, "Failed to convert AVFrame to ncnn::Mat\n");
        return nullptr;
    }

    // Allocate space for output ncnn::Mat (3 channels, 1 byte per channel)
    int output_width = input_mat.w * realesrgan->scale;
    int output_height = input_mat.h * realesrgan->scale;
    ncnn::Mat output_mat = ncnn::Mat(output_width, output_height, (size_t)3, 3);

    // Run the upscaling inference
    if (realesrgan->process(input_mat, output_mat) != 0) {
        fprintf(stderr, "RealESRGAN processing failed\n");
        return nullptr;
    }

    // Convert ncnn::Mat back into an AVFrame in the encoder's pixel format;
    // guard against a failed conversion before touching the frame's fields
    AVFrame *output_frame = ncnn_mat_to_avframe(output_mat, output_pix_fmt);
    if (output_frame == nullptr) {
        fprintf(stderr, "Failed to convert ncnn::Mat to AVFrame\n");
        return nullptr;
    }

    // Rescale PTS from the decoder's time base to the encoder's
    output_frame->pts = av_rescale_q(input_frame->pts, input_time_base, output_time_base);

    // Return the processed frame to the caller
    return output_frame;
}
// Flush any frames buffered by the filter at end-of-stream.
//
// RealESRGAN processes frames one at a time and keeps no internal queue, so
// nothing is appended to processed_frames. Always returns 0 (success).
int RealesrganFilter::flush(std::vector<AVFrame *> &processed_frames) {
    // No special flushing needed for RealESRGAN
    return 0;
}

325
src/video2x.c Normal file
View File

@@ -0,0 +1,325 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>

#include <libvideo2x.h>

#include "getopt.h"
// Program version string reported by the `-v`/`--version` flag
const char *VIDEO2X_VERSION = "6.0.0";
// Define command line options
// Each entry maps a long option to the value getopt_long() returns for it;
// "help" is long-only (val 0) and is dispatched by name in the `case 0` branch.
static struct option long_options[] = {
    // General options
    {"input", required_argument, NULL, 'i'},
    {"output", required_argument, NULL, 'o'},
    {"filter", required_argument, NULL, 'f'},
    {"version", no_argument, NULL, 'v'},
    {"help", no_argument, NULL, 0},
    // Encoder options
    {"codec", required_argument, NULL, 'c'},
    {"preset", required_argument, NULL, 'p'},
    {"pixfmt", required_argument, NULL, 'x'},
    {"bitrate", required_argument, NULL, 'b'},
    {"crf", required_argument, NULL, 'q'},
    // Libplacebo options
    {"shader", required_argument, NULL, 's'},
    {"width", required_argument, NULL, 'w'},
    {"height", required_argument, NULL, 'h'},
    // RealESRGAN options
    {"gpuid", required_argument, NULL, 'g'},
    {"model", required_argument, NULL, 'm'},
    {"scale", required_argument, NULL, 'r'},
    {0, 0, 0, 0}
};
// Structure to hold parsed arguments
struct arguments {
    // General options
    const char *input_filename;   // required: input video path
    const char *output_filename;  // required: output video path
    const char *filter_type;      // required: "libplacebo" or "realesrgan"
    // Encoder options
    const char *codec;    // encoder name (default "libx264")
    const char *pix_fmt;  // output pixel format name (default "yuv420p")
    const char *preset;   // encoder preset (default "veryslow")
    int64_t bitrate;      // target bitrate in bits per second
    float crf;            // constant rate factor, valid range 0.0-51.0
    // libplacebo options
    const char *shader_path;  // GLSL shader name or file path
    int output_width;         // output width in pixels
    int output_height;        // output height in pixels
    // RealESRGAN options
    int gpuid;           // Vulkan GPU index
    const char *model;   // model name (must be one of valid_models)
    int scaling_factor;  // 2, 3, or 4
};
// Names of the RealESRGAN models supported by the application
const char *valid_models[] = {
    "realesrgan-plus",
    "realesrgan-plus-anime",
    "realesr-animevideov3",
};

// Check whether the given name is one of the supported RealESRGAN models.
// Returns 1 if valid, 0 otherwise (including for a NULL model name).
int is_valid_realesrgan_model(const char *model) {
    if (!model) {
        return 0;
    }
    // size_t index avoids a signed/unsigned comparison against sizeof()
    for (size_t i = 0; i < sizeof(valid_models) / sizeof(valid_models[0]); i++) {
        if (strcmp(model, valid_models[i]) == 0) {
            return 1;
        }
    }
    return 0;
}
// Print the command line usage page to standard output.
// Uses one fputs with adjacent string literals; output is identical to the
// previous per-line printf calls but avoids repeated format parsing.
void print_help() {
    fputs(
        "Usage: video2x [OPTIONS]\n"
        "\nGeneral Options:\n"
        " -i, --input Input video file path\n"
        " -o, --output Output video file path\n"
        " -f, --filter Filter to use: 'libplacebo' or 'realesrgan'\n"
        " -v, --version Print program version\n"
        " --help Display this help page\n"
        "\nEncoder Options (Optional):\n"
        " -c, --codec Output codec (default: libx264)\n"
        " -p, --preset Encoder preset (default: veryslow)\n"
        " -x, --pixfmt Output pixel format (default: yuv420p)\n"
        " -b, --bitrate Bitrate in bits per second (default: 2000000)\n"
        " -q, --crf Constant Rate Factor (default: 17.0)\n"
        "\nlibplacebo Options:\n"
        " -s, --shader Name or path to custom GLSL shader file\n"
        " -w, --width Output width\n"
        " -h, --height Output height\n"
        "\nRealESRGAN Options:\n"
        " -g, --gpuid Vulkan GPU ID (default: 0)\n"
        " -m, --model Name of the model to use\n"
        " -r, --scale Scaling factor (2, 3, or 4)\n",
        stdout
    );
}
// Parse command line arguments into `arguments`, applying defaults and
// validating option values. Prints an error message and exits the process on
// invalid input; exits with status 0 for --version/--help.
//
// Parameters:
//   argc, argv - the command line as passed to main()
//   arguments  - output structure filled with the parsed values
void parse_arguments(int argc, char **argv, struct arguments *arguments) {
    int option_index = 0;
    int c;

    // Default argument values
    arguments->input_filename = NULL;
    arguments->output_filename = NULL;
    arguments->filter_type = NULL;

    // Encoder options
    arguments->codec = "libx264";
    arguments->preset = "veryslow";
    arguments->pix_fmt = "yuv420p";
    arguments->bitrate = 2 * 1000 * 1000;
    arguments->crf = 17.0;

    // libplacebo options
    arguments->shader_path = NULL;
    arguments->output_width = 0;
    arguments->output_height = 0;

    // RealESRGAN options
    arguments->gpuid = 0;
    arguments->model = NULL;
    arguments->scaling_factor = 0;

    // NOTE: "g:" was previously missing from the short option string, so the
    // short form `-g <id>` was rejected even though `case 'g'` existed below
    while ((c = getopt_long(argc, argv, "i:o:f:c:x:p:b:q:s:w:h:g:r:m:v", long_options, &option_index)
    ) != -1) {
        switch (c) {
            case 'i':
                arguments->input_filename = optarg;
                break;
            case 'o':
                arguments->output_filename = optarg;
                break;
            case 'f':
                arguments->filter_type = optarg;
                break;
            case 'c':
                arguments->codec = optarg;
                break;
            case 'x':
                arguments->pix_fmt = optarg;
                break;
            case 'p':
                arguments->preset = optarg;
                break;
            case 'b':
                arguments->bitrate = strtoll(optarg, NULL, 10);
                if (arguments->bitrate <= 0) {
                    fprintf(stderr, "Error: Invalid bitrate specified.\n");
                    exit(1);
                }
                break;
            case 'q':
                arguments->crf = atof(optarg);
                if (arguments->crf < 0.0 || arguments->crf > 51.0) {
                    fprintf(stderr, "Error: CRF must be between 0 and 51.\n");
                    exit(1);
                }
                break;
            case 's':
                arguments->shader_path = optarg;
                break;
            case 'w':
                arguments->output_width = atoi(optarg);
                if (arguments->output_width <= 0) {
                    fprintf(stderr, "Error: Output width must be greater than 0.\n");
                    exit(1);
                }
                break;
            case 'h':
                arguments->output_height = atoi(optarg);
                if (arguments->output_height <= 0) {
                    fprintf(stderr, "Error: Output height must be greater than 0.\n");
                    exit(1);
                }
                break;
            case 'g':
                arguments->gpuid = atoi(optarg);
                break;
            case 'm':
                arguments->model = optarg;
                if (!is_valid_realesrgan_model(arguments->model)) {
                    fprintf(
                        stderr,
                        "Error: Invalid model specified. Must be 'realesrgan-plus', 'realesrgan-plus-anime', or 'realesr-animevideov3'.\n"
                    );
                    exit(1);
                }
                break;
            case 'r':
                arguments->scaling_factor = atoi(optarg);
                if (arguments->scaling_factor != 2 && arguments->scaling_factor != 3 &&
                    arguments->scaling_factor != 4) {
                    fprintf(stderr, "Error: Scaling factor must be 2, 3, or 4.\n");
                    exit(1);
                }
                break;
            case 'v':
                printf("video2x %s\n", VIDEO2X_VERSION);
                exit(0);
            case 0: // Long-only options without short equivalents (e.g., help)
                if (strcmp(long_options[option_index].name, "help") == 0) {
                    print_help();
                    exit(0);
                }
                break;
            default:
                fprintf(stderr, "Invalid options provided.\n");
                exit(1);
        }
    }

    // Check for required arguments
    if (!arguments->input_filename || !arguments->output_filename) {
        fprintf(stderr, "Error: Input and output files are required.\n");
        exit(1);
    }
    if (!arguments->filter_type) {
        fprintf(stderr, "Error: Filter type is required (libplacebo or realesrgan).\n");
        exit(1);
    }
    if (strcmp(arguments->filter_type, "libplacebo") == 0) {
        if (!arguments->shader_path || arguments->output_width == 0 ||
            arguments->output_height == 0) {
            // (fixed: the height flag is -h, not -e)
            fprintf(
                stderr,
                "Error: For libplacebo, shader name/path (-s), width (-w), and height (-h) are required.\n"
            );
            exit(1);
        }
    } else if (strcmp(arguments->filter_type, "realesrgan") == 0) {
        if (arguments->scaling_factor == 0 || !arguments->model) {
            fprintf(
                stderr, "Error: For realesrgan, scaling factor (-r) and model (-m) are required.\n"
            );
            exit(1);
        }
    }
}
// Program entry point: parse arguments, build the filter and encoder
// configurations, run the processing pipeline, and print a summary.
// Returns 0 on success, 1 on any failure.
int main(int argc, char **argv) {
    struct arguments arguments;
    parse_arguments(argc, argv, &arguments);

    // Setup filter configurations based on the parsed arguments
    struct FilterConfig filter_config;
    if (strcmp(arguments.filter_type, "libplacebo") == 0) {
        filter_config.filter_type = FILTER_LIBPLACEBO;
        filter_config.config.libplacebo.output_width = arguments.output_width;
        filter_config.config.libplacebo.output_height = arguments.output_height;
        filter_config.config.libplacebo.shader_path = arguments.shader_path;
    } else if (strcmp(arguments.filter_type, "realesrgan") == 0) {
        filter_config.filter_type = FILTER_REALESRGAN;
        filter_config.config.realesrgan.gpuid = arguments.gpuid;
        filter_config.config.realesrgan.tta_mode = 0;
        filter_config.config.realesrgan.scaling_factor = arguments.scaling_factor;
        filter_config.config.realesrgan.model = arguments.model;
    } else {
        fprintf(stderr, "Error: Invalid filter type specified.\n");
        return 1;
    }

    // Parse codec name to an AVCodec
    const AVCodec *codec = avcodec_find_encoder_by_name(arguments.codec);
    if (!codec) {
        fprintf(stderr, "Error: Codec '%s' not found.\n", arguments.codec);
        return 1;
    }

    // Parse pixel format name to an AVPixelFormat
    enum AVPixelFormat pix_fmt = av_get_pix_fmt(arguments.pix_fmt);
    if (pix_fmt == AV_PIX_FMT_NONE) {
        fprintf(stderr, "Error: Invalid pixel format '%s'.\n", arguments.pix_fmt);
        return 1;
    }

    // Setup encoder configuration
    struct EncoderConfig encoder_config = {
        .output_width = 0,  // To be filled by libvideo2x
        .output_height = 0, // To be filled by libvideo2x
        .codec = codec->id,
        .pix_fmt = pix_fmt,
        .preset = arguments.preset,
        .bit_rate = arguments.bitrate,
        .crf = arguments.crf,
    };

    // Setup struct to store processing status
    struct ProcessingStatus status = {0};

    // Process the video
    if (process_video(
            arguments.input_filename,
            arguments.output_filename,
            &filter_config,
            &encoder_config,
            &status
        )) {
        fprintf(stderr, "Video processing failed.\n");
        return 1;
    }

    // Print processing summary; the elapsed time is cast explicitly because
    // time_t's underlying integer type is implementation-defined
    printf("====== Video2X Processing summary ======\n");
    printf("Video file processed: %s\n", arguments.input_filename);
    printf("Total frames processed: %ld\n", status.processed_frames);
    printf("Total time taken: %lds\n", (long)(time(NULL) - status.start_time));
    printf("Output written to: %s\n", arguments.output_filename);
    return 0;
}