Mirror of https://github.com/k4yt3x/video2x.git (synced 2026-02-13 16:44:47 +08:00)
feat(*): switched to spdlog for logging and organized headers (#1183)
* feat: updated PKGBUILD description
* feat: updated workflow syntax and dependencies
* feat: switched logging to spdlog
* chore: adjusted library defaults
* ci: fixed spdlog format string issues
* docs: fixed docs for libvideo2x functions
* feat: organized header files
* fix: fixed header installation directory
* feat: link spdlog statically if compiled from source
* feat: adjusted libvideo2x log level enum names
* feat: added version.h header

Signed-off-by: k4yt3x <i@k4yt3x.com>
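The change itself is mechanical: printf-style messages written directly to stderr are replaced with spdlog calls, which use fmt-style `{}` placeholders instead of `%s`/`%d` specifiers, and verbosity is now routed through a single log level setting. Below is a minimal sketch of the before/after pattern; the helper function and file name are hypothetical and only illustrate the idiom used throughout the diff.

```cpp
#include <spdlog/spdlog.h>

// Hypothetical helper, only to illustrate the logging switch made in this commit.
int open_input(const char *input_filename) {
    // Before: printf-style formatting, unconditionally written to stderr
    // fprintf(stderr, "Could not open input file '%s'\n", input_filename);

    // After: fmt-style "{}" placeholders; whether this prints is decided globally
    // by spdlog::set_level(), which process_video() now derives from Libvideo2xLogLevel
    spdlog::error("Could not open input file '{}'", input_filename);
    return -1;
}

int main() {
    spdlog::set_level(spdlog::level::info);  // mirrors the library's default level
    open_input("example.mp4");               // hypothetical input path
    return 0;
}
```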
@@ -2,11 +2,13 @@

 #include <cstdio>

+#include <spdlog/spdlog.h>
+
 // Convert AVFrame format
 AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
     AVFrame *dst_frame = av_frame_alloc();
     if (dst_frame == nullptr) {
-        fprintf(stderr, "Failed to allocate destination AVFrame.\n");
+        spdlog::error("Failed to allocate destination AVFrame.");
         return nullptr;
     }

@@ -16,7 +18,7 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {

     // Allocate memory for the converted frame
     if (av_frame_get_buffer(dst_frame, 32) < 0) {
-        fprintf(stderr, "Failed to allocate memory for AVFrame.\n");
+        spdlog::error("Failed to allocate memory for AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -36,7 +38,7 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
     );

     if (sws_ctx == nullptr) {
-        fprintf(stderr, "Failed to initialize swscale context.\n");
+        spdlog::error("Failed to initialize swscale context.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -66,7 +68,7 @@ ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
     if (frame->format != AV_PIX_FMT_BGR24) {
         converted_frame = convert_avframe_pix_fmt(frame, AV_PIX_FMT_BGR24);
         if (!converted_frame) {
-            fprintf(stderr, "Failed to convert AVFrame to BGR24.\n");
+            spdlog::error("Failed to convert AVFrame to BGR24.");
             return ncnn::Mat();
         }
     } else {
@@ -102,7 +104,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     // Step 1: Allocate a destination AVFrame for the specified pixel format
     AVFrame *dst_frame = av_frame_alloc();
     if (!dst_frame) {
-        fprintf(stderr, "Failed to allocate destination AVFrame.\n");
+        spdlog::error("Failed to allocate destination AVFrame.");
         return nullptr;
     }

@@ -112,7 +114,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {

     // Allocate memory for the frame buffer
     if (av_frame_get_buffer(dst_frame, 32) < 0) {
-        fprintf(stderr, "Failed to allocate memory for destination AVFrame.\n");
+        spdlog::error("Failed to allocate memory for destination AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -120,7 +122,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     // Step 2: Convert ncnn::Mat to BGR AVFrame
     AVFrame *bgr_frame = av_frame_alloc();
     if (!bgr_frame) {
-        fprintf(stderr, "Failed to allocate intermediate BGR AVFrame.\n");
+        spdlog::error("Failed to allocate intermediate BGR AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -131,7 +133,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {

     // Allocate memory for the intermediate BGR frame
     if (av_frame_get_buffer(bgr_frame, 32) < 0) {
-        fprintf(stderr, "Failed to allocate memory for BGR AVFrame.\n");
+        spdlog::error("Failed to allocate memory for BGR AVFrame.");
         av_frame_free(&dst_frame);
         av_frame_free(&bgr_frame);
         return nullptr;
@@ -159,7 +161,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     );

     if (sws_ctx == nullptr) {
-        fprintf(stderr, "Failed to initialize swscale context.\n");
+        spdlog::error("Failed to initialize swscale context.");
         av_frame_free(&bgr_frame);
         av_frame_free(&dst_frame);
         return nullptr;
@@ -181,7 +183,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     av_frame_free(&bgr_frame);

     if (ret != dst_frame->height) {
-        fprintf(stderr, "Failed to convert BGR AVFrame to destination pixel format.\n");
+        spdlog::error("Failed to convert BGR AVFrame to destination pixel format.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -4,6 +4,8 @@
 #include <stdlib.h>
 #include <string.h>

+#include <spdlog/spdlog.h>
+
 static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_NONE;

 // Callback function to choose the hardware-accelerated pixel format
@@ -13,7 +15,7 @@ static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, const enum AVPixelF
             return *p;
         }
     }
-    fprintf(stderr, "Failed to get HW surface format.\n");
+    spdlog::error("Failed to get HW surface format.");
     return AV_PIX_FMT_NONE;
 }

@@ -30,19 +32,19 @@ int init_decoder(
     int ret;

     if ((ret = avformat_open_input(&ifmt_ctx, input_filename, NULL, NULL)) < 0) {
-        fprintf(stderr, "Could not open input file '%s'\n", input_filename);
+        spdlog::error("Could not open input file '{}'", input_filename);
         return ret;
     }

     if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
-        fprintf(stderr, "Failed to retrieve input stream information\n");
+        spdlog::error("Failed to retrieve input stream information");
         return ret;
     }

     // Find the first video stream
     ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
     if (ret < 0) {
-        fprintf(stderr, "Could not find video stream in the input, aborting\n");
+        spdlog::error("Could not find video stream in the input file");
         return ret;
     }

@@ -52,13 +54,15 @@ int init_decoder(
     // Set up the decoder
     const AVCodec *decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
     if (!decoder) {
-        fprintf(stderr, "Failed to find decoder for stream #%u\n", stream_index);
+        spdlog::error(
+            "Failed to find decoder for codec ID {}", (int)video_stream->codecpar->codec_id
+        );
         return AVERROR_DECODER_NOT_FOUND;
     }

     codec_ctx = avcodec_alloc_context3(decoder);
     if (!codec_ctx) {
-        fprintf(stderr, "Failed to allocate the decoder context\n");
+        spdlog::error("Failed to allocate the decoder context");
         return AVERROR(ENOMEM);
     }

@@ -71,9 +75,8 @@ int init_decoder(
     for (int i = 0;; i++) {
         const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
         if (config == nullptr) {
-            fprintf(
-                stderr,
-                "Decoder %s does not support device type %s.\n",
+            spdlog::error(
+                "Decoder {} does not support device type {}.",
                 decoder->name,
                 av_hwdevice_get_type_name(hw_type)
             );
@@ -90,7 +93,7 @@ int init_decoder(
     }

     if ((ret = avcodec_parameters_to_context(codec_ctx, video_stream->codecpar)) < 0) {
-        fprintf(stderr, "Failed to copy decoder parameters to input decoder context\n");
+        spdlog::error("Failed to copy decoder parameters to input decoder context");
         return ret;
     }

@@ -100,7 +103,7 @@ int init_decoder(
     codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video_stream, NULL);

     if ((ret = avcodec_open2(codec_ctx, decoder, NULL)) < 0) {
-        fprintf(stderr, "Failed to open decoder for stream #%u\n", stream_index);
+        spdlog::error("Failed to open decoder for stream #{}", stream_index);
         return ret;
     }
@@ -4,12 +4,14 @@
 #include <stdlib.h>
 #include <string.h>

+#include <spdlog/spdlog.h>
+
 #include "conversions.h"

 static enum AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder) {
     const enum AVPixelFormat *p = encoder->pix_fmts;
     if (!p) {
-        fprintf(stderr, "No pixel formats supported by encoder\n");
+        spdlog::error("No pixel formats supported by encoder");
         return AV_PIX_FMT_NONE;
     }
     return *p;
@@ -33,15 +35,14 @@ int init_encoder(

     avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, output_filename);
     if (!fmt_ctx) {
-        fprintf(stderr, "Could not create output context\n");
+        spdlog::error("Could not create output context");
         return AVERROR_UNKNOWN;
     }

     const AVCodec *encoder = avcodec_find_encoder(encoder_config->codec);
     if (!encoder) {
-        fprintf(
-            stderr,
-            "Required video encoder not found for vcodec %s\n",
+        spdlog::error(
+            "Required video encoder not found for vcodec {}",
             avcodec_get_name(encoder_config->codec)
         );
         return AVERROR_ENCODER_NOT_FOUND;
@@ -50,13 +51,13 @@ int init_encoder(
     // Create a new video stream in the output file
     AVStream *out_stream = avformat_new_stream(fmt_ctx, NULL);
     if (!out_stream) {
-        fprintf(stderr, "Failed to allocate the output video stream\n");
+        spdlog::error("Failed to allocate the output video stream");
         return AVERROR_UNKNOWN;
     }

     codec_ctx = avcodec_alloc_context3(encoder);
     if (!codec_ctx) {
-        fprintf(stderr, "Failed to allocate the encoder context\n");
+        spdlog::error("Failed to allocate the encoder context");
         return AVERROR(ENOMEM);
     }

@@ -79,7 +80,7 @@ int init_encoder(
         // Fall back to the default pixel format
         codec_ctx->pix_fmt = get_encoder_default_pix_fmt(encoder);
         if (codec_ctx->pix_fmt == AV_PIX_FMT_NONE) {
-            fprintf(stderr, "Could not get the default pixel format for the encoder\n");
+            spdlog::error("Could not get the default pixel format for the encoder");
             return AVERROR(EINVAL);
         }
     }
@@ -101,13 +102,13 @@ int init_encoder(
     }

     if ((ret = avcodec_open2(codec_ctx, encoder, NULL)) < 0) {
-        fprintf(stderr, "Cannot open video encoder\n");
+        spdlog::error("Cannot open video encoder");
         return ret;
     }

     ret = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
     if (ret < 0) {
-        fprintf(stderr, "Failed to copy encoder parameters to output video stream\n");
+        spdlog::error("Failed to copy encoder parameters to output video stream");
         return ret;
     }

@@ -117,7 +118,7 @@ int init_encoder(
     // Allocate the stream map
     *stream_mapping = (int *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(**stream_mapping));
     if (!*stream_mapping) {
-        fprintf(stderr, "Could not allocate stream mapping\n");
+        spdlog::error("Could not allocate stream mapping");
         return AVERROR(ENOMEM);
     }

@@ -143,13 +144,13 @@ int init_encoder(
         // Create corresponding output stream
         AVStream *out_stream = avformat_new_stream(fmt_ctx, NULL);
         if (!out_stream) {
-            fprintf(stderr, "Failed allocating output stream\n");
+            spdlog::error("Failed allocating output stream");
             return AVERROR_UNKNOWN;
         }

         ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
         if (ret < 0) {
-            fprintf(stderr, "Failed to copy codec parameters\n");
+            spdlog::error("Failed to copy codec parameters");
             return ret;
         }
         out_stream->codecpar->codec_tag = 0;
@@ -165,7 +166,7 @@ int init_encoder(
     if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
         ret = avio_open(&fmt_ctx->pb, output_filename, AVIO_FLAG_WRITE);
         if (ret < 0) {
-            fprintf(stderr, "Could not open output file '%s'\n", output_filename);
+            spdlog::error("Could not open output file '{}'", output_filename);
             return ret;
         }
     }
@@ -188,7 +189,7 @@ int encode_and_write_frame(
     if (frame->format != enc_ctx->pix_fmt) {
         AVFrame *converted_frame = convert_avframe_pix_fmt(frame, enc_ctx->pix_fmt);
         if (!converted_frame) {
-            fprintf(stderr, "Error converting frame to encoder's pixel format\n");
+            spdlog::error("Error converting frame to encoder's pixel format");
             return AVERROR_EXTERNAL;
         }

@@ -198,13 +199,13 @@ int encode_and_write_frame(

     AVPacket *enc_pkt = av_packet_alloc();
     if (!enc_pkt) {
-        fprintf(stderr, "Could not allocate AVPacket\n");
+        spdlog::error("Could not allocate AVPacket");
         return AVERROR(ENOMEM);
     }

     ret = avcodec_send_frame(enc_ctx, frame);
     if (ret < 0) {
-        fprintf(stderr, "Error sending frame to encoder\n");
+        spdlog::error("Error sending frame to encoder");
         av_packet_free(&enc_pkt);
         return ret;
     }
@@ -215,7 +216,7 @@ int encode_and_write_frame(
             av_packet_unref(enc_pkt);
             break;
         } else if (ret < 0) {
-            fprintf(stderr, "Error encoding frame\n");
+            spdlog::error("Error encoding frame");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -230,7 +231,7 @@ int encode_and_write_frame(
         ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
         av_packet_unref(enc_pkt);
         if (ret < 0) {
-            fprintf(stderr, "Error muxing packet\n");
+            spdlog::error("Error muxing packet");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -244,7 +245,7 @@ int flush_encoder(AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx) {
     int ret;
     AVPacket *enc_pkt = av_packet_alloc();
     if (!enc_pkt) {
-        fprintf(stderr, "Could not allocate AVPacket\n");
+        spdlog::error("Could not allocate AVPacket");
         return AVERROR(ENOMEM);
     }

@@ -255,7 +256,7 @@ int flush_encoder(AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx) {
             av_packet_unref(enc_pkt);
             break;
         } else if (ret < 0) {
-            fprintf(stderr, "Error encoding frame\n");
+            spdlog::error("Error encoding frame");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -268,7 +269,7 @@ int flush_encoder(AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx) {
         ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
         av_packet_unref(enc_pkt);
         if (ret < 0) {
-            fprintf(stderr, "Error muxing packet\n");
+            spdlog::error("Error muxing packet");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -8,6 +8,8 @@
 #include <cstring>
 #endif

+#include <spdlog/spdlog.h>
+
 #if _WIN32
 std::filesystem::path get_executable_directory() {
     std::vector<wchar_t> filepath(MAX_PATH);
@@ -15,7 +17,7 @@ std::filesystem::path get_executable_directory() {
     // Get the executable path, expanding the buffer if necessary
     DWORD size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
     if (size == 0) {
-        fprintf(stderr, "Error getting executable path: %lu\n", GetLastError());
+        spdlog::error("Error getting executable path: {}", GetLastError());
         return std::filesystem::path();
     }

@@ -24,7 +26,7 @@ std::filesystem::path get_executable_directory() {
         filepath.resize(filepath.size() * 2);
         size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
         if (size == 0) {
-            fprintf(stderr, "Error getting executable path: %lu\n", GetLastError());
+            spdlog::error("Error getting executable path: {}", GetLastError());
             return std::filesystem::path();
         }
     }
@@ -39,7 +41,7 @@ std::filesystem::path get_executable_directory() {
     std::filesystem::path filepath = std::filesystem::read_symlink("/proc/self/exe", ec);

     if (ec) {
-        fprintf(stderr, "Error reading /proc/self/exe: %s\n", ec.message().c_str());
+        spdlog::error("Error reading /proc/self/exe: {}", ec.message());
         return std::filesystem::path();
     }
@@ -3,6 +3,8 @@
 #include <stdio.h>
 #include <stdlib.h>

+#include <spdlog/spdlog.h>
+
 #include "fsutils.h"

 int init_libplacebo(
@@ -20,7 +22,7 @@ int init_libplacebo(

     AVFilterGraph *graph = avfilter_graph_alloc();
     if (!graph) {
-        fprintf(stderr, "Unable to create filter graph.\n");
+        spdlog::error("Unable to create filter graph.");
         return AVERROR(ENOMEM);
     }

@@ -30,7 +32,7 @@ int init_libplacebo(
         args,
         sizeof(args),
         "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:frame_rate=%d/%d:"
-        "pixel_aspect=%d/%d:colorspace=%d",
+        "pixel_aspect=%d/%d:colorspace=%d:range=%d",
         dec_ctx->width,
         dec_ctx->height,
         dec_ctx->pix_fmt,
@@ -40,12 +42,13 @@ int init_libplacebo(
         dec_ctx->framerate.den,
         dec_ctx->sample_aspect_ratio.num,
         dec_ctx->sample_aspect_ratio.den,
-        dec_ctx->colorspace
+        dec_ctx->colorspace,
+        dec_ctx->color_range
     );

     ret = avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in", args, NULL, graph);
     if (ret < 0) {
-        fprintf(stderr, "Cannot create buffer source\n");
+        spdlog::error("Cannot create buffer source.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -55,7 +58,7 @@ int init_libplacebo(
     // Create the libplacebo filter
     const AVFilter *libplacebo_filter = avfilter_get_by_name("libplacebo");
     if (!libplacebo_filter) {
-        fprintf(stderr, "Filter 'libplacebo' not found\n");
+        spdlog::error("Filter 'libplacebo' not found.");
         avfilter_graph_free(&graph);
         return AVERROR_FILTER_NOT_FOUND;
     }
@@ -84,7 +87,7 @@ int init_libplacebo(
         &libplacebo_ctx, libplacebo_filter, "libplacebo", filter_args, NULL, graph
     );
     if (ret < 0) {
-        fprintf(stderr, "Cannot create libplacebo filter\n");
+        spdlog::error("Cannot create libplacebo filter.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -97,7 +100,7 @@ int init_libplacebo(
     // Link buffersrc to libplacebo
     ret = avfilter_link(last_filter, 0, libplacebo_ctx, 0);
     if (ret < 0) {
-        fprintf(stderr, "Error connecting buffersrc to libplacebo filter\n");
+        spdlog::error("Error connecting buffersrc to libplacebo filter.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -108,7 +111,7 @@ int init_libplacebo(
     const AVFilter *buffersink = avfilter_get_by_name("buffersink");
     ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, graph);
     if (ret < 0) {
-        fprintf(stderr, "Cannot create buffer sink\n");
+        spdlog::error("Cannot create buffer sink.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -116,7 +119,7 @@ int init_libplacebo(
     // Link libplacebo to buffersink
     ret = avfilter_link(last_filter, 0, *buffersink_ctx, 0);
     if (ret < 0) {
-        fprintf(stderr, "Error connecting libplacebo filter to buffersink\n");
+        spdlog::error("Error connecting libplacebo filter to buffersink.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -124,7 +127,7 @@ int init_libplacebo(
     // Configure the filter graph
     ret = avfilter_graph_config(graph, NULL);
     if (ret < 0) {
-        fprintf(stderr, "Error configuring the filter graph\n");
+        spdlog::error("Error configuring the filter graph.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -2,6 +2,8 @@

 #include <cstdio>

+#include <spdlog/spdlog.h>
+
 #include "fsutils.h"
 #include "libplacebo.h"

@@ -42,7 +44,7 @@ int LibplaceboFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Check if the shader file exists
     if (!std::filesystem::exists(shader_full_path)) {
-        fprintf(stderr, "libplacebo shader file not found: %s\n", shader_full_path.c_str());
+        spdlog::error("libplacebo shader file not found: {}", shader_full_path.string());
         return -1;
     }

@@ -67,14 +69,14 @@ int LibplaceboFilter::process_frame(AVFrame *input_frame, AVFrame **output_frame
     // Get the filtered frame
     *output_frame = av_frame_alloc();
     if (*output_frame == nullptr) {
-        fprintf(stderr, "Failed to allocate output frame\n");
+        spdlog::error("Failed to allocate output frame");
         return -1;
     }

     // Feed the frame to the filter graph
     ret = av_buffersrc_add_frame(buffersrc_ctx, input_frame);
     if (ret < 0) {
-        fprintf(stderr, "Error while feeding the filter graph\n");
+        spdlog::error("Error while feeding the filter graph");
         return ret;
     }

@@ -95,7 +97,7 @@ int LibplaceboFilter::process_frame(AVFrame *input_frame, AVFrame **output_frame
 int LibplaceboFilter::flush(std::vector<AVFrame *> &processed_frames) {
     int ret = av_buffersrc_add_frame(buffersrc_ctx, nullptr);
     if (ret < 0) {
-        fprintf(stderr, "Error while flushing filter graph\n");
+        spdlog::error("Error while flushing filter graph");
         return ret;
     }
@@ -1,12 +1,13 @@
 #include "libvideo2x.h"

 #include <libavutil/mathematics.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <cstdint>
 #include <thread>

+#include <spdlog/spdlog.h>

 #include "decoder.h"
 #include "encoder.h"
 #include "filter.h"
@@ -16,13 +17,16 @@
 /**
  * @brief Process frames using the selected filter.
  *
  * @param[in] encoder_config Encoder configurations
  * @param[in,out] proc_ctx Struct containing the processing context
- * @param[in] fmt_ctx Input format context
+ * @param[in] ifmt_ctx Input format context
  * @param[in] ofmt_ctx Output format context
  * @param[in] dec_ctx Decoder context
  * @param[in] enc_ctx Encoder context
  * @param[in] filter Filter instance
  * @param[in] video_stream_index Index of the video stream in the input format context
  * @param[in] stream_mapping Array mapping input stream indexes to output stream indexes
  * @param[in] benchmark Flag to enable benchmarking mode
  * @return int 0 on success, negative value on error
  */
 int process_frames(
@@ -79,7 +83,7 @@ int process_frames(
             ret = avcodec_send_packet(dec_ctx, &packet);
             if (ret < 0) {
                 av_strerror(ret, errbuf, sizeof(errbuf));
-                fprintf(stderr, "Error sending packet to decoder: %s\n", errbuf);
+                spdlog::error("Error sending packet to decoder: {}", errbuf);
                 av_packet_unref(&packet);
                 goto end;
             }
@@ -97,7 +101,7 @@ int process_frames(
                     break;
                 } else if (ret < 0) {
                     av_strerror(ret, errbuf, sizeof(errbuf));
-                    fprintf(stderr, "Error decoding video frame: %s\n", errbuf);
+                    spdlog::error("Error decoding video frame: {}", errbuf);
                     goto end;
                 }

@@ -112,7 +116,7 @@ int process_frames(
                     );
                     if (ret < 0) {
                         av_strerror(ret, errbuf, sizeof(errbuf));
-                        fprintf(stderr, "Error encoding/writing frame: %s\n", errbuf);
+                        spdlog::error("Error encoding/writing frame: {}", errbuf);
                         av_frame_free(&processed_frame);
                         goto end;
                     }
@@ -121,12 +125,14 @@ int process_frames(
                     av_frame_free(&processed_frame);
                     proc_ctx->processed_frames++;
                 } else if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
-                    fprintf(stderr, "Filter returned an error\n");
+                    spdlog::error("Filter returned an error");
                     goto end;
                 }

                 av_frame_unref(frame);
                 // TODO: Print the debug processing status
+                spdlog::debug(
+                    "Processed frame {}/{}", proc_ctx->processed_frames, proc_ctx->total_frames
+                );
             }
         } else if (encoder_config->copy_streams && stream_mapping[packet.stream_index] >= 0) {
             AVStream *in_stream = ifmt_ctx->streams[packet.stream_index];
@@ -140,7 +146,8 @@ int process_frames(
             // If copy streams is enabled, copy the packet to the output
             ret = av_interleaved_write_frame(ofmt_ctx, &packet);
             if (ret < 0) {
-                fprintf(stderr, "Error muxing packet\n");
+                av_strerror(ret, errbuf, sizeof(errbuf));
+                spdlog::error("Error muxing packet: {}", errbuf);
                 av_packet_unref(&packet);
                 return ret;
             }
@@ -152,7 +159,7 @@ int process_frames(
     ret = filter->flush(flushed_frames);
     if (ret < 0) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        fprintf(stderr, "Error flushing filter: %s\n", errbuf);
+        spdlog::error("Error flushing filter: {}", errbuf);
         goto end;
     }

@@ -161,7 +168,7 @@ int process_frames(
         ret = encode_and_write_frame(flushed_frame, enc_ctx, ofmt_ctx, video_stream_index);
         if (ret < 0) {
             av_strerror(ret, errbuf, sizeof(errbuf));
-            fprintf(stderr, "Error encoding/writing flushed frame: %s\n", errbuf);
+            spdlog::error("Error encoding/writing flushed frame: {}", errbuf);
             av_frame_free(&flushed_frame);
             flushed_frame = nullptr;
             goto end;
@@ -174,7 +181,7 @@ int process_frames(
     ret = flush_encoder(enc_ctx, ofmt_ctx);
     if (ret < 0) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        fprintf(stderr, "Error flushing encoder: %s\n", errbuf);
+        spdlog::error("Error flushing encoder: {}", errbuf);
         goto end;
     }
@@ -230,6 +237,8 @@ void cleanup(
 *
 * @param[in] input_filename Path to the input video file
 * @param[in] output_filename Path to the output video file
+* @param[in] log_level Log level
 * @param[in] benchmark Flag to enable benchmarking mode
 * @param[in] hw_type Hardware device type
 * @param[in] filter_config Filter configurations
 * @param[in] encoder_config Encoder configurations
@@ -239,6 +248,7 @@ void cleanup(
 extern "C" int process_video(
     const char *input_filename,
     const char *output_filename,
+    Libvideo2xLogLevel log_level,
     bool benchmark,
     AVHWDeviceType hw_type,
     const FilterConfig *filter_config,
@@ -253,13 +263,51 @@ extern "C" int process_video(
     int *stream_mapping = nullptr;
     Filter *filter = nullptr;
     int video_stream_index = -1;
+    char errbuf[AV_ERROR_MAX_STRING_SIZE];
     int ret = 0;

+    // Set the log level for FFmpeg and spdlog (libvideo2x)
+    switch (log_level) {
+        case LIBVIDEO2X_LOG_LEVEL_TRACE:
+            av_log_set_level(AV_LOG_TRACE);
+            spdlog::set_level(spdlog::level::trace);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_DEBUG:
+            av_log_set_level(AV_LOG_DEBUG);
+            spdlog::set_level(spdlog::level::debug);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_INFO:
+            av_log_set_level(AV_LOG_INFO);
+            spdlog::set_level(spdlog::level::info);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_WARNING:
+            av_log_set_level(AV_LOG_WARNING);
+            spdlog::set_level(spdlog::level::warn);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_ERROR:
+            av_log_set_level(AV_LOG_ERROR);
+            spdlog::set_level(spdlog::level::err);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_CRITICAL:
+            av_log_set_level(AV_LOG_FATAL);
+            spdlog::set_level(spdlog::level::critical);
+            break;
+        case LIBVIDEO2X_LOG_LEVEL_OFF:
+            av_log_set_level(AV_LOG_QUIET);
+            spdlog::set_level(spdlog::level::off);
+            break;
+        default:
+            av_log_set_level(AV_LOG_INFO);
+            spdlog::set_level(spdlog::level::info);
+            break;
+    }
+
     // Initialize hardware device context
     if (hw_type != AV_HWDEVICE_TYPE_NONE) {
         ret = av_hwdevice_ctx_create(&hw_ctx, hw_type, NULL, NULL, 0);
         if (ret < 0) {
-            fprintf(stderr, "Unable to initialize hardware device context\n");
+            av_strerror(ret, errbuf, sizeof(errbuf));
+            spdlog::error("Error initializing hardware device context: {}", errbuf);
            return ret;
         }
     }
@@ -267,7 +315,8 @@ extern "C" int process_video(
     // Initialize input
     ret = init_decoder(hw_type, hw_ctx, input_filename, &ifmt_ctx, &dec_ctx, &video_stream_index);
     if (ret < 0) {
-        fprintf(stderr, "Failed to initialize decoder\n");
+        av_strerror(ret, errbuf, sizeof(errbuf));
+        spdlog::error("Failed to initialize decoder: {}", errbuf);
         cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
         return ret;
     }
@@ -300,7 +349,8 @@ extern "C" int process_video(
         &stream_mapping
     );
     if (ret < 0) {
-        fprintf(stderr, "Failed to initialize encoder\n");
+        av_strerror(ret, errbuf, sizeof(errbuf));
+        spdlog::error("Failed to initialize encoder: {}", errbuf);
         cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
         return ret;
     }
@@ -308,7 +358,8 @@ extern "C" int process_video(
     // Write the output file header
     ret = avformat_write_header(ofmt_ctx, NULL);
     if (ret < 0) {
-        fprintf(stderr, "Error occurred when opening output file\n");
+        av_strerror(ret, errbuf, sizeof(errbuf));
+        spdlog::error("Error occurred when opening output file: {}", errbuf);
         cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
         return ret;
     }
@@ -320,14 +371,14 @@ extern "C" int process_video(

             // Validate shader path
             if (!config.shader_path) {
-                fprintf(stderr, "Shader path must be provided for the libplacebo filter\n");
+                spdlog::error("Shader path must be provided for the libplacebo filter");
                 cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
                 return -1;
             }

             // Validate output dimensions
             if (config.output_width <= 0 || config.output_height <= 0) {
-                fprintf(stderr, "Output dimensions must be provided for the libplacebo filter\n");
+                spdlog::error("Output dimensions must be provided for the libplacebo filter");
                 cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
                 return -1;
             }
@@ -342,14 +393,14 @@ extern "C" int process_video(

             // Validate model name
             if (!config.model) {
-                fprintf(stderr, "Model name must be provided for the RealESRGAN filter\n");
+                spdlog::error("Model name must be provided for the RealESRGAN filter");
                 cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
                 return -1;
             }

             // Validate scaling factor
             if (config.scaling_factor <= 0) {
-                fprintf(stderr, "Scaling factor must be provided for the RealESRGAN filter\n");
+                spdlog::error("Scaling factor must be provided for the RealESRGAN filter");
                 cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
                 return -1;
             }
@@ -360,7 +411,7 @@ extern "C" int process_video(
             break;
         }
         default:
-            fprintf(stderr, "Unknown filter type\n");
+            spdlog::error("Unknown filter type");
             cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
             return -1;
     }
@@ -368,7 +419,8 @@ extern "C" int process_video(
     // Initialize the filter
     ret = filter->init(dec_ctx, enc_ctx, hw_ctx);
     if (ret < 0) {
-        fprintf(stderr, "Failed to initialize filter\n");
+        av_strerror(ret, errbuf, sizeof(errbuf));
+        spdlog::error("Failed to initialize filter: {}", errbuf);
         cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
         return ret;
     }
@@ -387,7 +439,8 @@ extern "C" int process_video(
         benchmark
     );
     if (ret < 0) {
-        fprintf(stderr, "Error processing frames\n");
+        av_strerror(ret, errbuf, sizeof(errbuf));
+        spdlog::error("Error processing frames: {}", errbuf);
         cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);
         return ret;
     }
@@ -399,9 +452,8 @@ extern "C" int process_video(
     cleanup(ifmt_ctx, ofmt_ctx, dec_ctx, enc_ctx, hw_ctx, stream_mapping, filter);

     if (ret < 0 && ret != AVERROR_EOF) {
-        char errbuf[AV_ERROR_MAX_STRING_SIZE];
         av_strerror(ret, errbuf, sizeof(errbuf));
-        fprintf(stderr, "Error occurred: %s\n", errbuf);
+        spdlog::error("Error occurred: {}", errbuf);
         return ret;
     }
     return 0;
@@ -4,6 +4,8 @@
 #include <cstdio>
 #include <string>

+#include <spdlog/spdlog.h>
+
 #include "conversions.h"
 #include "fsutils.h"

@@ -47,7 +49,7 @@ int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
         model_bin_path = custom_model_bin_path;
     } else {
         // Neither model name nor custom model paths provided
-        fprintf(stderr, "Model or model paths must be provided for RealESRGAN filter\n");
+        spdlog::error("Model or model paths must be provided for RealESRGAN filter");
         return -1;
     }

@@ -57,13 +59,11 @@ int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Check if the model files exist
     if (!std::filesystem::exists(model_param_full_path)) {
-        fprintf(
-            stderr, "RealESRGAN model param file not found: %s\n", model_param_full_path.c_str()
-        );
+        spdlog::error("RealESRGAN model param file not found: {}", model_param_full_path.string());
         return -1;
     }
     if (!std::filesystem::exists(model_bin_full_path)) {
-        fprintf(stderr, "RealESRGAN model bin file not found: %s\n", model_bin_full_path.c_str());
+        spdlog::error("RealESRGAN model bin file not found: {}", model_bin_full_path.string());
         return -1;
     }

@@ -77,7 +77,7 @@ int RealesrganFilter::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Load the model
     if (realesrgan->load(model_param_full_path, model_bin_full_path) != 0) {
-        fprintf(stderr, "Failed to load RealESRGAN model\n");
+        spdlog::error("Failed to load RealESRGAN model");
         return -1;
     }

@@ -106,7 +106,7 @@ int RealesrganFilter::process_frame(AVFrame *input_frame, AVFrame **output_frame
     // Convert the input frame to RGB24
     ncnn::Mat input_mat = avframe_to_ncnn_mat(input_frame);
     if (input_mat.empty()) {
-        fprintf(stderr, "Failed to convert AVFrame to ncnn::Mat\n");
+        spdlog::error("Failed to convert AVFrame to ncnn::Mat");
         return -1;
     }

@@ -117,7 +117,7 @@ int RealesrganFilter::process_frame(AVFrame *input_frame, AVFrame **output_frame

     ret = realesrgan->process(input_mat, output_mat);
     if (ret != 0) {
-        fprintf(stderr, "RealESRGAN processing failed\n");
+        spdlog::error("RealESRGAN processing failed");
         return ret;
     }
@@ -17,12 +17,11 @@
 #include <libavutil/pixdesc.h>
 #include <libavutil/pixfmt.h>

-#include <libvideo2x.h>
+#include <libvideo2x/libvideo2x.h>
+#include <libvideo2x/version.h>

 #include "getopt.h"

-const char *VIDEO2X_VERSION = "6.0.0";
-
 // Set UNIX terminal input to non-blocking mode
 #ifndef _WIN32
 void set_nonblocking_input(bool enable) {
@@ -42,6 +41,8 @@ void set_nonblocking_input(bool enable) {

 // Define command line options
 static struct option long_options[] = {
+    {"loglevel", required_argument, NULL, 0},
+    {"noprogress", no_argument, NULL, 0},
     {"version", no_argument, NULL, 'v'},
     {"help", no_argument, NULL, 0},

@@ -75,6 +76,8 @@ static struct option long_options[] = {
 // Structure to hold parsed arguments
 struct arguments {
     // General options
+    const char *loglevel;
+    bool noprogress;
     const char *input_filename;
     const char *output_filename;
     const char *filter_type;
@@ -129,6 +132,10 @@ int is_valid_realesrgan_model(const char *model) {
 void print_help() {
     printf("Usage: video2x [OPTIONS]\n");
     printf("\nOptions:\n");
+    printf(
+        " --loglevel Set log level (trace, debug, info, warn, error, critical, none)\n"
+    );
+    printf(" --noprogress Do not display the progress bar\n");
     printf(" -v, --version Print program version\n");
     printf(" -?, --help Display this help page\n");
     printf("\nGeneral Processing Options:\n");
@@ -166,6 +173,8 @@ void parse_arguments(int argc, char **argv, struct arguments *arguments) {
     int c;

     // Default argument values
+    arguments->loglevel = "info";
+    arguments->noprogress = false;
     arguments->input_filename = NULL;
     arguments->output_filename = NULL;
     arguments->filter_type = NULL;
@@ -269,10 +278,14 @@ void parse_arguments(int argc, char **argv, struct arguments *arguments) {
                 }
                 break;
             case 'v':
-                printf("Video2X v%s\n", VIDEO2X_VERSION);
+                printf("Video2X version %s\n", LIBVIDEO2X_VERSION_STRING);
                 exit(0);
             case 0: // Long-only options without short equivalents
-                if (strcmp(long_options[option_index].name, "help") == 0) {
+                if (strcmp(long_options[option_index].name, "loglevel") == 0) {
+                    arguments->loglevel = optarg;
+                } else if (strcmp(long_options[option_index].name, "noprogress") == 0) {
+                    arguments->noprogress = true;
+                } else if (strcmp(long_options[option_index].name, "help") == 0) {
                     print_help();
                     exit(0);
                 } else if (strcmp(long_options[option_index].name, "nocopystreams") == 0) {
@@ -323,11 +336,33 @@ void parse_arguments(int argc, char **argv, struct arguments *arguments) {
         }
     }

+enum Libvideo2xLogLevel parse_log_level(const char *level_name) {
+    if (strcmp(level_name, "trace") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_TRACE;
+    } else if (strcmp(level_name, "debug") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_DEBUG;
+    } else if (strcmp(level_name, "info") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_INFO;
+    } else if (strcmp(level_name, "warning") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_WARNING;
+    } else if (strcmp(level_name, "error") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_ERROR;
+    } else if (strcmp(level_name, "critical") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_CRITICAL;
+    } else if (strcmp(level_name, "off") == 0) {
+        return LIBVIDEO2X_LOG_LEVEL_OFF;
+    } else {
+        fprintf(stderr, "Warning: Invalid log level specified. Defaulting to 'info'.\n");
+        return LIBVIDEO2X_LOG_LEVEL_INFO;
+    }
+}

 // Wrapper function for video processing thread
 int process_video_thread(void *arg) {
     struct ProcessVideoThreadArguments *thread_args = (struct ProcessVideoThreadArguments *)arg;

     // Extract individual arguments
+    enum Libvideo2xLogLevel log_level = parse_log_level(thread_args->arguments->loglevel);
     struct arguments *arguments = thread_args->arguments;
     enum AVHWDeviceType hw_device_type = thread_args->hw_device_type;
     struct FilterConfig *filter_config = thread_args->filter_config;
@@ -338,6 +373,7 @@ int process_video_thread(void *arg) {
     int result = process_video(
         arguments->input_filename,
         arguments->output_filename,
+        log_level,
         arguments->benchmark,
         hw_device_type,
         filter_config,
@@ -436,19 +472,18 @@ int main(int argc, char **argv) {
         .proc_ctx = &proc_ctx
     };

-    // Enable non-blocking input
-#ifndef _WIN32
-    set_nonblocking_input(true);
-#endif
-
     // Create a thread for video processing
     thrd_t processing_thread;
     if (thrd_create(&processing_thread, process_video_thread, &thread_args) != thrd_success) {
         fprintf(stderr, "Failed to create processing thread\n");
         return 1;
     }
-    printf("[Video2X] Video processing started.\n");
-    printf("[Video2X] Press SPACE to pause/resume, 'q' to abort.\n");
+    printf("Video processing started; press SPACE to pause/resume, 'q' to abort.\n");

+    // Enable non-blocking input
+#ifndef _WIN32
+    set_nonblocking_input(true);
+#endif

     // Main thread loop to display progress and handle input
     while (!proc_ctx.completed) {
@@ -468,23 +503,21 @@ int main(int argc, char **argv) {
                 // Toggle pause state
                 proc_ctx.pause = !proc_ctx.pause;
                 if (proc_ctx.pause) {
-                    printf("\n[Video2X] Processing paused. Press SPACE to resume, 'q' to abort.");
+                    printf("\nProcessing paused. Press SPACE to resume, 'q' to abort.\n");
                 } else {
-                    printf("\n[Video2X] Resuming processing...");
+                    printf("Resuming processing...\n");
                 }
                 fflush(stdout);
             } else if (ch == 'q' || ch == 'Q') {
                 // Abort processing
-                printf("\n[Video2X] Aborting processing...");
-                fflush(stdout);
+                printf("Aborting processing...\n");
                 proc_ctx.abort = true;
                 break;
             }

             // Display progress
-            if (!proc_ctx.pause && proc_ctx.total_frames > 0) {
+            if (!arguments.noprogress && !proc_ctx.pause && proc_ctx.total_frames > 0) {
                 printf(
-                    "\r[Video2X] Processing frame %ld/%ld (%.2f%%); time elapsed: %lds",
+                    "\rProcessing frame %ld/%ld (%.2f%%); time elapsed: %lds",
                     proc_ctx.processed_frames,
                     proc_ctx.total_frames,
                     proc_ctx.total_frames > 0
@@ -495,10 +528,9 @@ int main(int argc, char **argv) {
                 fflush(stdout);
             }

-        // Sleep for a short duration
-        thrd_sleep(&(struct timespec){.tv_sec = 0, .tv_nsec = 100000000}, NULL); // Sleep for 100ms
+        // Sleep for 50ms
+        thrd_sleep(&(struct timespec){.tv_sec = 0, .tv_nsec = 50000000}, NULL);
     }
-    puts(""); // Print newline after progress bar is complete

     // Restore terminal to blocking mode
 #ifndef _WIN32
@@ -509,6 +541,11 @@ int main(int argc, char **argv) {
     int process_result;
     thrd_join(processing_thread, &process_result);

+    // Print a newline if progress bar was displayed
+    if (!arguments.noprogress && process_result == 0) {
+        puts("");
+    }
+
     if (proc_ctx.abort) {
         fprintf(stderr, "Video processing aborted\n");
         return 2;
||||
Reference in New Issue
Block a user