Mirror of https://github.com/k4yt3x/video2x.git (synced 2026-02-15 17:54:49 +08:00)

feat(logger): add logger manager to provide unified logging (#1267)

Signed-off-by: k4yt3x <i@k4yt3x.com>
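The change below routes the library's log output through a single logger owned by the new LoggerManager instead of spdlog's global logger. A minimal sketch of the call-site pattern repeated throughout the diff, assuming logger() is the accessor declared in the new logger_manager.h header (the header itself is not part of this diff):

    // Before: each module logged through spdlog's global default logger.
    spdlog::error("Failed to open decoder for stream #{}", stream_index);

    // After: modules log through the shared logger returned by video2x::logger(),
    // which LoggerManager owns and can reconfigure (sinks, pattern, level) at runtime.
    logger()->error("Failed to open decoder for stream #{}", stream_index);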
@@ -10,6 +10,7 @@ extern "C" {
 #include <spdlog/spdlog.h>

 #include "conversions.h"
+#include "logger_manager.h"

 namespace video2x {
 namespace avutils {
@@ -78,7 +79,7 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat
     );
     if (ret < 0) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        spdlog::error("Failed to get supported pixel formats: {}", errbuf);
+        logger()->error("Failed to get supported pixel formats: {}", errbuf);
         return AV_PIX_FMT_NONE;
     }

@@ -119,7 +120,7 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat
         }
     }
     if (best_pix_fmt == AV_PIX_FMT_NONE) {
-        spdlog::error("No suitable pixel format found for encoder");
+        logger()->error("No suitable pixel format found for encoder");
     }

     if (target_pix_fmt != AV_PIX_FMT_NONE && best_pix_fmt != target_pix_fmt) {
@@ -136,12 +137,12 @@ AVPixelFormat get_encoder_default_pix_fmt(const AVCodec *encoder, AVPixelFormat

 float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
     if (!frame1 || !frame2) {
-        spdlog::error("Invalid frame(s) provided for comparison");
+        logger()->error("Invalid frame(s) provided for comparison");
         return -1.0f;
     }

     if (frame1->width != frame2->width || frame1->height != frame2->height) {
-        spdlog::error("Frame dimensions do not match");
+        logger()->error("Frame dimensions do not match");
         return -1.0f;
     }

@@ -154,7 +155,7 @@ float get_frame_diff(AVFrame *frame1, AVFrame *frame2) {
     AVFrame *rgb_frame2 = conversions::convert_avframe_pix_fmt(frame2, target_pix_fmt);

     if (!rgb_frame1 || !rgb_frame2) {
-        spdlog::error("Failed to convert frames to target pixel format");
+        logger()->error("Failed to convert frames to target pixel format");
         if (rgb_frame1) {
             av_frame_free(&rgb_frame1);
         }

@@ -5,6 +5,8 @@

 #include <spdlog/spdlog.h>

+#include "logger_manager.h"
+
 namespace video2x {
 namespace conversions {

@@ -12,7 +14,7 @@ namespace conversions {
 AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
     AVFrame *dst_frame = av_frame_alloc();
     if (dst_frame == nullptr) {
-        spdlog::error("Failed to allocate destination AVFrame.");
+        logger()->error("Failed to allocate destination AVFrame.");
         return nullptr;
     }

@@ -22,7 +24,7 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {

     // Allocate memory for the converted frame
     if (av_frame_get_buffer(dst_frame, 32) < 0) {
-        spdlog::error("Failed to allocate memory for AVFrame.");
+        logger()->error("Failed to allocate memory for AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -42,7 +44,7 @@ AVFrame *convert_avframe_pix_fmt(AVFrame *src_frame, AVPixelFormat pix_fmt) {
     );

     if (sws_ctx == nullptr) {
-        spdlog::error("Failed to initialize swscale context.");
+        logger()->error("Failed to initialize swscale context.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -72,7 +74,7 @@ ncnn::Mat avframe_to_ncnn_mat(AVFrame *frame) {
     if (frame->format != AV_PIX_FMT_BGR24) {
         converted_frame = convert_avframe_pix_fmt(frame, AV_PIX_FMT_BGR24);
         if (!converted_frame) {
-            spdlog::error("Failed to convert AVFrame to BGR24.");
+            logger()->error("Failed to convert AVFrame to BGR24.");
             return ncnn::Mat();
         }
     } else {
@@ -110,7 +112,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     // Step 1: Allocate a destination AVFrame for the specified pixel format
     AVFrame *dst_frame = av_frame_alloc();
     if (!dst_frame) {
-        spdlog::error("Failed to allocate destination AVFrame.");
+        logger()->error("Failed to allocate destination AVFrame.");
         return nullptr;
     }

@@ -120,7 +122,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {

     // Allocate memory for the frame buffer
     if (av_frame_get_buffer(dst_frame, 32) < 0) {
-        spdlog::error("Failed to allocate memory for destination AVFrame.");
+        logger()->error("Failed to allocate memory for destination AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -128,7 +130,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     // Step 2: Convert ncnn::Mat to BGR AVFrame
     AVFrame *bgr_frame = av_frame_alloc();
     if (!bgr_frame) {
-        spdlog::error("Failed to allocate intermediate BGR AVFrame.");
+        logger()->error("Failed to allocate intermediate BGR AVFrame.");
         av_frame_free(&dst_frame);
         return nullptr;
     }
@@ -139,7 +141,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {

     // Allocate memory for the intermediate BGR frame
     if (av_frame_get_buffer(bgr_frame, 32) < 0) {
-        spdlog::error("Failed to allocate memory for BGR AVFrame.");
+        logger()->error("Failed to allocate memory for BGR AVFrame.");
         av_frame_free(&dst_frame);
         av_frame_free(&bgr_frame);
         return nullptr;
@@ -169,7 +171,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     );

     if (sws_ctx == nullptr) {
-        spdlog::error("Failed to initialize swscale context.");
+        logger()->error("Failed to initialize swscale context.");
         av_frame_free(&bgr_frame);
         av_frame_free(&dst_frame);
         return nullptr;
@@ -191,7 +193,7 @@ AVFrame *ncnn_mat_to_avframe(const ncnn::Mat &mat, AVPixelFormat pix_fmt) {
     av_frame_free(&bgr_frame);

     if (ret != dst_frame->height) {
-        spdlog::error("Failed to convert BGR AVFrame to destination pixel format.");
+        logger()->error("Failed to convert BGR AVFrame to destination pixel format.");
         av_frame_free(&dst_frame);
         return nullptr;
     }

@@ -2,6 +2,8 @@

 #include <spdlog/spdlog.h>

+#include "logger_manager.h"
+
 namespace video2x {
 namespace decoder {

@@ -26,7 +28,7 @@ AVPixelFormat Decoder::get_hw_format(AVCodecContext *_, const AVPixelFormat *pix
             return *p;
         }
     }
-    spdlog::error("Failed to get HW surface format.");
+    logger()->error("Failed to get HW surface format.");
     return AV_PIX_FMT_NONE;
 }

@@ -39,20 +41,20 @@ int Decoder::init(

     // Open the input file
     if ((ret = avformat_open_input(&fmt_ctx_, in_fpath.u8string().c_str(), nullptr, nullptr)) < 0) {
-        spdlog::error("Could not open input file '{}'", in_fpath.u8string());
+        logger()->error("Could not open input file '{}'", in_fpath.u8string());
         return ret;
     }

     // Retrieve stream information
     if ((ret = avformat_find_stream_info(fmt_ctx_, nullptr)) < 0) {
-        spdlog::error("Failed to retrieve input stream information");
+        logger()->error("Failed to retrieve input stream information");
         return ret;
     }

     // Find the first video stream
     ret = av_find_best_stream(fmt_ctx_, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
     if (ret < 0) {
-        spdlog::error("Could not find video stream in the input file");
+        logger()->error("Could not find video stream in the input file");
         return ret;
     }

@@ -62,7 +64,7 @@ int Decoder::init(
     // Find the decoder for the video stream
     const AVCodec *decoder = avcodec_find_decoder(video_stream->codecpar->codec_id);
     if (!decoder) {
-        spdlog::error(
+        logger()->error(
             "Failed to find decoder for codec ID {}",
             static_cast<int>(video_stream->codecpar->codec_id)
         );
@@ -72,13 +74,13 @@ int Decoder::init(
     // Allocate the decoder context
     dec_ctx_ = avcodec_alloc_context3(decoder);
     if (!dec_ctx_) {
-        spdlog::error("Failed to allocate the decoder context");
+        logger()->error("Failed to allocate the decoder context");
         return AVERROR(ENOMEM);
     }

     // Copy codec parameters from input stream to decoder context
     if ((ret = avcodec_parameters_to_context(dec_ctx_, video_stream->codecpar)) < 0) {
-        spdlog::error("Failed to copy decoder parameters to input decoder context");
+        logger()->error("Failed to copy decoder parameters to input decoder context");
         return ret;
     }

@@ -96,7 +98,7 @@ int Decoder::init(
     for (int i = 0;; i++) {
         const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
         if (config == nullptr) {
-            spdlog::error(
+            logger()->error(
                 "Decoder {} does not support device type {}.",
                 decoder->name,
                 av_hwdevice_get_type_name(hw_type)
@@ -113,7 +115,7 @@ int Decoder::init(

     // Open the decoder
     if ((ret = avcodec_open2(dec_ctx_, decoder, nullptr)) < 0) {
-        spdlog::error("Failed to open decoder for stream #{}", stream_index);
+        logger()->error("Failed to open decoder for stream #{}", stream_index);
         return ret;
     }

@@ -6,6 +6,8 @@ extern "C" {
 #include <libavutil/opt.h>
 }

+#include "logger_manager.h"
+
 #include "avutils.h"
 #include "conversions.h"

@@ -46,14 +48,14 @@ int Encoder::init(
     // Allocate the output format context
     avformat_alloc_output_context2(&ofmt_ctx_, nullptr, nullptr, out_fpath.u8string().c_str());
     if (!ofmt_ctx_) {
-        spdlog::error("Could not create output context");
+        logger()->error("Could not create output context");
         return AVERROR_UNKNOWN;
     }

     // Find the encoder
     const AVCodec *encoder = avcodec_find_encoder(enc_cfg.codec);
     if (!encoder) {
-        spdlog::error(
+        logger()->error(
             "Required video encoder not found for codec {}", avcodec_get_name(enc_cfg.codec)
         );
         return AVERROR_ENCODER_NOT_FOUND;
@@ -62,7 +64,7 @@ int Encoder::init(
     // Create a new video stream in the output file
     AVStream *out_vstream = avformat_new_stream(ofmt_ctx_, nullptr);
     if (!out_vstream) {
-        spdlog::error("Failed to allocate the output video stream");
+        logger()->error("Failed to allocate the output video stream");
         return AVERROR_UNKNOWN;
     }
     out_vstream_idx_ = out_vstream->index;
@@ -70,7 +72,7 @@ int Encoder::init(
     // Allocate the encoder context
     enc_ctx_ = avcodec_alloc_context3(encoder);
     if (!enc_ctx_) {
-        spdlog::error("Failed to allocate the encoder context");
+        logger()->error("Failed to allocate the encoder context");
         return AVERROR(ENOMEM);
     }

@@ -121,7 +123,7 @@ int Encoder::init(
         // Automatically select the pixel format
         enc_ctx_->pix_fmt = avutils::get_encoder_default_pix_fmt(encoder, dec_ctx->pix_fmt);
         if (enc_ctx_->pix_fmt == AV_PIX_FMT_NONE) {
-            spdlog::error("Could not get the default pixel format for the encoder");
+            logger()->error("Could not get the default pixel format for the encoder");
             return AVERROR(EINVAL);
         }
         spdlog::debug("Auto-selected pixel format: {}", av_get_pix_fmt_name(enc_ctx_->pix_fmt));
@@ -165,14 +167,14 @@ int Encoder::init(

     // Open the encoder
     if ((ret = avcodec_open2(enc_ctx_, encoder, nullptr)) < 0) {
-        spdlog::error("Cannot open video encoder");
+        logger()->error("Cannot open video encoder");
         return ret;
     }

     // Copy encoder parameters to output video stream
     ret = avcodec_parameters_from_context(out_vstream->codecpar, enc_ctx_);
     if (ret < 0) {
-        spdlog::error("Failed to copy encoder parameters to output video stream");
+        logger()->error("Failed to copy encoder parameters to output video stream");
         return ret;
     }

@@ -186,7 +188,7 @@ int Encoder::init(
     stream_map_ =
         reinterpret_cast<int *>(av_malloc_array(ifmt_ctx->nb_streams, sizeof(*stream_map_)));
     if (!stream_map_) {
-        spdlog::error("Could not allocate stream mapping");
+        logger()->error("Could not allocate stream mapping");
         return AVERROR(ENOMEM);
     }

@@ -212,14 +214,14 @@ int Encoder::init(
         // Create corresponding output stream for audio and subtitle streams
         AVStream *out_stream = avformat_new_stream(ofmt_ctx_, nullptr);
         if (!out_stream) {
-            spdlog::error("Failed allocating output stream");
+            logger()->error("Failed allocating output stream");
             return AVERROR_UNKNOWN;
         }

         // Copy codec parameters from input to output
         ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
         if (ret < 0) {
-            spdlog::error("Failed to copy codec parameters");
+            logger()->error("Failed to copy codec parameters");
             return ret;
         }
         out_stream->codecpar->codec_tag = 0;
@@ -237,7 +239,7 @@ int Encoder::init(
     if (!(ofmt_ctx_->oformat->flags & AVFMT_NOFILE)) {
         ret = avio_open(&ofmt_ctx_->pb, out_fpath.u8string().c_str(), AVIO_FLAG_WRITE);
         if (ret < 0) {
-            spdlog::error("Could not open output file '{}'", out_fpath.u8string());
+            logger()->error("Could not open output file '{}'", out_fpath.u8string());
             return ret;
         }
     }
@@ -245,7 +247,7 @@ int Encoder::init(
     // Write the output file header
     ret = avformat_write_header(ofmt_ctx_, nullptr);
     if (ret < 0) {
-        spdlog::error("Error writing output file header");
+        logger()->error("Error writing output file header");
         return ret;
     }

@@ -267,7 +269,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
     if (frame->format != enc_ctx_->pix_fmt) {
         converted_frame = conversions::convert_avframe_pix_fmt(frame, enc_ctx_->pix_fmt);
         if (!converted_frame) {
-            spdlog::error("Error converting frame to encoder's pixel format");
+            logger()->error("Error converting frame to encoder's pixel format");
             return AVERROR_EXTERNAL;
         }
         converted_frame->pts = frame->pts;
@@ -275,7 +277,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {

     AVPacket *enc_pkt = av_packet_alloc();
     if (!enc_pkt) {
-        spdlog::error("Could not allocate AVPacket");
+        logger()->error("Could not allocate AVPacket");
         return AVERROR(ENOMEM);
     }

@@ -287,7 +289,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
         ret = avcodec_send_frame(enc_ctx_, frame);
     }
     if (ret < 0) {
-        spdlog::error("Error sending frame to encoder");
+        logger()->error("Error sending frame to encoder");
         av_packet_free(&enc_pkt);
         return ret;
     }
@@ -299,7 +301,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
             av_packet_unref(enc_pkt);
             break;
         } else if (ret < 0) {
-            spdlog::error("Error encoding frame");
+            logger()->error("Error encoding frame");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -314,7 +316,7 @@ int Encoder::write_frame(AVFrame *frame, int64_t frame_idx) {
         ret = av_interleaved_write_frame(ofmt_ctx_, enc_pkt);
         av_packet_unref(enc_pkt);
         if (ret < 0) {
-            spdlog::error("Error muxing packet");
+            logger()->error("Error muxing packet");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -328,14 +330,14 @@ int Encoder::flush() {
     int ret;
     AVPacket *enc_pkt = av_packet_alloc();
     if (!enc_pkt) {
-        spdlog::error("Could not allocate AVPacket");
+        logger()->error("Could not allocate AVPacket");
         return AVERROR(ENOMEM);
     }

     // Send a NULL frame to signal the encoder to flush
     ret = avcodec_send_frame(enc_ctx_, nullptr);
     if (ret < 0) {
-        spdlog::error("Error sending NULL frame to encoder during flush");
+        logger()->error("Error sending NULL frame to encoder during flush");
         av_packet_free(&enc_pkt);
         return ret;
     }
@@ -347,7 +349,7 @@ int Encoder::flush() {
             av_packet_unref(enc_pkt);
             break;
         } else if (ret < 0) {
-            spdlog::error("Error encoding packet during flush");
+            logger()->error("Error encoding packet during flush");
             av_packet_free(&enc_pkt);
             return ret;
         }
@@ -362,7 +364,7 @@ int Encoder::flush() {
         ret = av_interleaved_write_frame(ofmt_ctx_, enc_pkt);
         av_packet_unref(enc_pkt);
         if (ret < 0) {
-            spdlog::error("Error muxing packet during flush");
+            logger()->error("Error muxing packet during flush");
             av_packet_free(&enc_pkt);
             return ret;
         }

@@ -6,6 +6,7 @@

 #include "fsutils.h"
 #include "libplacebo.h"
+#include "logger_manager.h"

 namespace video2x {
 namespace processors {

@@ -55,7 +56,7 @@ int FilterLibplacebo::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Check if the shader file exists
     if (!std::filesystem::exists(shader_full_path)) {
-        spdlog::error("libplacebo shader file not found: '{}'", shader_path_.u8string());
+        logger()->error("libplacebo shader file not found: '{}'", shader_path_.u8string());
         return -1;
     }

@@ -90,14 +91,14 @@ int FilterLibplacebo::filter(AVFrame *in_frame, AVFrame **out_frame) {
     // Get the filtered frame
     *out_frame = av_frame_alloc();
     if (*out_frame == nullptr) {
-        spdlog::error("Failed to allocate output frame");
+        logger()->error("Failed to allocate output frame");
         return -1;
     }

     // Feed the frame to the filter graph
     ret = av_buffersrc_add_frame(buffersrc_ctx_, in_frame);
     if (ret < 0) {
-        spdlog::error("Error while feeding the filter graph");
+        logger()->error("Error while feeding the filter graph");
         av_frame_free(out_frame);
         return ret;
     }
@@ -118,7 +119,7 @@ int FilterLibplacebo::filter(AVFrame *in_frame, AVFrame **out_frame) {
 int FilterLibplacebo::flush(std::vector<AVFrame *> &flushed_frames) {
     int ret = av_buffersrc_add_frame(buffersrc_ctx_, nullptr);
     if (ret < 0) {
-        spdlog::error("Error while flushing filter graph");
+        logger()->error("Error while flushing filter graph");
         return ret;
     }

@@ -8,6 +8,7 @@

 #include "conversions.h"
 #include "fsutils.h"
+#include "logger_manager.h"

 namespace video2x {
 namespace processors {
@@ -46,18 +47,16 @@ int FilterRealesrgan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
     model_bin_path = std::filesystem::path(STR("models")) / STR("realesrgan") / bin_file_name;

     // Get the full paths using a function that possibly modifies or validates the path
-    std::filesystem::path model_param_full_path =
-        fsutils::find_resource_file(model_param_path);
-    std::filesystem::path model_bin_full_path =
-        fsutils::find_resource_file(model_bin_path);
+    std::filesystem::path model_param_full_path = fsutils::find_resource_file(model_param_path);
+    std::filesystem::path model_bin_full_path = fsutils::find_resource_file(model_bin_path);

     // Check if the model files exist
     if (!std::filesystem::exists(model_param_full_path)) {
-        spdlog::error("RealESRGAN model param file not found: {}", model_param_path.u8string());
+        logger()->error("RealESRGAN model param file not found: {}", model_param_path.u8string());
         return -1;
     }
     if (!std::filesystem::exists(model_bin_full_path)) {
-        spdlog::error("RealESRGAN model bin file not found: {}", model_bin_path.u8string());
+        logger()->error("RealESRGAN model bin file not found: {}", model_bin_path.u8string());
         return -1;
     }

@@ -71,7 +70,7 @@ int FilterRealesrgan::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Load the model
     if (realesrgan_->load(model_param_full_path, model_bin_full_path) != 0) {
-        spdlog::error("Failed to load RealESRGAN model");
+        logger()->error("Failed to load RealESRGAN model");
         return -1;
     }

@@ -100,7 +99,7 @@ int FilterRealesrgan::filter(AVFrame *in_frame, AVFrame **out_frame) {
     // Convert the input frame to RGB24
     ncnn::Mat in_mat = conversions::avframe_to_ncnn_mat(in_frame);
     if (in_mat.empty()) {
-        spdlog::error("Failed to convert AVFrame to ncnn::Mat");
+        logger()->error("Failed to convert AVFrame to ncnn::Mat");
         return -1;
     }

@@ -111,7 +110,7 @@ int FilterRealesrgan::filter(AVFrame *in_frame, AVFrame **out_frame) {

     ret = realesrgan_->process(in_mat, out_mat);
     if (ret != 0) {
-        spdlog::error("RealESRGAN processing failed");
+        logger()->error("RealESRGAN processing failed");
         return ret;
     }

@@ -10,6 +10,8 @@

 #include <spdlog/spdlog.h>

+#include "logger_manager.h"
+
 namespace video2x {
 namespace fsutils {

@@ -20,7 +22,7 @@ static std::filesystem::path get_executable_directory() {
     // Get the executable path, expanding the buffer if necessary
     DWORD size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
     if (size == 0) {
-        spdlog::error("Error getting executable path: {}", GetLastError());
+        logger()->error("Error getting executable path: {}", GetLastError());
         return std::filesystem::path();
     }

@@ -29,7 +31,7 @@ static std::filesystem::path get_executable_directory() {
         filepath.resize(filepath.size() * 2);
         size = GetModuleFileNameW(NULL, filepath.data(), static_cast<DWORD>(filepath.size()));
         if (size == 0) {
-            spdlog::error("Error getting executable path: {}", GetLastError());
+            logger()->error("Error getting executable path: {}", GetLastError());
             return std::filesystem::path();
         }
     }
@@ -44,7 +46,7 @@ static std::filesystem::path get_executable_directory() {
     std::filesystem::path filepath = std::filesystem::read_symlink("/proc/self/exe", ec);

     if (ec) {
-        spdlog::error("Error reading /proc/self/exe: {}", ec.message());
+        logger()->error("Error reading /proc/self/exe: {}", ec.message());
         return std::filesystem::path();
     }

@@ -7,6 +7,7 @@

 #include "conversions.h"
 #include "fsutils.h"
+#include "logger_manager.h"

 namespace video2x {
 namespace processors {
@@ -42,12 +43,11 @@ int InterpolatorRIFE::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
     model_param_dir = std::filesystem::path(STR("models")) / STR("rife") / model_name_;

     // Get the full paths using a function that possibly modifies or validates the path
-    std::filesystem::path model_param_full_path =
-        fsutils::find_resource_file(model_param_dir);
+    std::filesystem::path model_param_full_path = fsutils::find_resource_file(model_param_dir);

     // Check if the model files exist
     if (!std::filesystem::exists(model_param_full_path)) {
-        spdlog::error("RIFE model param directory not found: {}", model_param_dir.u8string());
+        logger()->error("RIFE model param directory not found: {}", model_param_dir.u8string());
         return -1;
     }

@@ -61,7 +61,7 @@ int InterpolatorRIFE::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB
     } else if (model_name_.find(STR("rife-v4")) != fsutils::StringType::npos) {
         rife_v4 = true;
     } else if (model_name_.find(STR("rife")) == fsutils::StringType::npos) {
-        spdlog::critical("Failed to infer RIFE model generation from model name");
+        logger()->critical("Failed to infer RIFE model generation from model name");
         return -1;
     }

@@ -76,7 +76,7 @@ int InterpolatorRIFE::init(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, AVB

     // Load the model
     if (rife_->load(model_param_full_path) != 0) {
-        spdlog::error("Failed to load RIFE model");
+        logger()->error("Failed to load RIFE model");
         return -1;
     }

@@ -93,13 +93,13 @@ int InterpolatorRIFE::interpolate(

     ncnn::Mat in_mat1 = conversions::avframe_to_ncnn_mat(prev_frame);
     if (in_mat1.empty()) {
-        spdlog::error("Failed to convert AVFrame to ncnn::Mat");
+        logger()->error("Failed to convert AVFrame to ncnn::Mat");
         return -1;
     }

     ncnn::Mat in_mat2 = conversions::avframe_to_ncnn_mat(in_frame);
     if (in_mat2.empty()) {
-        spdlog::error("Failed to convert AVFrame to ncnn::Mat");
+        logger()->error("Failed to convert AVFrame to ncnn::Mat");
         return -1;
     }

@@ -108,7 +108,7 @@ int InterpolatorRIFE::interpolate(

     ret = rife_->process(in_mat1, in_mat2, time_step, out_mat);
     if (ret != 0) {
-        spdlog::error("RIFE processing failed");
+        logger()->error("RIFE processing failed");
         return ret;
     }

@@ -11,6 +11,8 @@ extern "C" {

 #include <spdlog/spdlog.h>

+#include "logger_manager.h"
+
 namespace video2x {
 namespace processors {

@@ -32,20 +34,20 @@ int init_libplacebo(
         &vk_hw_device_ctx, AV_HWDEVICE_TYPE_VULKAN, std::to_string(vk_device_index).c_str(), NULL, 0
     );
     if (ret < 0) {
-        spdlog::error("Failed to create Vulkan hardware device context for libplacebo.");
+        logger()->error("Failed to create Vulkan hardware device context for libplacebo.");
         vk_hw_device_ctx = nullptr;
     }

     AVFilterGraph *graph = avfilter_graph_alloc();
     if (!graph) {
-        spdlog::error("Unable to create filter graph.");
+        logger()->error("Unable to create filter graph.");
         return AVERROR(ENOMEM);
     }

     // Create buffer source
     const AVFilter *buffersrc = avfilter_get_by_name("buffer");
     if (!buffersrc) {
-        spdlog::error("Filter 'buffer' not found.");
+        logger()->error("Filter 'buffer' not found.");
         avfilter_graph_free(&graph);
         return AVERROR_FILTER_NOT_FOUND;
     }
@@ -82,7 +84,7 @@ int init_libplacebo(
     spdlog::debug("Buffer source args: {}", args);
     ret = avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in", args.c_str(), NULL, graph);
     if (ret < 0) {
-        spdlog::error("Cannot create buffer source.");
+        logger()->error("Cannot create buffer source.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -92,7 +94,7 @@ int init_libplacebo(
     // Create the libplacebo filter
     const AVFilter *libplacebo_filter = avfilter_get_by_name("libplacebo");
     if (!libplacebo_filter) {
-        spdlog::error("Filter 'libplacebo' not found.");
+        logger()->error("Filter 'libplacebo' not found.");
         avfilter_graph_free(&graph);
         return AVERROR_FILTER_NOT_FOUND;
     }
@@ -115,7 +117,7 @@ int init_libplacebo(
         &libplacebo_ctx, libplacebo_filter, "libplacebo", filter_args.c_str(), NULL, graph
     );
     if (ret < 0) {
-        spdlog::error("Cannot create libplacebo filter.");
+        logger()->error("Cannot create libplacebo filter.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -129,7 +131,7 @@ int init_libplacebo(
     // Link buffersrc to libplacebo
     ret = avfilter_link(last_filter, 0, libplacebo_ctx, 0);
     if (ret < 0) {
-        spdlog::error("Error connecting buffersrc to libplacebo filter.");
+        logger()->error("Error connecting buffersrc to libplacebo filter.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -140,7 +142,7 @@ int init_libplacebo(
     const AVFilter *buffersink = avfilter_get_by_name("buffersink");
     ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, graph);
     if (ret < 0) {
-        spdlog::error("Cannot create buffer sink.");
+        logger()->error("Cannot create buffer sink.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -148,7 +150,7 @@ int init_libplacebo(
     // Link libplacebo to buffersink
     ret = avfilter_link(last_filter, 0, *buffersink_ctx, 0);
     if (ret < 0) {
-        spdlog::error("Error connecting libplacebo filter to buffersink.");
+        logger()->error("Error connecting libplacebo filter to buffersink.");
         avfilter_graph_free(&graph);
         return ret;
     }
@@ -156,7 +158,7 @@ int init_libplacebo(
     // Configure the filter graph
     ret = avfilter_graph_config(graph, NULL);
     if (ret < 0) {
-        spdlog::error("Error configuring the filter graph.");
+        logger()->error("Error configuring the filter graph.");
         avfilter_graph_free(&graph);
         return ret;
     }

@@ -9,7 +9,7 @@ extern "C" {
 #include "avutils.h"
 #include "decoder.h"
 #include "encoder.h"
-#include "logutils.h"
+#include "logger_manager.h"
 #include "processor.h"
 #include "processor_factory.h"

@@ -20,16 +20,13 @@ VideoProcessor::VideoProcessor(
     const encoder::EncoderConfig enc_cfg,
     const uint32_t vk_device_idx,
     const AVHWDeviceType hw_device_type,
-    const logutils::Video2xLogLevel log_level,
     const bool benchmark
 )
     : proc_cfg_(proc_cfg),
       enc_cfg_(enc_cfg),
       vk_device_idx_(vk_device_idx),
       hw_device_type_(hw_device_type),
-      benchmark_(benchmark) {
-    set_log_level(log_level);
-}
+      benchmark_(benchmark) {}

 int VideoProcessor::process(
     const std::filesystem::path in_fname,
@@ -42,7 +39,7 @@ int VideoProcessor::process(
         // Format and log the error message
         char errbuf[AV_ERROR_MAX_STRING_SIZE];
         av_strerror(error_code, errbuf, sizeof(errbuf));
-        spdlog::critical("{}: {}", msg, errbuf);
+        logger()->critical("{}: {}", msg, errbuf);

         // Set the video processor state to failed and return the error code
         state_.store(VideoProcessorState::Failed);
@@ -167,7 +164,7 @@ int VideoProcessor::process_frames(
         av_frame_alloc(), &avutils::av_frame_deleter
     );
     if (frame == nullptr) {
-        spdlog::critical("Error allocating frame");
+        logger()->critical("Error allocating frame");
        return AVERROR(ENOMEM);
     }

@@ -176,7 +173,7 @@ int VideoProcessor::process_frames(
         av_packet_alloc(), &avutils::av_packet_deleter
     );
     if (packet == nullptr) {
-        spdlog::critical("Error allocating packet");
+        logger()->critical("Error allocating packet");
         return AVERROR(ENOMEM);
     }

@@ -205,7 +202,7 @@ int VideoProcessor::process_frames(
                 break;
             }
             av_strerror(ret, errbuf, sizeof(errbuf));
-            spdlog::critical("Error reading packet: {}", errbuf);
+            logger()->critical("Error reading packet: {}", errbuf);
             return ret;
         }

@@ -214,7 +211,7 @@ int VideoProcessor::process_frames(
             ret = avcodec_send_packet(dec_ctx, packet.get());
             if (ret < 0) {
                 av_strerror(ret, errbuf, sizeof(errbuf));
-                spdlog::critical("Error sending packet to decoder: {}", errbuf);
+                logger()->critical("Error sending packet to decoder: {}", errbuf);
                 return ret;
             }

@@ -233,7 +230,7 @@ int VideoProcessor::process_frames(
                     break;
                 } else if (ret < 0) {
                     av_strerror(ret, errbuf, sizeof(errbuf));
-                    spdlog::critical("Error decoding video frame: {}", errbuf);
+                    logger()->critical("Error decoding video frame: {}", errbuf);
                     return ret;
                 }

@@ -251,7 +248,7 @@ int VideoProcessor::process_frames(
                         break;
                     }
                    default:
-                        spdlog::critical("Unknown processing mode");
+                        logger()->critical("Unknown processing mode");
                        return -1;
                }
                if (ret < 0 && ret != AVERROR(EAGAIN)) {
@@ -275,7 +272,7 @@ int VideoProcessor::process_frames(
         ret = processor->flush(raw_flushed_frames);
         if (ret < 0) {
             av_strerror(ret, errbuf, sizeof(errbuf));
-            spdlog::critical("Error flushing filter: {}", errbuf);
+            logger()->critical("Error flushing filter: {}", errbuf);
             return ret;
         }

@@ -298,7 +295,7 @@ int VideoProcessor::process_frames(
     ret = encoder.flush();
     if (ret < 0) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        spdlog::critical("Error flushing encoder: {}", errbuf);
+        logger()->critical("Error flushing encoder: {}", errbuf);
         return ret;
     }

@@ -313,7 +310,7 @@ int VideoProcessor::write_frame(AVFrame *frame, encoder::Encoder &encoder) {
         ret = encoder.write_frame(frame, frame_idx_);
         if (ret < 0) {
             av_strerror(ret, errbuf, sizeof(errbuf));
-            spdlog::critical("Error encoding/writing frame: {}", errbuf);
+            logger()->critical("Error encoding/writing frame: {}", errbuf);
         }
     }
     return ret;
@@ -338,7 +335,7 @@ int VideoProcessor::write_raw_packet(
     ret = av_interleaved_write_frame(ofmt_ctx, packet);
     if (ret < 0) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        spdlog::critical("Error muxing audio/subtitle packet: {}", errbuf);
+        logger()->critical("Error muxing audio/subtitle packet: {}", errbuf);
     }
     return ret;
 }
@@ -361,7 +358,7 @@ int VideoProcessor::process_filtering(
     // Write the processed frame
     if (ret < 0 && ret != AVERROR(EAGAIN)) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        spdlog::critical("Error filtering frame: {}", errbuf);
+        logger()->critical("Error filtering frame: {}", errbuf);
     } else if (ret == 0 && proc_frame != nullptr) {
         auto processed_frame = std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)>(
             proc_frame, &avutils::av_frame_deleter
@@ -420,7 +417,7 @@ int VideoProcessor::process_interpolation(
     // Write the interpolated frame
     if (ret < 0 && ret != AVERROR(EAGAIN)) {
         av_strerror(ret, errbuf, sizeof(errbuf));
-        spdlog::critical("Error interpolating frame: {}", errbuf);
+        logger()->critical("Error interpolating frame: {}", errbuf);
         return ret;
     } else if (ret == 0 && proc_frame != nullptr) {
         auto processed_frame = std::unique_ptr<AVFrame, decltype(&avutils::av_frame_deleter)>(

src/logger_manager.cpp (new file, 110 lines)
@@ -0,0 +1,110 @@
+#include "logger_manager.h"
+
+extern "C" {
+#include <libavutil/log.h>
+}
+
+#include <spdlog/sinks/stdout_color_sinks.h>
+#include <spdlog/spdlog.h>
+
+static spdlog::level::level_enum ffmpeg_level_to_spdlog(int av_level) {
+    if (av_level <= AV_LOG_PANIC) {
+        return spdlog::level::critical;
+    } else if (av_level <= AV_LOG_ERROR) {
+        return spdlog::level::err;
+    } else if (av_level <= AV_LOG_WARNING) {
+        return spdlog::level::warn;
+    } else if (av_level <= AV_LOG_INFO) {
+        return spdlog::level::info;
+    } else if (av_level <= AV_LOG_VERBOSE) {
+        return spdlog::level::debug;
+    } else if (av_level == AV_LOG_DEBUG) {
+        return spdlog::level::debug;
+    } else {
+        // AV_LOG_TRACE or beyond (if supported by FFmpeg)
+        return spdlog::level::trace;
+    }
+}
+
+static void ffmpeg_log_callback(void *, int av_level, const char *fmt, va_list vargs) {
+    // Format the message into a buffer
+    char buffer[1024];
+    vsnprintf(buffer, sizeof(buffer), fmt, vargs);
+
+    // Trim trailing newlines
+    std::string message = buffer;
+    while (!message.empty() && (message.back() == '\n' || message.back() == '\r')) {
+        message.pop_back();
+    }
+
+    // Forward FFmpeg log message to the logger instance
+    video2x::logger()->log(ffmpeg_level_to_spdlog(av_level), message);
+}
+
+namespace video2x {
+namespace logger_manager {
+
+LoggerManager::LoggerManager() {
+    auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
+    console_sink->set_pattern("%+");
+    logger_ = std::make_shared<spdlog::logger>("video2x", console_sink);
+    spdlog::register_logger(logger_);
+    logger_->set_level(spdlog::level::info);
+}
+
+LoggerManager &LoggerManager::instance() {
+    static LoggerManager instance;
+    return instance;
+}
+
+std::shared_ptr<spdlog::logger> LoggerManager::logger() {
+    return logger_;
+}
+
+void LoggerManager::reconfigure_logger(
+    const std::string &logger_name,
+    const std::vector<spdlog::sink_ptr> &sinks,
+    const std::string &pattern
+) {
+    if (!sinks.empty()) {
+        // If a logger with the same name exists, remove it first
+        auto old_logger = spdlog::get(logger_name);
+        if (old_logger) {
+            spdlog::drop(logger_name);
+        }
+
+        // Create a new logger with the given name, sinks, and pattern
+        auto new_logger = std::make_shared<spdlog::logger>(logger_name, sinks.begin(), sinks.end());
+        new_logger->set_pattern(pattern);
+
+        // Maintain the log level from the previous logger
+        if (logger_) {
+            new_logger->set_level(logger_->level());
+        }
+
+        // Replace the internal logger_ member and register the new one
+        logger_ = new_logger;
+        spdlog::register_logger(logger_);
+    }
+}
+
+bool LoggerManager::set_log_level(const std::string &level_str) {
+    spdlog::level::level_enum log_level = spdlog::level::from_str(level_str);
+    if (log_level == spdlog::level::off && level_str != "off") {
+        // Invalid level_str
+        return false;
+    }
+    logger_->set_level(log_level);
+    return true;
+}
+
+void LoggerManager::hook_ffmpeg_logging() {
+    av_log_set_callback(ffmpeg_log_callback);
+}
+
+void LoggerManager::unhook_ffmpeg_logging() {
+    av_log_set_callback(nullptr);
+}
+
+} // namespace logger_manager
+} // namespace video2x

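A hedged usage sketch of the new manager from an embedding application's point of view; it assumes logger_manager.h declares video2x::logger() as a shorthand for LoggerManager::instance().logger(), which this diff calls but does not show:

    #include "logger_manager.h"

    int main() {
        auto &manager = video2x::logger_manager::LoggerManager::instance();

        // set_log_level() parses the string with spdlog::level::from_str and
        // rejects anything that is not a valid level name.
        if (!manager.set_log_level("debug")) {
            return 1;
        }

        // Route FFmpeg's av_log() output through the same spdlog logger.
        manager.hook_ffmpeg_logging();

        // Library code and callers log through the shared instance.
        video2x::logger()->info("video2x logging initialized");
        return 0;
    }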
@@ -1,50 +0,0 @@
-#include "logutils.h"
-
-extern "C" {
-#include <libavutil/avutil.h>
-}
-
-#include <spdlog/spdlog.h>
-
-namespace video2x {
-namespace logutils {
-
-void set_log_level(Video2xLogLevel log_level) {
-    switch (log_level) {
-        case Video2xLogLevel::Trace:
-            av_log_set_level(AV_LOG_TRACE);
-            spdlog::set_level(spdlog::level::trace);
-            break;
-        case Video2xLogLevel::Debug:
-            av_log_set_level(AV_LOG_DEBUG);
-            spdlog::set_level(spdlog::level::debug);
-            break;
-        case Video2xLogLevel::Info:
-            av_log_set_level(AV_LOG_INFO);
-            spdlog::set_level(spdlog::level::info);
-            break;
-        case Video2xLogLevel::Warning:
-            av_log_set_level(AV_LOG_WARNING);
-            spdlog::set_level(spdlog::level::warn);
-            break;
-        case Video2xLogLevel::Error:
-            av_log_set_level(AV_LOG_ERROR);
-            spdlog::set_level(spdlog::level::err);
-            break;
-        case Video2xLogLevel::Critical:
-            av_log_set_level(AV_LOG_FATAL);
-            spdlog::set_level(spdlog::level::critical);
-            break;
-        case Video2xLogLevel::Off:
-            av_log_set_level(AV_LOG_QUIET);
-            spdlog::set_level(spdlog::level::off);
-            break;
-        default:
-            av_log_set_level(AV_LOG_INFO);
-            spdlog::set_level(spdlog::level::info);
-            break;
-    }
-}
-
-} // namespace logutils
-} // namespace video2x

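The deleted helper set the FFmpeg and spdlog levels together from one Video2xLogLevel enum. After this commit that responsibility is split: LoggerManager::set_log_level() adjusts the spdlog logger, and hook_ffmpeg_logging() forwards av_log() messages into that same logger, where they are filtered by its level. A rough equivalent of the old logutils::set_log_level(Video2xLogLevel::Debug) call under the new API (a sketch, not code from this commit):

    auto &manager = video2x::logger_manager::LoggerManager::instance();
    manager.set_log_level("debug");
    manager.hook_ffmpeg_logging();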
@@ -6,6 +6,7 @@
 #include "filter_libplacebo.h"
 #include "filter_realesrgan.h"
 #include "interpolator_rife.h"
+#include "logger_manager.h"

 namespace video2x {
 namespace processors {

@@ -36,7 +37,7 @@ std::unique_ptr<Processor> ProcessorFactory::create_processor(
 ) const {
     auto it = creators.find(proc_cfg.processor_type);
     if (it == creators.end()) {
-        spdlog::critical(
+        logger()->critical(
             "Processor type not registered: {}", static_cast<int>(proc_cfg.processor_type)
         );
         return nullptr;
@@ -54,11 +55,11 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
         uint32_t vk_device_index) -> std::unique_ptr<Processor> {
            const auto &config = std::get<LibplaceboConfig>(proc_cfg.config);
            if (config.shader_path.empty()) {
-                spdlog::critical("Shader path must be provided for the libplacebo filter");
+                logger()->critical("Shader path must be provided for the libplacebo filter");
                return nullptr;
            }
            if (proc_cfg.width <= 0 || proc_cfg.height <= 0) {
-                spdlog::critical(
+                logger()->critical(
                    "Output width and height must be provided for the libplacebo filter"
                );
                return nullptr;
@@ -78,11 +79,11 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
         uint32_t vk_device_index) -> std::unique_ptr<Processor> {
            const auto &config = std::get<RealESRGANConfig>(proc_cfg.config);
            if (proc_cfg.scaling_factor <= 0) {
-                spdlog::critical("Scaling factor must be provided for the RealESRGAN filter");
+                logger()->critical("Scaling factor must be provided for the RealESRGAN filter");
                return nullptr;
            }
            if (config.model_name.empty()) {
-                spdlog::critical("Model name must be provided for the RealESRGAN filter");
+                logger()->critical("Model name must be provided for the RealESRGAN filter");
                return nullptr;
            }
            return std::make_unique<FilterRealesrgan>(
@@ -100,7 +101,7 @@ void ProcessorFactory::init_default_processors(ProcessorFactory &factory) {
         uint32_t vk_device_index) -> std::unique_ptr<Processor> {
            const auto &cfg = std::get<RIFEConfig>(proc_cfg.config);
            if (cfg.model_name.empty()) {
-                spdlog::critical("Model name must be provided for the RIFE filter");
+                logger()->critical("Model name must be provided for the RIFE filter");
                return nullptr;
            }
            return std::make_unique<InterpolatorRIFE>(
