Files
video-subtitle-remover/backend/tools/hardware_accelerator.py
Jason f78e985e1c 使用PySide6-Fluent-Widgets重构整套UI
添加任务列表组件并优化视频加载逻辑
支持可视化显示字幕区域
整理所有模型, 分别为STTN智能擦除, STTN字幕检测, LAMA, ProPainter, OpenCV
提高处理性能
新增CPU运行模式并优化多语言支持
修复Propainter模式部分视频报错

本次提交新增了CPU运行模式,适用于无GPU加速的场景。同时,优化了多语言支持,新增了日语、韩语、越南语等语言配置文件,并更新了README文档以反映新的运行模式和多语言支持。此外,修复了部分代码逻辑,提升了系统的稳定性和兼容性。
2025-05-22 08:41:59 +08:00

120 lines
4.3 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
import traceback
import importlib.util
import torch
from backend.config import tr
class HardwareAccelerator:
    """Singleton that probes for hardware acceleration backends
    (CUDA, DirectML, ONNX Runtime execution providers) and exposes
    the preferred torch device for inference."""

    # Class variable holding the singleton instance.
    _instance = None

    @classmethod
    def instance(cls):
        """Return the shared singleton, creating and probing hardware on first use."""
        if cls._instance is None:
            cls._instance = HardwareAccelerator()
            cls._instance.initialize()
        return cls._instance

    def __init__(self):
        # Probe results; filled in by initialize().
        self.__cuda = False          # True when torch reports a usable CUDA device
        self.__dml = False           # True when the torch_directml package is importable
        self.__onnx_providers = []   # supported non-CPU ONNX Runtime providers
        self.__enabled = True        # master switch; False forces CPU mode everywhere
        self.__device = None

    def initialize(self):
        """Run all hardware probes. Called once from instance()."""
        self.check_directml_available()
        self.check_cuda_available()
        self.load_onnx_providers()

    def check_directml_available(self):
        # find_spec returns a ModuleSpec or None; normalize to a plain bool
        # since __dml is consumed as a boolean flag throughout the class.
        self.__dml = importlib.util.find_spec("torch_directml") is not None

    def check_cuda_available(self):
        self.__cuda = torch.cuda.is_available()

    def load_onnx_providers(self):
        """Collect the supported non-CPU ONNX Runtime execution providers.

        Prints a localized message for each provider that is detected or
        skipped; prints a warning when onnxruntime is not installed.
        """
        try:
            import onnxruntime as ort
            available_providers = ort.get_available_providers()
            for provider in available_providers:
                # CPU is always available and never counts as acceleration.
                if provider in [
                    "CPUExecutionProvider"
                ]:
                    continue
                if provider not in [
                    "DmlExecutionProvider",  # DirectML, for Windows GPUs
                    "ROCMExecutionProvider",  # AMD ROCm
                    "MIGraphXExecutionProvider",  # AMD MIGraphX
                    "VitisAIExecutionProvider",  # AMD VitisAI (RyzenAI & Windows); roughly on par with DirectML in testing
                    "OpenVINOExecutionProvider",  # Intel GPU
                    "MetalExecutionProvider",  # Apple macOS
                    "CoreMLExecutionProvider",  # Apple macOS
                    "CUDAExecutionProvider",  # Nvidia GPU
                ]:
                    print(tr['Main']['OnnxExectionProviderNotSupportedSkipped'].format(provider))
                    continue
                print(tr['Main']['OnnxExecutionProviderDetected'].format(provider))
                self.__onnx_providers.append(provider)
        except ModuleNotFoundError:
            print(tr['Main']['OnnxRuntimeNotInstall'])

    def has_accelerator(self):
        """Return True when acceleration is enabled and any backend was detected."""
        if not self.__enabled:
            return False
        return self.__cuda or self.__dml or len(self.__onnx_providers) > 0

    @property
    def accelerator_name(self):
        """Human-readable name of the active backend; "CPU" when disabled or none found."""
        if not self.__enabled:
            return "CPU"
        if self.__dml:
            return "DirectML"
        if self.__cuda:
            return "GPU"
        elif len(self.__onnx_providers) > 0:
            return ", ".join(self.__onnx_providers)
        else:
            return "CPU"

    @property
    def onnx_providers(self):
        """Detected ONNX providers, or an empty list when acceleration is disabled."""
        if not self.__enabled:
            return []
        return self.__onnx_providers

    def has_cuda(self):
        """Return True when acceleration is enabled and CUDA is available."""
        if not self.__enabled:
            return False
        return self.__cuda

    def set_enabled(self, enable):
        """Globally enable/disable acceleration; False forces CPU mode."""
        self.__enabled = enable

    @property
    def device(self):
        """Return the torch device to use, preferring DirectML, then CUDA, then CPU.

        torch_directml is imported lazily here on purpose:
        onnxruntime-directml 1.21.1-1.22.0 (newer versions untested) and
        torch-directml cannot both be initialized in the same process — they
        interfere with each other, causing
        site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 266, in run
            return self._sess.run(output_names, input_feed, run_options)
        UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb2 in position 344: invalid start byte
        onnxruntime-directml 1.21.1 works, but fails to run on Win10 (Win11 is fine).
        To avoid the conflict — and to avoid rewriting a QPT smart-deployment
        flow — we use lazy initialization here and stay on
        onnxruntime-directml 1.20.1. Running SubtitleDetect in a separate
        process would also be a viable alternative.
        """
        if self.__enabled:
            if self.__dml:
                try:
                    import torch_directml
                    return torch_directml.device(torch_directml.default_device())
                except Exception:
                    traceback.print_exc()
                    # Import/initialization failed: stop reporting DML and fall through.
                    self.__dml = False
            if self.__cuda:
                return torch.device("cuda:0")
        return torch.device("cpu")