Mirror of https://github.com/YaoFANGUK/video-subtitle-remover.git (synced 2026-05-18 03:27:33 +08:00)
Initial macOS (Apple Silicon) support

Upgraded to PP-OCRv5. Tested with Python 3.13. Thanks to user "期望" for providing the macOS device used for development and testing.
@@ -395,7 +395,7 @@ class SubtitleRemover:
         accelerator_name = self.hardware_accelerator.accelerator_name
         if accelerator_name == 'DirectML' and config.inpaintMode.value in [InpaintMode.STTN_AUTO, InpaintMode.STTN_DET]:
             model_device = 'DirectML'
-        if self.hardware_accelerator.has_cuda():
+        if self.hardware_accelerator.has_cuda() or self.hardware_accelerator.has_mps():
            model_device = accelerator_name
        self.append_output(tr['Main']['UseModel'].format(f"{model_friendly_name} ({model_device})"))

@@ -446,7 +446,7 @@ class SubtitleRemover:
     @cached_property
     def lama_inpaint(self):
         model_path = os.path.join(self.model_config.LAMA_MODEL_DIR, 'big-lama.pt')
-        device = self.hardware_accelerator.device if self.hardware_accelerator.has_cuda() else torch.device("cpu")
+        device = self.hardware_accelerator.device if self.hardware_accelerator.has_cuda() or self.hardware_accelerator.has_mps() else torch.device("cpu")
        return LamaInpaint(device, model_path)

    @cached_property
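Note: both SubtitleRemover hunks above share one pattern: lazy model construction behind @cached_property, with the device widened from CUDA-only to CUDA-or-MPS. A minimal self-contained sketch of the caching behavior (the class and prints are illustrative, not the project's code):

from functools import cached_property

class Remover:
    @cached_property
    def lama_inpaint(self):
        # Runs once on first access; the result is then cached on the
        # instance, so model weights load only if inpainting is used.
        print("loading model ...")
        return object()  # stand-in for LamaInpaint(device, model_path)

r = Remover()
r.lama_inpaint  # prints "loading model ..."
r.lama_inpaint  # cached: no second load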
fs_manifest.csv (split-file manifest for the old .pdiparams detector model) deleted:

@@ -1,4 +0,0 @@
-filename,filesize,encoding,header
-inference_1.pdiparams,50000000,,
-inference_2.pdiparams,50000000,,
-inference_3.pdiparams,13295054,,
6 binary files not shown.
backend/models/V5/ch_det/fs_manifest.csv (new file, 3 lines):

@@ -0,0 +1,3 @@
+filename,filesize,encoding,header
+inference_1.onnx,50000000,,
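Note: fs_manifest.csv and the inference_*.onnx chunks are the output of filesplit's split step; the manifest records each chunk's filename, filesize, encoding and header. A hedged sketch of producing them, assuming the filesplit 2.x API (fsplit.filesplit.Filesplit) the project already imports; the exact split() signature may differ across versions:

from fsplit.filesplit import Filesplit

# Split inference.onnx into inference_1.onnx, inference_2.onnx, ... and
# write fs_manifest.csv describing the chunks.
fs = Filesplit()
fs.split(file="inference.onnx", split_size=50000000,
         output_dir="backend/models/V5/ch_det")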
2 binary files not shown.

BIN backend/models/V5/ch_det_fast/inference.onnx (new file; binary not shown)
@@ -37,9 +37,11 @@ def is_video_or_image(filename):
     # Check whether the extension is in the defined sets of video or image file extensions
     return file_extension in video_extensions or file_extension in image_extensions

-def merge_big_file_if_not_exists(dir, file):
+def merge_big_file_if_not_exists(dir, file, man_filename=None):
     if file not in os.listdir(dir):
         fs = Filesplit()
+        if man_filename is not None:
+            fs.man_filename = man_filename
         fs.merge(input_dir=dir)

 def get_readable_path(path):
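Note: the new man_filename parameter lets callers point Filesplit at a manifest not named fs_manifest.csv (the library default). Typical usage, mirroring the merge_big_file_if_not_exists() calls made in ModelConfig below:

from fsplit.filesplit import Filesplit

# Reassemble a model file from its chunks, as listed in the manifest.
fs = Filesplit()
fs.man_filename = "fs_manifest.csv"  # default name; override when needed
fs.merge(input_dir="backend/models/V5/ch_det")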
@@ -21,6 +21,7 @@ class HardwareAccelerator:
     def __init__(self):
         self.__cuda = False
         self.__dml = False
+        self.__mps = False
         self.__onnx_providers = []
         self.__enabled = True
         self.__device = None
@@ -28,6 +29,7 @@ class HardwareAccelerator:
     def initialize(self):
         self.check_directml_available()
         self.check_cuda_available()
+        self.check_mps_available()
        self.load_onnx_providers()

    def check_directml_available(self):
@@ -36,6 +38,9 @@ class HardwareAccelerator:
     def check_cuda_available(self):
         self.__cuda = torch.cuda.is_available()

+    def check_mps_available(self):
+        self.__mps = torch.backends.mps.is_available() and torch.backends.mps.is_built()
+
     def load_onnx_providers(self):
         try:
             import onnxruntime as ort
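Note: the MPS probe checks two distinct things, which is why both calls are needed. To inspect them on a given machine (standard torch.backends.mps API):

import torch

# is_built(): this torch wheel was compiled with the MPS backend.
# is_available(): the macOS version and Apple-Silicon hardware can use it.
print(torch.backends.mps.is_built(), torch.backends.mps.is_available())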
@@ -65,7 +70,7 @@ class HardwareAccelerator:
     def has_accelerator(self):
         if not self.__enabled:
             return False
-        return self.__cuda or self.__dml or len(self.__onnx_providers) > 0
+        return self.__cuda or self.__dml or self.__mps or len(self.__onnx_providers) > 0

    @property
    def accelerator_name(self):
@@ -75,6 +80,8 @@ class HardwareAccelerator:
             return "DirectML"
         if self.__cuda:
             return "GPU"
+        if self.__mps:
+            return "MPS"
         elif len(self.__onnx_providers) > 0:
             return ", ".join(self.__onnx_providers)
         else:
@@ -83,7 +90,7 @@ class HardwareAccelerator:
     @property
     def onnx_providers(self):
         if not self.__enabled:
-            return []
+            return ["CPUExecutionProvider"]
        return self.__onnx_providers

    def has_cuda(self):
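Note: with acceleration disabled the property now returns a usable CPU provider instead of an empty list, so callers can pass it straight to onnxruntime. A sketch (the model path is a placeholder):

import onnxruntime as ort

# The provider list from the property plugs directly into session creation.
session = ort.InferenceSession("backend/models/V5/ch_det/inference.onnx",
                               providers=["CPUExecutionProvider"])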
@@ -91,6 +98,11 @@ class HardwareAccelerator:
             return False
         return self.__cuda

+    def has_mps(self):
+        if not self.__enabled:
+            return False
+        return self.__mps
+
     def set_enabled(self, enable):
         self.__enabled = enable

@@ -117,4 +129,6 @@ class HardwareAccelerator:
             self.__dml = False
         if self.__cuda:
             return torch.device("cuda:0")
+        if self.__mps:
+            return torch.device("mps")
         return torch.device("cpu")
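Note: the device property now falls through CUDA → MPS → CPU. A stand-alone equivalent of that priority order (a sketch, not the project's exact code):

import torch

def pick_device() -> torch.device:
    # Same priority as HardwareAccelerator.device: CUDA, then MPS, then CPU.
    if torch.cuda.is_available():
        return torch.device("cuda:0")
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        return torch.device("mps")
    return torch.device("cpu")

x = torch.zeros(8, device=pick_device())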
@@ -10,54 +10,13 @@ class ModelConfig:
         self.STTN_DET_MODEL_PATH = os.path.join(BASE_DIR, 'models', 'sttn-det', 'sttn.pth')
         self.PROPAINTER_MODEL_DIR = os.path.join(BASE_DIR, 'models', 'propainter')
         if config.subtitleDetectMode.value == SubtitleDetectMode.Fast:
-            self.DET_MODEL_DIR = os.path.join(BASE_DIR, 'models', 'V4', 'ch_det_fast')
+            self.DET_MODEL_DIR = os.path.join(BASE_DIR, 'models', 'V5', 'ch_det_fast')
         elif config.subtitleDetectMode.value == SubtitleDetectMode.Accurate:
-            self.DET_MODEL_DIR = os.path.join(BASE_DIR, 'models', 'V4', 'ch_det')
+            self.DET_MODEL_DIR = os.path.join(BASE_DIR, 'models', 'V5', 'ch_det')
         else:
             raise ValueError(f"Invalid subtitle detect mode: {config.subtitleDetectMode.value}")

         merge_big_file_if_not_exists(self.LAMA_MODEL_DIR, 'big-lama.pt')
         merge_big_file_if_not_exists(self.PROPAINTER_MODEL_DIR, 'ProPainter.pth')
-        merge_big_file_if_not_exists(self.DET_MODEL_DIR, 'inference.pdiparams')
-
-    def convertToOnnxModelIfNeeded(self, model_dir, model_filename="inference.pdmodel", params_filename="inference.pdiparams", opset_version=14):
-        """Converts a Paddle model to ONNX if ONNX providers are available and the model does not already exist."""
-
-        onnx_model_path = os.path.join(model_dir, "model.onnx")
-
-        if os.path.exists(onnx_model_path):
-            print(f"ONNX model already exists: {onnx_model_path}. Skipping conversion.")
-            return onnx_model_path
-
-        print(f"Converting Paddle model {model_dir} to ONNX...")
-        model_file = os.path.join(model_dir, model_filename)
-        params_file = os.path.join(model_dir, params_filename) if params_filename else ""
-
-        try:
-            import paddle2onnx
-            # Ensure the target directory exists
-            os.makedirs(os.path.dirname(onnx_model_path), exist_ok=True)
-
-            # Convert and save the model
-            onnx_model = paddle2onnx.export(
-                model_filename=model_file,
-                params_filename=params_file,
-                save_file=onnx_model_path,
-                opset_version=opset_version,
-                auto_upgrade_opset=True,
-                verbose=True,
-                enable_onnx_checker=True,
-                enable_experimental_op=True,
-                enable_optimize=True,
-                custom_op_info={},
-                deploy_backend="onnxruntime",
-                calibration_file="calibration.cache",
-                external_file=os.path.join(model_dir, "external_data"),
-                export_fp16_model=False,
-            )
-
-            print(f"Conversion successful. ONNX model saved to: {onnx_model_path}")
-            return onnx_model_path
-        except Exception as e:
-            print(f"Error during conversion: {e}")
-            return model_dir
+        merge_big_file_if_not_exists(self.DET_MODEL_DIR, 'inference.onnx')
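Note: runtime Paddle→ONNX conversion is dropped in favor of shipping a pre-converted, split inference.onnx. For reference, a one-off offline conversion would use the same paddle2onnx.export() call the removed method wrapped (a sketch; the directory is a placeholder and keyword support may vary by paddle2onnx version):

import os
import paddle2onnx

model_dir = "backend/models/V5/ch_det"  # placeholder
paddle2onnx.export(
    model_filename=os.path.join(model_dir, "inference.pdmodel"),
    params_filename=os.path.join(model_dir, "inference.pdiparams"),
    save_file=os.path.join(model_dir, "inference.onnx"),
    opset_version=14,
    auto_upgrade_opset=True,
)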
@@ -1,3 +1,4 @@
+import os
 import sys
 from functools import cached_property

@@ -33,8 +34,8 @@ class SubtitleDetect:
         parser = utility.init_args()
         args = parser.parse_args([])
         args.det_algorithm = 'DB'
-        args.det_model_dir = model_config.convertToOnnxModelIfNeeded(model_config.DET_MODEL_DIR) if len(onnx_providers) > 0 else model_config.DET_MODEL_DIR
-        args.use_gpu = hardware_accelerator.has_cuda()
+        args.det_model_dir = os.path.join(model_config.DET_MODEL_DIR, 'inference.onnx') if len(onnx_providers) > 0 else model_config.DET_MODEL_DIR
+        args.use_gpu = False
         args.use_onnx = len(onnx_providers) > 0
         args.onnx_providers = onnx_providers
         return TextDetector(args)
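Note: args.onnx_providers ultimately comes from onnxruntime's own discovery. To see what a given install offers (standard onnxruntime API; output varies by platform and may include CoreMLExecutionProvider on Apple Silicon):

import onnxruntime as ort

# Providers available in this onnxruntime build, in priority order.
print(ort.get_available_providers())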