mirror of https://github.com/k4yt3x/video2x.git
synced 2026-02-10 14:54:46 +08:00

Compare commits: 9 commits, 5.0.0-beta ... 5.0.0-beta
Commits in this comparison (author and date columns were not captured by the mirror):

- c07fafc0e9
- 595b179d3c
- c865d494a1
- f4acb2188d
- 01d4006c75
- a7f0f34751
- b6b1bf9f0e
- 5d7a53a2fc
- b32e0ec132
README.md

@@ -26,11 +26,11 @@ Nightly releases are automatically created by the GitHub Actions CI/CD pipelines
 
 ## [📦 Container Image](https://github.com/k4yt3x/video2x/pkgs/container/video2x)
 
-Video2X container images are available on the GitHub Container Registry for easy deployment on Linux and macOS. If you already have Docker/Podman installed, only one command is needed to start upscaling a video. For more information on how to use Video2X's Docker image, please refer to the [documentations (outdated)](https://github.com/K4YT3X/video2x/wiki/Docker).
+Video2X container images are available on the GitHub Container Registry for easy deployment on Linux and macOS. If you already have Docker/Podman installed, only one command is needed to start upscaling a video. For more information on how to use Video2X's Docker image, please refer to the [documentations](https://github.com/K4YT3X/video2x/wiki/Container).
 
 ## [📖 Documentations](https://github.com/k4yt3x/video2x/wiki)
 
-Video2X's documentations are hosted on this repository's [Wiki page](https://github.com/k4yt3x/video2x/wiki). It includes comprehensive explanations for how to use the [GUI](https://github.com/k4yt3x/video2x/wiki/GUI), the [CLI](https://github.com/k4yt3x/video2x/wiki/CLI), the [container image](https://github.com/K4YT3X/video2x/wiki/Container), the library, and more. The Wiki is open to edits by the community, so you, yes you, can also correct errors or add new contents to the documentations.
+Video2X's documentations are hosted on this repository's [Wiki page](https://github.com/k4yt3x/video2x/wiki). It includes comprehensive explanations for how to use the [GUI](https://github.com/k4yt3x/video2x/wiki/GUI), the [CLI](https://github.com/k4yt3x/video2x/wiki/CLI), the [container image](https://github.com/K4YT3X/video2x/wiki/Container), the [library](https://github.com/k4yt3x/video2x/wiki/Library), and more. The Wiki is open to edits by the community, so you, yes you, can also correct errors or add new contents to the documentations.
 
 ## Introduction
examples/container-run-interpolate-rife.sh (new executable file, 10 lines)

@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+sudo podman run \
+    -it --rm --gpus all -v /dev/dri:/dev/dri \
+    -v "$PWD"/data:/host \
+    ghcr.io/k4yt3x/video2x:5.0.0-beta3-cuda \
+    -i input.mp4 -o output.mp4 \
+    interpolate
examples/container-run-upscale-waifu2x.sh (new executable file, 11 lines)

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+sudo podman run \
+    -it --rm --gpus all -v /dev/dri:/dev/dri \
+    -v "$PWD"/data:/host \
+    ghcr.io/k4yt3x/video2x:5.0.0-beta3-cuda \
+    -i input.mp4 -o output.mp4 \
+    -p5 upscale \
+    -h 720 -a waifu2x -n3
examples/run_interpolate_rife.py (new executable file, 21 lines)

@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# built-in imports
+import pathlib
+
+# import video2x
+from video2x import Video2X
+
+
+# create video2x object
+video2x = Video2X()
+
+# run interpolation
+video2x.interpolate(
+    pathlib.Path("input.mp4"),  # input video path
+    pathlib.Path("output.mp4"),  # output video path
+    3,  # processes: number of parallel processors
+    10,  # threshold: adjacent frames with > n% diff won't be processed (100 == process all)
+    "rife",  # algorithm: the algorithm to use to process the video
+)
examples/run_upscale_waifu2x.py (new executable file, 24 lines)

@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# built-in imports
+import pathlib
+
+# import video2x
+from video2x import Video2X
+
+
+# create video2x object
+video2x = Video2X()
+
+# run upscale
+video2x.upscale(
+    pathlib.Path("input.mp4"),  # input video path
+    pathlib.Path("output.mp4"),  # output video path
+    None,  # width: width of output, None == auto
+    720,  # height: height of output, None == auto
+    3,  # noise: noise level, algorithm-dependent
+    5,  # processes: number of parallel processors
+    0,  # threshold: adjacent frames with < n% diff won't be processed (0 == process all)
+    "waifu2x",  # algorithm: the algorithm to use to process the video
+)
scripts/run-interactive-container.sh (new executable file, 18 lines)

@@ -0,0 +1,18 @@
+#!/bin/bash
+# mount the current (video2x repo root) directory into a container
+# with drivers installed so the code can be debugged in the container
+# this one launches an interactive shell instead of Python
+
+set -euo pipefail
+
+sudo podman run -it --rm \
+    --gpus all -v /dev/dri:/dev/dri \
+    -v "$PWD":/host \
+    -m 15g \
+    --cpus 0.9 \
+    -v "$HOME"/projects/media2x/video2x:/video2x \
+    -e PYTHONPATH="/video2x" \
+    --entrypoint=/bin/bash \
+    ghcr.io/k4yt3x/video2x:5.0.0-beta1-cuda
+
+# alias upscale='python3 -m video2x -i /host/input-large.mp4 -o /host/output-large.mp4 -p5 upscale -h 1440 -a waifu2x -n3'
scripts/run-source-in-container.sh (new executable file, 18 lines)

@@ -0,0 +1,18 @@
+#!/bin/bash
+# mount the current (video2x repo root) directory into a container
+# with drivers installed so the code can be debugged in the container
+
+set -euo pipefail
+
+sudo podman run -it --rm \
+    --gpus all -v /dev/dri:/dev/dri \
+    -v "$PWD":/host \
+    -m 15g \
+    --cpus 0.9 \
+    -v "$HOME"/projects/media2x/video2x:/video2x \
+    -e PYTHONPATH="/video2x" \
+    ghcr.io/k4yt3x/video2x:5.0.0-beta3-cuda \
+    -i data/input.mp4 -o data/output.mp4 \
+    -p5 \
+    upscale \
+    -h 1440 -a waifu2x -n3
video2x/__init__.py

@@ -24,7 +24,7 @@ Last Modified: February 11, 2022
 
 # version assignment has to precede imports to
 # prevent setup.cfg from producing import errors
-__version__ = "5.0.0-beta2"
+__version__ = "5.0.0-beta3"
 
 # local imports
 from .video2x import Video2X
video2x/interpolator.py

@@ -38,7 +38,7 @@ from PIL import ImageChops, ImageStat
 from loguru import logger
 
 
-DRIVER_CLASSES = {"rife": Rife}
+ALGORITHM_CLASSES = {"rife": Rife}
 
 
 class Interpolator(multiprocessing.Process):
@@ -57,7 +57,7 @@ class Interpolator(multiprocessing.Process):
     def run(self):
         self.running = True
         logger.info(f"Interpolator process {self.name} initiating")
-        driver_objects = {}
+        processor_objects = {}
         while self.running:
             try:
                 try:
@@ -65,7 +65,7 @@ class Interpolator(multiprocessing.Process):
                     (
                         frame_index,
                         (image0, image1),
-                        (difference_threshold, driver),
+                        (difference_threshold, algorithm),
                     ) = self.processing_queue.get(False)
                 except queue.Empty:
                     time.sleep(0.1)
@@ -86,13 +86,13 @@ class Interpolator(multiprocessing.Process):
                 # process the interpolation
                 if difference_ratio < difference_threshold:
 
-                    # select a driver object with the required settings
+                    # select a processor object with the required settings
                     # create a new object if none are available
-                    driver_object = driver_objects.get(driver)
-                    if driver_object is None:
-                        driver_object = DRIVER_CLASSES[driver](0)
-                        driver_objects[driver] = driver_object
-                    interpolated_image = driver_object.process(image0, image1)
+                    processor_object = processor_objects.get(algorithm)
+                    if processor_object is None:
+                        processor_object = ALGORITHM_CLASSES[algorithm](0)
+                        processor_objects[algorithm] = processor_object
+                    interpolated_image = processor_object.process(image0, image1)
 
                 # if the difference is greater than threshold
                 # there's a change in camera angle, ignore
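The hunk above is the core of the driver-to-algorithm rename: each worker process keeps a cache of processor objects and instantiates one per algorithm on first use. A minimal standalone sketch of that get-or-create pattern, with a placeholder class standing in for the real Rife wrapper (the names here are illustrative, not the actual video2x API):

```python
# minimal sketch of the per-process get-or-create cache shown in the hunk above;
# FakeRife is a stand-in for the real wrapper class, not part of video2x
class FakeRife:
    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id

    def process(self, image0, image1):
        # a real implementation would return the in-between frame
        return image0


ALGORITHM_CLASSES = {"rife": FakeRife}
processor_objects = {}


def get_processor(algorithm: str):
    # reuse an existing processor object; create one on first use
    processor_object = processor_objects.get(algorithm)
    if processor_object is None:
        processor_object = ALGORITHM_CLASSES[algorithm](0)
        processor_objects[algorithm] = processor_object
    return processor_object


interpolated = get_processor("rife").process("frame0", "frame1")
```

Caching per worker avoids reloading the model for every frame pair while keeping each multiprocessing.Process self-contained.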
video2x/upscaler.py

@@ -40,15 +40,15 @@ import time
 from PIL import Image, ImageChops, ImageStat
 from loguru import logger
 
-# fixed scaling ratios supported by the drivers
+# fixed scaling ratios supported by the algorithms
 # that only support certain fixed scale ratios
-DRIVER_FIXED_SCALING_RATIOS = {
+ALGORITHM_FIXED_SCALING_RATIOS = {
     "waifu2x": [1, 2],
     "srmd": [2, 3, 4],
     "realsr": [4],
 }
 
-DRIVER_CLASSES = {"waifu2x": Waifu2x, "srmd": Srmd, "realsr": Realsr}
+ALGORITHM_CLASSES = {"waifu2x": Waifu2x, "srmd": Srmd, "realsr": Realsr}
 
 
 class Upscaler(multiprocessing.Process):
@@ -69,7 +69,7 @@ class Upscaler(multiprocessing.Process):
         logger.opt(colors=True).info(
             f"Upscaler process <blue>{self.name}</blue> initiating"
         )
-        driver_objects = {}
+        processor_objects = {}
         while self.running:
             try:
                 try:
@@ -82,7 +82,7 @@ class Upscaler(multiprocessing.Process):
                         output_height,
                         noise,
                         difference_threshold,
-                        driver,
+                        algorithm,
                     ),
                 ) = self.processing_queue.get(False)
 
@@ -123,9 +123,9 @@ class Upscaler(multiprocessing.Process):
                     # calculate required minimum scale ratio
                     output_scale = max(output_width / width, output_height / height)
 
-                    # select the optimal driver scaling ratio to use
+                    # select the optimal algorithm scaling ratio to use
                     supported_scaling_ratios = sorted(
-                        DRIVER_FIXED_SCALING_RATIOS[driver]
+                        ALGORITHM_FIXED_SCALING_RATIOS[algorithm]
                    )
 
                     remaining_scaling_ratio = math.ceil(output_scale)
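The hunk above picks the fixed ratios the chosen algorithm supports and rounds the required scale up with math.ceil; the loop that actually builds scaling_jobs falls outside this diff. A plausible, self-contained sketch of such a greedy decomposition (the plan_scaling_jobs function and its loop body are assumptions; only the ratio table and the ceil step come from the diff):

```python
import math

# fixed ratios from the hunk above
ALGORITHM_FIXED_SCALING_RATIOS = {"waifu2x": [1, 2], "srmd": [2, 3, 4], "realsr": [4]}


def plan_scaling_jobs(output_scale: float, algorithm: str) -> list:
    """Greedily cover the required scale with the algorithm's fixed ratios.

    Assumption: the real job-planning code is not shown in this diff; this
    sketch only illustrates why a list of fixed ratios plus math.ceil is enough.
    """
    supported = sorted(ALGORITHM_FIXED_SCALING_RATIOS[algorithm])
    remaining = math.ceil(output_scale)
    jobs = []
    while remaining > 1:
        # largest supported ratio that does not overshoot; if every usable
        # ratio overshoots (e.g. realsr's fixed 4x), take the largest anyway
        candidates = [r for r in supported if 1 < r <= remaining]
        ratio = candidates[-1] if candidates else supported[-1]
        jobs.append(ratio)
        remaining = math.ceil(remaining / ratio)
    return jobs


print(plan_scaling_jobs(2.0, "waifu2x"))  # [2]
print(plan_scaling_jobs(4.5, "waifu2x"))  # [2, 2, 2] -> 8x, later downscaled
```

Any overshoot is harmless because, as the next hunk shows, the result is downscaled to the exact output size with Image.LANCZOS.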
@@ -163,17 +163,17 @@ class Upscaler(multiprocessing.Process):
 
                     for job in scaling_jobs:
 
-                        # select a driver object with the required settings
+                        # select a processor object with the required settings
                         # create a new object if none are available
-                        driver_object = driver_objects.get((driver, job))
-                        if driver_object is None:
-                            driver_object = DRIVER_CLASSES[driver](
+                        processor_object = processor_objects.get((algorithm, job))
+                        if processor_object is None:
+                            processor_object = ALGORITHM_CLASSES[algorithm](
                                 scale=job, noise=noise
                             )
-                            driver_objects[(driver, job)] = driver_object
+                            processor_objects[(algorithm, job)] = processor_object
 
-                        # process the image with the selected driver
-                        image1 = driver_object.process(image1)
+                        # process the image with the selected algorithm
+                        image1 = processor_object.process(image1)
 
                         # downscale the image to the desired output size and save the image to disk
                         image1 = image1.resize((output_width, output_height), Image.LANCZOS)
video2x/video2x.py

@@ -81,21 +81,15 @@ Contact: k4yt3x@k4yt3x.com""".format(
     __version__
 )
 
-UPSCALING_DRIVERS = [
+# algorithms available for upscaling tasks
+UPSCALING_ALGORITHMS = [
     "waifu2x",
     "srmd",
     "realsr",
 ]
 
-INTERPOLATION_DRIVERS = ["rife"]
-
-# fixed scaling ratios supported by the drivers
-# that only support certain fixed scale ratios
-DRIVER_FIXED_SCALING_RATIOS = {
-    "waifu2x": [1, 2],
-    "srmd": [2, 3, 4],
-    "realsr": [4],
-}
+# algorithms available for frame interpolation tasks
+INTERPOLATION_ALGORITHMS = ["rife"]
 
 # progress bar labels for different modes
 MODE_LABELS = {"upscale": "Upscaling", "interpolate": "Interpolating"}
@@ -279,6 +273,10 @@ class Video2X:
             logger.exception(e)
             exception.append(e)
 
+        # if no exceptions were produced
+        else:
+            logger.success("Processing completed successfully")
+
         finally:
             # mark processing queue as closed
             self.processing_queue.close()
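The new else branch makes the success message fire only when the try block raised nothing, while finally still runs unconditionally. A tiny generic illustration of that try/except/else/finally ordering (plain Python, not video2x code):

```python
def run_job(job):
    try:
        job()
    except Exception as e:
        print(f"failed: {e}")
    else:
        # runs only if the try block raised no exception
        print("Processing completed successfully")
    finally:
        # runs in every case, success or failure
        print("queue closed")


run_job(lambda: None)    # prints the success and cleanup messages
run_job(lambda: 1 / 0)   # prints the failure and cleanup messages
```

This is also why the later hunk removes the unconditional logger.success call from main(): it would otherwise report success even after a failure.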
@@ -319,7 +317,7 @@ class Video2X:
         noise: int,
         processes: int,
         threshold: float,
-        driver: str,
+        algorithm: str,
     ) -> None:
 
         # get basic video information
@@ -354,7 +352,7 @@ class Video2X:
                 output_height,
                 noise,
                 threshold,
-                driver,
+                algorithm,
             ),
         )
 
@@ -364,7 +362,7 @@ class Video2X:
         output_path: pathlib.Path,
         processes: int,
         threshold: float,
-        driver: str,
+        algorithm: str,
     ) -> None:
 
         # get video basic information
@@ -386,7 +384,7 @@ class Video2X:
             Interpolator,
             "interpolate",
             processes,
-            (threshold, driver),
+            (threshold, algorithm),
         )
 
@@ -440,11 +438,11 @@ def parse_arguments() -> argparse.Namespace:
     upscale.add_argument("-h", "--height", type=int, help="output height")
     upscale.add_argument("-n", "--noise", type=int, help="denoise level", default=3)
     upscale.add_argument(
-        "-d",
-        "--driver",
-        choices=UPSCALING_DRIVERS,
-        help="driver to use for upscaling",
-        default=UPSCALING_DRIVERS[0],
+        "-a",
+        "--algorithm",
+        choices=UPSCALING_ALGORITHMS,
+        help="algorithm to use for upscaling",
+        default=UPSCALING_ALGORITHMS[0],
     )
     upscale.add_argument(
         "-t",
@@ -462,11 +460,11 @@ def parse_arguments() -> argparse.Namespace:
         "--help", action="help", help="show this help message and exit"
     )
     interpolate.add_argument(
-        "-d",
-        "--driver",
-        choices=UPSCALING_DRIVERS,
-        help="driver to use for upscaling",
-        default=INTERPOLATION_DRIVERS[0],
+        "-a",
+        "--algorithm",
+        choices=INTERPOLATION_ALGORITHMS,
+        help="algorithm to use for frame interpolation",
+        default=INTERPOLATION_ALGORITHMS[0],
     )
     interpolate.add_argument(
         "-t",
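For context, here is a condensed, runnable sketch of the renamed CLI surface. It is a stand-in reduced to the flags visible in this diff, not the full video2x parser; add_help=False mirrors the real parser, which reclaims -h for --height and registers --help manually:

```python
import argparse

# constants as renamed in this diff
UPSCALING_ALGORITHMS = ["waifu2x", "srmd", "realsr"]
INTERPOLATION_ALGORITHMS = ["rife"]

parser = argparse.ArgumentParser(prog="video2x", add_help=False)
subparsers = parser.add_subparsers(dest="action")

upscale = subparsers.add_parser("upscale", add_help=False)
upscale.add_argument(
    "-a", "--algorithm",
    choices=UPSCALING_ALGORITHMS,
    default=UPSCALING_ALGORITHMS[0],
)

interpolate = subparsers.add_parser("interpolate", add_help=False)
interpolate.add_argument(
    "-a", "--algorithm",
    choices=INTERPOLATION_ALGORITHMS,
    default=INTERPOLATION_ALGORITHMS[0],
)

print(parser.parse_args(["upscale", "-a", "waifu2x"]).algorithm)  # waifu2x
print(parser.parse_args(["interpolate"]).algorithm)               # rife
```

Each subcommand keeps its own choices and default, which is what lets upscale and interpolate expose the same -a/--algorithm flag with different valid values.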
@@ -523,7 +521,7 @@ def main():
                 args.noise,
                 args.processes,
                 args.threshold,
-                args.driver,
+                args.algorithm,
             )
 
         elif args.action == "interpolate":
@@ -532,11 +530,9 @@ def main():
                 args.output,
                 args.processes,
                 args.threshold,
-                args.driver,
+                args.algorithm,
             )
 
-        logger.success("Processing completed successfully")
-
     # don't print the traceback for manual terminations
     except (SystemExit, KeyboardInterrupt) as e:
         raise SystemExit(e)