22 Commits

Author SHA1 Message Date
k4yt3x f17d75539c fixed dockerfile label syntax 2022-02-19 01:49:23 +00:00
k4yt3x 82512ef10c updated CI to build multiple versions of containers 2022-02-17 05:43:12 +00:00
k4yt3x ad479e53b8 renamed Dockerfiles for CI 2022-02-17 05:42:48 +00:00
k4yt3x a356bfeaff fixed workflow container image name 2022-02-17 05:08:22 +00:00
k4yt3x dee8e23485 changed get tag command 2022-02-17 04:59:24 +00:00
k4yt3x 1fa0821057 changed container build pipeline to release 2022-02-17 04:36:26 +00:00
k4yt3x b5ecffba81 added container build workflow 2022-02-17 04:22:27 +00:00
k4yt3x 672c9b8652 bumped version to 5.0.0-beta4 2022-02-17 03:52:04 +00:00
k4yt3x 9f73e75f17 added audio, subtitles, data, and attachments copying 2022-02-17 03:51:55 +00:00
k4yt3x ef1a8f3e41 added extra args for run scripts 2022-02-17 03:50:38 +00:00
k4yt3x 04f409ef80 updated discussion group heading 2022-02-17 02:01:06 +00:00
k4yt3x 51c8693dce fixed legeal info print 2022-02-16 03:02:26 +00:00
k4yt3x bb572e2468 fixed argparse help bug 2022-02-16 02:56:35 +00:00
k4yt3x c07fafc0e9 bumped version to 5.0.0-beta3 2022-02-15 07:02:53 +00:00
k4yt3x 595b179d3c updated Library link 2022-02-15 05:49:24 +00:00
k4yt3x c865d494a1 updated run source script paths 2022-02-15 02:30:22 +00:00
k4yt3x f4acb2188d renamed container run scripts 2022-02-15 00:59:40 +00:00
k4yt3x 01d4006c75 changed -d to -a 2022-02-15 00:55:54 +00:00
k4yt3x a7f0f34751 terminology change: driver -> algorithm 2022-02-15 00:54:17 +00:00
k4yt3x b6b1bf9f0e added two container debugging scripts 2022-02-15 00:52:28 +00:00
k4yt3x 5d7a53a2fc added Python and shell examples 2022-02-15 00:52:21 +00:00
k4yt3x b32e0ec132 updated containers documentation path 2022-02-15 00:06:56 +00:00
16 changed files with 285 additions and 107 deletions

.github/workflows/release.yml (new file, 61 lines)

@@ -0,0 +1,61 @@
name: Release
on:
push:
branches:
- master
tags:
- "*"
jobs:
setup:
name: Setup
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.get_tag.outputs.tag }}
steps:
- name: Get tag
id: get_tag
run: echo ::set-output name=tag::${GITHUB_REF/refs\/tags\//}
create-release:
name: Create release
needs:
- setup
runs-on: ubuntu-latest
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
steps:
- name: Create release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ needs.setup.outputs.tag }}
release_name: Video2X ${{ needs.setup.outputs.tag }}
draft: true
prerelease: false
container:
name: Build and upload container
needs:
- setup
- create-release
strategy:
matrix:
version:
- slim-alpine
- cuda
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- uses: mr-smithers-excellent/docker-build-push@v5
name: Build & push Docker image
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USER }}
password: ${{ secrets.GHCR_TOKEN }}
dockerfile: Dockerfile.${{ matrix.version }}
image: video2x
tags: latest, ${{ needs.setup.outputs.tag }}-${{ matrix.version }}
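For reference, the build matrix above produces one image per Dockerfile flavor, each pushed with the release tag plus the flavor suffix (alongside `latest`). A small illustration, assuming a release tag of `5.0.0-beta4` and the `ghcr.io/k4yt3x/video2x` image name used elsewhere in the repository:

```python
# Illustration only: the image references the matrix would produce for a
# hypothetical release tag; actual names depend on the workflow secrets.
tag = "5.0.0-beta4"
for version in ("slim-alpine", "cuda"):
    print(f"ghcr.io/k4yt3x/video2x:{tag}-{version}")
# ghcr.io/k4yt3x/video2x:5.0.0-beta4-slim-alpine
# ghcr.io/k4yt3x/video2x:5.0.0-beta4-cuda
```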


@@ -18,7 +18,7 @@ RUN apk add --no-cache \
# stage 2: install wheels into final image
FROM docker.io/library/python:3.10.2-alpine3.15
LABEL maintainer="K4YT3X <i@k4yt3x.com>"
LABEL maintainer="K4YT3X <i@k4yt3x.com>" \
org.opencontainers.image.source="https://github.com/k4yt3x/video2x" \
org.opencontainers.image.description="A lossless video/GIF/image upscaler"


@@ -8,7 +8,7 @@
<img src="https://img.shields.io/badge/dynamic/json?color=%23e85b46&label=Patreon&query=data.attributes.patron_count&suffix=%20patrons&url=https%3A%2F%2Fwww.patreon.com%2Fapi%2Fcampaigns%2F4507807&style=flat-square"/>
</p>
### Official [Telegram Discussion Group](https://t.me/video2x)
## [💬 Telegram Discussion Group](https://t.me/video2x)
Join our Telegram discussion group to ask any questions you have about Video2X, chat directly with the developers, or discuss about upscaling technologies and the future of Video2X in general.
@@ -26,11 +26,11 @@ Nightly releases are automatically created by the GitHub Actions CI/CD pipelines
## [📦 Container Image](https://github.com/k4yt3x/video2x/pkgs/container/video2x)
Video2X container images are available on the GitHub Container Registry for easy deployment on Linux and macOS. If you already have Docker/Podman installed, only one command is needed to start upscaling a video. For more information on how to use Video2X's Docker image, please refer to the [documentations (outdated)](https://github.com/K4YT3X/video2x/wiki/Docker).
Video2X container images are available on the GitHub Container Registry for easy deployment on Linux and macOS. If you already have Docker/Podman installed, only one command is needed to start upscaling a video. For more information on how to use Video2X's Docker image, please refer to the [documentations](https://github.com/K4YT3X/video2x/wiki/Container).
## [📖 Documentations](https://github.com/k4yt3x/video2x/wiki)
Video2X's documentations are hosted on this repository's [Wiki page](https://github.com/k4yt3x/video2x/wiki). It includes comprehensive explanations for how to use the [GUI](https://github.com/k4yt3x/video2x/wiki/GUI), the [CLI](https://github.com/k4yt3x/video2x/wiki/CLI), the [container image](https://github.com/K4YT3X/video2x/wiki/Container), the library, and more. The Wiki is open to edits by the community, so you, yes you, can also correct errors or add new contents to the documentations.
Video2X's documentations are hosted on this repository's [Wiki page](https://github.com/k4yt3x/video2x/wiki). It includes comprehensive explanations for how to use the [GUI](https://github.com/k4yt3x/video2x/wiki/GUI), the [CLI](https://github.com/k4yt3x/video2x/wiki/CLI), the [container image](https://github.com/K4YT3X/video2x/wiki/Container), the [library](https://github.com/k4yt3x/video2x/wiki/Library), and more. The Wiki is open to edits by the community, so you, yes you, can also correct errors or add new contents to the documentations.
## Introduction


@@ -0,0 +1,10 @@
#!/bin/sh
set -euxo pipefail
sudo podman run \
-it --rm --gpus all -v /dev/dri:/dev/dri \
-v $PWD/data:/host \
ghcr.io/k4yt3x/video2x:5.0.0-beta3-cuda \
-i input.mp4 -o output.mp4 \
interpolate


@@ -0,0 +1,11 @@
#!/bin/sh
set -euxo pipefail
sudo podman run \
-it --rm --gpus all -v /dev/dri:/dev/dri \
-v $PWD/data:/host \
ghcr.io/k4yt3x/video2x:5.0.0-beta3-cuda \
-i input.mp4 -o output.mp4 \
-p5 upscale \
-h 720 -a waifu2x -n3


@@ -0,0 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# built-in imports
import pathlib
# import video2x
from video2x import Video2X
# create video2x object
video2x = Video2X()
# run upscale
video2x.interpolate(
pathlib.Path("input.mp4"), # input video path
pathlib.Path("output.mp4"), # another
3, # processes: number of parallel processors
10, # threshold: adjacent frames with > n% diff won't be processed (100 == process all)
"rife", # algorithm: the algorithm to use to process the video
)

examples/run_upscale_waifu2x.py (new executable file, 24 lines)

@@ -0,0 +1,24 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# built-in imports
import pathlib
# import video2x
from video2x import Video2X
# create video2x object
video2x = Video2X()
# run upscale
video2x.upscale(
pathlib.Path("input.mp4"), # input video path
pathlib.Path("output.mp4"), # another
None, # width: width of output, None == auto
720, # height: height of output, None == auto
3, # noise: noise level, algorithm-dependent
5, # processes: number of parallel processors
0, # threshold: adjacent frames with < n% diff won't be processed (0 == process all)
"waifu2x", # algorithm: the algorithm to use to process the video
)


@@ -0,0 +1,19 @@
#!/bin/sh
# mount the current (video2x repo root) directory into a container
# with drivers installed so the code can be debugged in the container
# this one launches an interactive shell instead of Python
set -euo pipefail
sudo podman run -it --rm \
--gpus all -v /dev/dri:/dev/dri \
-v $PWD:/host \
-m 15g \
--cpus 0.9 \
-v $HOME/projects/media2x/video2x:/video2x \
-e PYTHONPATH=/video2x \
-e PYTHONDONTWRITEBYTECODE=1 \
--entrypoint=/bin/bash \
ghcr.io/k4yt3x/video2x:5.0.0-beta4-cuda
# alias upscale='python3 -m video2x -i /host/input-large.mp4 -o /host/output-large.mp4 -p3 upscale -h 1440 -d waifu2x -n3'


@@ -0,0 +1,19 @@
#!/bin/sh
# mount the current (video2x repo root) directory into a container
# with drivers installed so the code can be debugged in the container
set -euo pipefail
sudo podman run -it --rm \
--gpus all -v /dev/dri:/dev/dri \
-v $PWD:/host \
-m 15g \
--cpus 0.9 \
-v $HOME/projects/media2x/video2x:/video2x \
-e PYTHONPATH=/video2x \
-e PYTHONDONTWRITEBYTECODE=1 \
ghcr.io/k4yt3x/video2x:5.0.0-beta4-cuda \
-i data/input.mp4 -o data/output.mp4 \
-p3 \
upscale \
-h 1440 -a waifu2x -n3


@@ -19,12 +19,12 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
Name: Package Init
Author: K4YT3X
Date Created: July 3, 2021
Last Modified: February 11, 2022
Last Modified: February 16, 2022
"""
# version assignment has to precede imports to
# prevent setup.cfg from producing import errors
__version__ = "5.0.0-beta2"
__version__ = "5.0.0-beta4"
# local imports
from .video2x import Video2X


@@ -19,7 +19,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
Name: Video Decoder
Author: K4YT3X
Date Created: June 17, 2021
Last Modified: February 12, 2022
Last Modified: February 16, 2022
"""
# built-in imports
@@ -59,7 +59,7 @@ class VideoDecoder(threading.Thread):
processing_queue: queue.Queue,
processing_settings: tuple,
ignore_max_image_pixels=True,
):
) -> None:
threading.Thread.__init__(self)
self.running = False
self.input_path = input_path
@@ -91,7 +91,7 @@ class VideoDecoder(threading.Thread):
# stderr=subprocess.DEVNULL,
)
def run(self):
def run(self) -> None:
self.running = True
# the index of the frame
@@ -164,5 +164,5 @@ class VideoDecoder(threading.Thread):
self.running = False
return super().run()
def stop(self):
def stop(self) -> None:
self.running = False


@@ -19,7 +19,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
Name: Video Encoder
Author: K4YT3X
Date Created: June 17, 2021
Last Modified: June 30, 2021
Last Modified: February 16, 2022
"""
# built-in imports
@@ -61,7 +61,11 @@ class VideoEncoder(threading.Thread):
total_frames: int,
processed_frames: multiprocessing.managers.ListProxy,
processed: multiprocessing.sharedctypes.Synchronized,
):
copy_audio: bool = True,
copy_subtitle: bool = True,
copy_data: bool = False,
copy_attachments: bool = False,
) -> None:
threading.Thread.__init__(self)
self.running = False
self.input_path = input_path
@@ -82,31 +86,31 @@ class VideoEncoder(threading.Thread):
r=frame_rate,
)
# map additional streams from original file
"""
# copy additional streams from original file
# https://ffmpeg.org/ffmpeg.html#Stream-specifiers-1
additional_streams = [
# self.original["v?"],
self.original["a?"],
self.original["s?"],
self.original["d?"],
self.original["t?"],
# self.original["1:v?"],
self.original["a?"] if copy_audio is True else None,
self.original["s?"] if copy_subtitle is True else None,
self.original["d?"] if copy_data is True else None,
self.original["t?"] if copy_attachments is True else None,
]
"""
# run FFmpeg and produce final output
self.encoder = subprocess.Popen(
ffmpeg.compile(
ffmpeg.output(
frames,
*[s for s in additional_streams if s is not None],
str(self.output_path),
pix_fmt="yuv420p",
vcodec="libx264",
acodec="copy",
# acodec="copy",
r=frame_rate,
crf=17,
vsync="1",
# map_metadata=1,
# metadata="comment=Upscaled with Video2X",
map_metadata=1,
metadata="comment=Processed with Video2X",
)
.global_args("-hide_banner")
.global_args("-nostats")
@@ -123,7 +127,7 @@ class VideoEncoder(threading.Thread):
# stderr=subprocess.DEVNULL,
)
def run(self):
def run(self) -> None:
self.running = True
frame_index = 0
while self.running and frame_index < self.total_frames:
@@ -165,5 +169,5 @@ class VideoEncoder(threading.Thread):
self.running = False
return super().run()
def stop(self):
def stop(self) -> None:
self.running = False
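The optional stream specifiers above (`"a?"`, `"s?"`, `"d?"`, `"t?"`) ask FFmpeg to map a stream only if it exists, which is what makes audio, subtitle, data, and attachment copying safe for inputs that lack those streams. A minimal ffmpeg-python sketch of the same mapping pattern, with hypothetical file paths and without the encoder's full set of options:

```python
# Minimal sketch of the optional-stream-mapping pattern; paths are
# hypothetical and this is not the encoder's complete command line.
import ffmpeg

original = ffmpeg.input("input.mp4")    # source file that may carry audio/subtitles
frames = ffmpeg.input("upscaled.mp4")   # stand-in for the processed video stream

# "a?" / "s?" are stream specifiers with FFmpeg's optional "?" suffix:
# the stream is mapped only when it exists, so inputs without audio or
# subtitles do not make FFmpeg exit with an error.
additional_streams = [original["a?"], original["s?"]]

args = ffmpeg.compile(
    ffmpeg.output(
        frames,
        *additional_streams,
        "output.mp4",
        vcodec="libx264",
        acodec="copy",
        pix_fmt="yuv420p",
    )
)
print(args)  # the argv list that would be handed to the ffmpeg binary
```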


@@ -19,7 +19,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
Name: Interpolator
Author: K4YT3X
Date Created: May 27, 2021
Last Modified: February 2, 2022
Last Modified: February 16, 2022
"""
# local imports
@@ -38,7 +38,7 @@ from PIL import ImageChops, ImageStat
from loguru import logger
DRIVER_CLASSES = {"rife": Rife}
ALGORITHM_CLASSES = {"rife": Rife}
class Interpolator(multiprocessing.Process):
@@ -46,7 +46,7 @@ class Interpolator(multiprocessing.Process):
self,
processing_queue: multiprocessing.Queue,
processed_frames: multiprocessing.managers.ListProxy,
):
) -> None:
multiprocessing.Process.__init__(self)
self.running = False
self.processing_queue = processing_queue
@@ -54,10 +54,10 @@ class Interpolator(multiprocessing.Process):
signal.signal(signal.SIGTERM, self._stop)
def run(self):
def run(self) -> None:
self.running = True
logger.info(f"Interpolator process {self.name} initiating")
driver_objects = {}
processor_objects = {}
while self.running:
try:
try:
@@ -65,7 +65,7 @@ class Interpolator(multiprocessing.Process):
(
frame_index,
(image0, image1),
(difference_threshold, driver),
(difference_threshold, algorithm),
) = self.processing_queue.get(False)
except queue.Empty:
time.sleep(0.1)
@@ -86,13 +86,13 @@ class Interpolator(multiprocessing.Process):
# process the interpolation
if difference_ratio < difference_threshold:
# select a driver object with the required settings
# select a processor object with the required settings
# create a new object if none are available
driver_object = driver_objects.get(driver)
if driver_object is None:
driver_object = DRIVER_CLASSES[driver](0)
driver_objects[driver] = driver_object
interpolated_image = driver_object.process(image0, image1)
processor_object = processor_objects.get(algorithm)
if processor_object is None:
processor_object = ALGORITHM_CLASSES[algorithm](0)
processor_objects[algorithm] = processor_object
interpolated_image = processor_object.process(image0, image1)
# if the difference is greater than threshold
# there's a change in camera angle, ignore
@@ -116,5 +116,5 @@ class Interpolator(multiprocessing.Process):
self.running = False
return super().run()
def _stop(self, _signal_number, _frame):
def _stop(self, _signal_number, _frame) -> None:
self.running = False
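The `difference_ratio < difference_threshold` check above decides whether a pair of adjacent frames is similar enough to interpolate between. A rough Pillow-based sketch of how such a percentage difference can be computed; the frame paths are hypothetical and the exact formula Video2X uses may differ:

```python
# Rough sketch of a percent-difference measure between two frames using
# Pillow; not necessarily the exact formula used by Video2X.
from PIL import Image, ImageChops, ImageStat

image0 = Image.open("frame0000.png").convert("RGB")
image1 = Image.open("frame0001.png").convert("RGB")

# per-pixel absolute difference, averaged per channel, scaled to 0-100
difference = ImageChops.difference(image0, image1)
stat = ImageStat.Stat(difference)
difference_ratio = sum(stat.mean) / (len(stat.mean) * 255) * 100

# with the interpolator's semantics, a middle frame is only generated when
# the difference stays below the threshold (default 10%); larger jumps are
# treated as scene changes and skipped
print(difference_ratio < 10)
```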


@@ -19,7 +19,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
Name: Upscaler
Author: K4YT3X
Date Created: May 27, 2021
Last Modified: August 17, 2021
Last Modified: February 16, 2022
"""
# local imports
@@ -40,15 +40,15 @@ import time
from PIL import Image, ImageChops, ImageStat
from loguru import logger
# fixed scaling ratios supported by the drivers
# fixed scaling ratios supported by the algorithms
# that only support certain fixed scale ratios
DRIVER_FIXED_SCALING_RATIOS = {
ALGORITHM_FIXED_SCALING_RATIOS = {
"waifu2x": [1, 2],
"srmd": [2, 3, 4],
"realsr": [4],
}
DRIVER_CLASSES = {"waifu2x": Waifu2x, "srmd": Srmd, "realsr": Realsr}
ALGORITHM_CLASSES = {"waifu2x": Waifu2x, "srmd": Srmd, "realsr": Realsr}
class Upscaler(multiprocessing.Process):
@@ -56,7 +56,7 @@ class Upscaler(multiprocessing.Process):
self,
processing_queue: multiprocessing.Queue,
processed_frames: multiprocessing.managers.ListProxy,
):
) -> None:
multiprocessing.Process.__init__(self)
self.running = False
self.processing_queue = processing_queue
@@ -64,12 +64,12 @@ class Upscaler(multiprocessing.Process):
signal.signal(signal.SIGTERM, self._stop)
def run(self):
def run(self) -> None:
self.running = True
logger.opt(colors=True).info(
f"Upscaler process <blue>{self.name}</blue> initiating"
)
driver_objects = {}
processor_objects = {}
while self.running:
try:
try:
@@ -82,7 +82,7 @@ class Upscaler(multiprocessing.Process):
output_height,
noise,
difference_threshold,
driver,
algorithm,
),
) = self.processing_queue.get(False)
@@ -123,9 +123,9 @@ class Upscaler(multiprocessing.Process):
# calculate required minimum scale ratio
output_scale = max(output_width / width, output_height / height)
# select the optimal driver scaling ratio to use
# select the optimal algorithm scaling ratio to use
supported_scaling_ratios = sorted(
DRIVER_FIXED_SCALING_RATIOS[driver]
ALGORITHM_FIXED_SCALING_RATIOS[algorithm]
)
remaining_scaling_ratio = math.ceil(output_scale)
@@ -163,17 +163,17 @@ class Upscaler(multiprocessing.Process):
for job in scaling_jobs:
# select a driver object with the required settings
# select a processor object with the required settings
# create a new object if none are available
driver_object = driver_objects.get((driver, job))
if driver_object is None:
driver_object = DRIVER_CLASSES[driver](
processor_object = processor_objects.get((algorithm, job))
if processor_object is None:
processor_object = ALGORITHM_CLASSES[algorithm](
scale=job, noise=noise
)
driver_objects[(driver, job)] = driver_object
processor_objects[(algorithm, job)] = processor_object
# process the image with the selected driver
image1 = driver_object.process(image1)
# process the image with the selected algorithm
image1 = processor_object.process(image1)
# downscale the image to the desired output size and save the image to disk
image1 = image1.resize((output_width, output_height), Image.LANCZOS)
@@ -193,5 +193,5 @@ class Upscaler(multiprocessing.Process):
self.running = False
return super().run()
def _stop(self, _signal_number, _frame):
def _stop(self, _signal_number, _frame) -> None:
self.running = False
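Because each upscaling algorithm only supports fixed scaling ratios, the upscaler above chains several passes and then downscales to the exact output size with Lanczos. A simplified sketch of how such a chain of passes could be planned; the repository's actual selection logic may differ in its details:

```python
# Simplified sketch of planning a chain of fixed upscaling passes
# (e.g. waifu2x supports only 1x/2x) to reach a target scale.
import math

ALGORITHM_FIXED_SCALING_RATIOS = {
    "waifu2x": [1, 2],
    "srmd": [2, 3, 4],
    "realsr": [4],
}

def plan_scaling_jobs(algorithm: str, output_scale: float) -> list:
    supported = sorted(ALGORITHM_FIXED_SCALING_RATIOS[algorithm])
    remaining = math.ceil(output_scale)
    jobs = []
    while remaining > 1:
        # use the smallest supported ratio that covers what is left,
        # otherwise apply the largest ratio and keep going
        candidates = [r for r in supported if r >= remaining]
        ratio = min(candidates) if candidates else max(supported)
        jobs.append(ratio)
        remaining = math.ceil(remaining / ratio)
    return jobs

# upscaling 480p to 1440p needs a 3x scale; with waifu2x (2x only) this
# plans two 2x passes, after which the result is downscaled to exact size
print(plan_scaling_jobs("waifu2x", 3))  # [2, 2]
```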


@@ -26,8 +26,8 @@ __ __ _ _ ___ __ __
Name: Video2X
Creator: K4YT3X
Date Created: Feb 24, 2018
Last Modified: February 12, 2022
Date Created: February 24, 2018
Last Modified: February 16, 2022
Editor: BrianPetkovsek
Last Modified: June 17, 2019
@@ -73,29 +73,23 @@ import cv2
import ffmpeg
LEGAL_INFO = """Video2X {}
Author: K4YT3X
License: GNU GPL v3
Github Page: https://github.com/k4yt3x/video2x
Contact: k4yt3x@k4yt3x.com""".format(
LEGAL_INFO = """Video2X\t\t{}
Author:\t\tK4YT3X
License:\tGNU AGPL v3
Github Page:\thttps://github.com/k4yt3x/video2x
Contact:\ti@k4yt3x.com""".format(
__version__
)
UPSCALING_DRIVERS = [
# algorithms available for upscaling tasks
UPSCALING_ALGORITHMS = [
"waifu2x",
"srmd",
"realsr",
]
INTERPOLATION_DRIVERS = ["rife"]
# fixed scaling ratios supported by the drivers
# that only support certain fixed scale ratios
DRIVER_FIXED_SCALING_RATIOS = {
"waifu2x": [1, 2],
"srmd": [2, 3, 4],
"realsr": [4],
}
# algorithms available for frame interpolation tasks
INTERPOLATION_ALGORITHMS = ["rife"]
# progress bar labels for different modes
MODE_LABELS = {"upscale": "Upscaling", "interpolate": "Interpolating"}
@@ -128,10 +122,10 @@ class Video2X:
- interpolate: perform motion interpolation on a file
"""
def __init__(self):
self.version = "5.0.0"
def __init__(self) -> None:
self.version = __version__
def _get_video_info(self, path: pathlib.Path):
def _get_video_info(self, path: pathlib.Path) -> tuple:
"""
get video file information with FFmpeg
@@ -173,7 +167,7 @@ class Video2X:
mode: str,
processes: int,
processing_settings: tuple,
):
) -> None:
# record original STDOUT and STDERR for restoration
original_stdout = sys.stdout
@@ -279,6 +273,10 @@ class Video2X:
logger.exception(e)
exception.append(e)
# if no exceptions were produced
else:
logger.success("Processing completed successfully")
finally:
# mark processing queue as closed
self.processing_queue.close()
@@ -319,7 +317,7 @@ class Video2X:
noise: int,
processes: int,
threshold: float,
driver: str,
algorithm: str,
) -> None:
# get basic video information
@@ -354,7 +352,7 @@ class Video2X:
output_height,
noise,
threshold,
driver,
algorithm,
),
)
@@ -364,7 +362,7 @@ class Video2X:
output_path: pathlib.Path,
processes: int,
threshold: float,
driver: str,
algorithm: str,
) -> None:
# get video basic information
@@ -386,7 +384,7 @@ class Video2X:
Interpolator,
"interpolate",
processes,
(threshold, driver),
(threshold, algorithm),
)
@@ -401,7 +399,7 @@ def parse_arguments() -> argparse.Namespace:
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-v", "--version", help="show version information and exit", action="store_true"
"--version", help="show version information and exit", action="store_true"
)
parser.add_argument(
"-i",
@@ -432,7 +430,12 @@ def parse_arguments() -> argparse.Namespace:
help="action to perform", dest="action", required=True
)
upscale = action.add_parser("upscale", help="upscale a file", add_help=False)
upscale = action.add_parser(
"upscale",
help="upscale a file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
)
upscale.add_argument(
"--help", action="help", help="show this help message and exit"
)
@@ -440,52 +443,65 @@ def parse_arguments() -> argparse.Namespace:
upscale.add_argument("-h", "--height", type=int, help="output height")
upscale.add_argument("-n", "--noise", type=int, help="denoise level", default=3)
upscale.add_argument(
"-d",
"--driver",
choices=UPSCALING_DRIVERS,
help="driver to use for upscaling",
default=UPSCALING_DRIVERS[0],
"-a",
"--algorithm",
choices=UPSCALING_ALGORITHMS,
help="algorithm to use for upscaling",
default=UPSCALING_ALGORITHMS[0],
)
upscale.add_argument(
"-t",
"--threshold",
type=float,
help="skip if the % difference between two adjacent frames is below this value; set to 0 to process all frames",
help=(
"skip if the percent difference between two adjacent frames is below this"
" value; set to 0 to process all frames"
),
default=0,
)
# interpolator arguments
interpolate = action.add_parser(
"interpolate", help="interpolate frames for file", add_help=False
"interpolate",
help="interpolate frames for file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
)
interpolate.add_argument(
"--help", action="help", help="show this help message and exit"
)
interpolate.add_argument(
"-d",
"--driver",
choices=UPSCALING_DRIVERS,
help="driver to use for upscaling",
default=INTERPOLATION_DRIVERS[0],
"-a",
"--algorithm",
choices=UPSCALING_ALGORITHMS,
help="algorithm to use for upscaling",
default=INTERPOLATION_ALGORITHMS[0],
)
interpolate.add_argument(
"-t",
"--threshold",
type=float,
help="skip if the % difference between two adjacent frames exceeds this value; set to 100 to interpolate all frames",
help=(
"skip if the percent difference between two adjacent frames exceeds this"
" value; set to 100 to interpolate all frames"
),
default=10,
)
return parser.parse_args()
def main():
def main() -> None:
"""
command line direct invocation
program entry point
"""
try:
# display version and legal information
if "--version" in sys.argv:
print(LEGAL_INFO)
sys.exit(0)
# parse command line arguments
args = parse_arguments()
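The subparsers above pass `add_help=False` and register `--help` manually so that `-h` stays free for `--height`. A standalone sketch of that argparse pattern, using a hypothetical parser:

```python
# Minimal sketch: disable argparse's automatic -h/--help on a subparser
# so -h can be reused as a short flag for --height.
import argparse

parser = argparse.ArgumentParser(prog="example")
action = parser.add_subparsers(dest="action", required=True)

upscale = action.add_parser(
    "upscale",
    add_help=False,  # frees -h so it can be redefined below
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
upscale.add_argument("--help", action="help", help="show this help message and exit")
upscale.add_argument("-h", "--height", type=int, help="output height")

print(parser.parse_args(["upscale", "-h", "720"]))  # Namespace(action='upscale', height=720)
```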
@@ -500,11 +516,6 @@ def main():
# add new sink with custom handler
logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT)
# display version and legal information
if args.version:
print(LEGAL_INFO)
sys.exit(0)
# print package version and copyright notice
logger.opt(colors=True).info(f"<magenta>Video2X {__version__}</magenta>")
logger.opt(colors=True).info(
@@ -523,7 +534,7 @@ def main():
args.noise,
args.processes,
args.threshold,
args.driver,
args.algorithm,
)
elif args.action == "interpolate":
@@ -532,13 +543,11 @@ def main():
args.output,
args.processes,
args.threshold,
args.driver,
args.algorithm,
)
logger.success("Processing completed successfully")
# don't print the traceback for manual terminations
except (SystemExit, KeyboardInterrupt) as e:
except KeyboardInterrupt as e:
raise SystemExit(e)
except Exception as e: