docs: update README, add TODO

himeditator
2025-07-20 00:32:57 +08:00
parent f7d2df938d
commit 697488ce84
5 changed files with 184 additions and 7 deletions

View File

@@ -183,8 +183,6 @@ npm run dev
### Build Project
Note: the software has currently only been built and tested on Windows and macOS; its correctness on Linux cannot be guaranteed.
```bash
# For Windows
npm run build:win

View File

@@ -183,8 +183,6 @@ npm run dev
### Build Project
Note: Currently the software has only been built and tested on Windows and macOS. Correct operation on Linux is not guaranteed.
```bash
# For Windows
npm run build:win

View File

@@ -183,8 +183,6 @@ npm run dev
### Build Project
Currently the software has only been built and tested on Windows and macOS; correct operation on Linux cannot be guaranteed.
```bash
# For Windows
npm run build:win

View File

@@ -18,7 +18,7 @@
## To Do
- [ ] Explore more speech-to-text models
- [ ] Verify / add a caption engine based on sherpa-onnx
## Future Plans

View File

@@ -0,0 +1,183 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"id": "eff7155c",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import os\n",
"import numpy as np\n",
"import sounddevice as sd\n",
"from sherpa_onnx import OnlineRecognizer\n",
"\n",
"current_dir = os.getcwd() \n",
"sys.path.append(os.path.join(current_dir, '../caption-engine'))\n",
"\n",
"from sysaudio.win import AudioStream\n",
"from audioprcs import resampleRawChunk, mergeChunkChannels"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "9447e927",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Started! Please speak\n",
"木のデップハートへいてかいものしましたNIさんはまいばんなにちを兄さんは毎朝七時に家を出かけます机の上にねこがいます测试"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
"\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[10]\u001b[39m\u001b[32m, line 35\u001b[39m\n\u001b[32m 33\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m sd.InputStream(channels=\u001b[32m1\u001b[39m, dtype=\u001b[33m\"\u001b[39m\u001b[33mfloat32\u001b[39m\u001b[33m\"\u001b[39m, samplerate=sample_rate) \u001b[38;5;28;01mas\u001b[39;00m s:\n\u001b[32m 34\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m35\u001b[39m samples, _ = \u001b[43ms\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43msamples_per_read\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# a blocking read\u001b[39;00m\n\u001b[32m 36\u001b[39m samples = samples.reshape(-\u001b[32m1\u001b[39m)\n\u001b[32m 37\u001b[39m stream.accept_waveform(sample_rate, samples)\n",
"\u001b[36mFile \u001b[39m\u001b[32md:\\Projects\\auto-caption\\caption-engine\\subenv\\Lib\\site-packages\\sounddevice.py:1475\u001b[39m, in \u001b[36mInputStream.read\u001b[39m\u001b[34m(self, frames)\u001b[39m\n\u001b[32m 1473\u001b[39m dtype, _ = _split(\u001b[38;5;28mself\u001b[39m._dtype)\n\u001b[32m 1474\u001b[39m channels, _ = _split(\u001b[38;5;28mself\u001b[39m._channels)\n\u001b[32m-> \u001b[39m\u001b[32m1475\u001b[39m data, overflowed = \u001b[43mRawInputStream\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mframes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1476\u001b[39m data = _array(data, channels, dtype)\n\u001b[32m 1477\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m data, overflowed\n",
"\u001b[36mFile \u001b[39m\u001b[32md:\\Projects\\auto-caption\\caption-engine\\subenv\\Lib\\site-packages\\sounddevice.py:1245\u001b[39m, in \u001b[36mRawInputStream.read\u001b[39m\u001b[34m(self, frames)\u001b[39m\n\u001b[32m 1243\u001b[39m samplesize, _ = _split(\u001b[38;5;28mself\u001b[39m._samplesize)\n\u001b[32m 1244\u001b[39m data = _ffi.new(\u001b[33m'\u001b[39m\u001b[33msigned char[]\u001b[39m\u001b[33m'\u001b[39m, channels * samplesize * frames)\n\u001b[32m-> \u001b[39m\u001b[32m1245\u001b[39m err = \u001b[43m_lib\u001b[49m\u001b[43m.\u001b[49m\u001b[43mPa_ReadStream\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_ptr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mframes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1246\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m err == _lib.paInputOverflowed:\n\u001b[32m 1247\u001b[39m overflowed = \u001b[38;5;28;01mTrue\u001b[39;00m\n",
"\u001b[31mKeyboardInterrupt\u001b[39m: "
]
}
],
"source": [
"devices = sd.query_devices()\n",
"if len(devices) == 0:\n",
" print(\"No microphone devices found\")\n",
" sys.exit(0)\n",
"\n",
"# print(devices)\n",
"default_input_device_idx = sd.default.device[0]\n",
"# print(f'Use default device: {devices[default_input_device_idx][\"name\"]}') # type: ignore\n",
"\n",
"m_path = \"D:/Projects/auto-caption/caption-engine/models/sherpa-onnx-streaming-zipformer-ar_en_id_ja_ru_th_vi_zh-2025-02-10\"\n",
"recognizer = OnlineRecognizer.from_transducer(\n",
" tokens=f\"{m_path}/tokens.txt\",\n",
" encoder=f\"{m_path}/encoder-epoch-75-avg-11-chunk-16-left-128.int8.onnx\",\n",
" decoder=f\"{m_path}/decoder-epoch-75-avg-11-chunk-16-left-128.onnx\",\n",
" joiner=f\"{m_path}/joiner-epoch-75-avg-11-chunk-16-left-128.int8.onnx\",\n",
" num_threads=2,\n",
" sample_rate=16000,\n",
" feature_dim=80,\n",
" enable_endpoint_detection=True,\n",
" rule1_min_trailing_silence=2.4,\n",
" rule2_min_trailing_silence=1.2,\n",
" rule3_min_utterance_length=300, # it essentially disables this rule\n",
")\n",
"\n",
"print(\"Started! Please speak\")\n",
"\n",
"# The model is using 16 kHz, we use 48 kHz here to demonstrate that\n",
"# sherpa-onnx will do resampling inside.\n",
"sample_rate = 48000\n",
"samples_per_read = int(0.1 * sample_rate) # 0.1 second = 100 ms\n",
"last_result = \"\"\n",
"stream = recognizer.create_stream()\n",
"with sd.InputStream(channels=1, dtype=\"float32\", samplerate=sample_rate) as s:\n",
" while True:\n",
" samples, _ = s.read(samples_per_read) # a blocking read\n",
" samples = samples.reshape(-1)\n",
" stream.accept_waveform(sample_rate, samples)\n",
" while recognizer.is_ready(stream):\n",
" recognizer.decode_stream(stream)\n",
" result = recognizer.get_result(stream)\n",
" if last_result != result:\n",
" last_result = result\n",
" print(\"\\r{}\".format(result), end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abb254f8",
"metadata": {},
"outputs": [],
"source": [
"# m_path = \"D:/Projects/auto-caption/caption-engine/models/sherpa-onnx-nemo-streaming-fast-conformer-transducer-en-80ms-int8\"\n",
"# recognizer = OnlineRecognizer.from_transducer(\n",
"# tokens=f\"{m_path}/tokens.txt\",\n",
"# encoder=f\"{m_path}/encoder.int8.onnx\",\n",
"# decoder=f\"{m_path}/decoder.int8.onnx\",\n",
"# joiner=f\"{m_path}/joiner.int8.onnx\",\n",
"# enable_endpoint_detection=True,\n",
"# )\n",
"\n",
"m_path = \"D:/Projects/auto-caption/caption-engine/models/sherpa-onnx-streaming-zipformer-ar_en_id_ja_ru_th_vi_zh-2025-02-10\"\n",
"recognizer = OnlineRecognizer.from_transducer(\n",
" tokens=f\"{m_path}/tokens.txt\",\n",
" encoder=f\"{m_path}/encoder-epoch-75-avg-11-chunk-16-left-128.int8.onnx\",\n",
" decoder=f\"{m_path}/decoder-epoch-75-avg-11-chunk-16-left-128.onnx\",\n",
" joiner=f\"{m_path}/joiner-epoch-75-avg-11-chunk-16-left-128.int8.onnx\",\n",
" num_threads=1,\n",
" sample_rate=16000,\n",
" feature_dim=80,\n",
" enable_endpoint_detection=True,\n",
" rule1_min_trailing_silence=2.4,\n",
" rule2_min_trailing_silence=1.2,\n",
" rule3_min_utterance_length=300, # it essentially disables this rule\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6f10fc82",
"metadata": {},
"outputs": [],
"source": [
"rec_stream = recognizer.create_stream()\n",
"\n",
"stream = AudioStream(0, 1)\n",
"stream.printInfo()\n",
"\n",
"stream.openStream()\n",
"\n",
"\n",
"for i in range(300):\n",
" chunk = stream.read_chunk()\n",
" chunk_mono = resampleRawChunk(chunk, stream.CHANNELS, stream.RATE, 16000)\n",
" chunk_mono = np.frombuffer(chunk_mono, dtype=np.int16)\n",
" chunk_mono = chunk_mono.astype(np.float32)\n",
" print(i, chunk_mono.shape)\n",
" # print(type(chunk_mono), chunk_mono.shape)\n",
" rec_stream.accept_waveform(16000, chunk_mono)\n",
" while recognizer.is_ready(rec_stream):\n",
" recognizer.decode_stream(rec_stream)\n",
" result = recognizer.get_result(rec_stream)\n",
" if result:\n",
" print(result)\n",
" if recognizer.is_endpoint(rec_stream):\n",
" recognizer.reset(rec_stream)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "subenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}