1. 移除软件包自带的本地 whisper(需单独安装)
2. 重构版本底层依赖,移除外部依赖
3. 修复 首页 暗黑模式不兼容的问题
4. 修复 SD 合并提示词报错
This commit is contained in:
lq1405 2024-10-20 23:19:22 +08:00
parent efa8d3b2a2
commit f4d042f699
38 changed files with 2749 additions and 1568 deletions

10
.gitignore vendored
View File

@ -30,3 +30,13 @@ resources/config*
*.log* *.log*
resources/scripts/db/book.realm.lock resources/scripts/db/book.realm.lock
resources/scripts/db/software.realm.lock resources/scripts/db/software.realm.lock
resources/scripts/localWhisper/__pycache__/*
resources/scripts/laitool/.venv
resources/scripts/laitool/build
resources/scripts/laitool/dist
resources/scripts/localWhisper/build/*
resources/scripts/__pycache__/*
resources/scripts/db/*
resources/scripts/localWhisper/.venv/*
resources/scripts/localWhisper/_internal/*
resources/scripts/localWhisper/local_whisper.exe

4
package-lock.json generated
View File

@ -1,12 +1,12 @@
{ {
"name": "laitool", "name": "laitool",
"version": "3.1.6", "version": "3.1.7",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "laitool", "name": "laitool",
"version": "3.1.6", "version": "3.1.7",
"hasInstallScript": true, "hasInstallScript": true,
"dependencies": { "dependencies": {
"@alicloud/alimt20181012": "^1.2.0", "@alicloud/alimt20181012": "^1.2.0",

View File

@ -1,6 +1,6 @@
{ {
"name": "laitool", "name": "laitool",
"version": "3.1.6", "version": "3.1.7",
"description": "An AI tool for image processing, video processing, and other functions.", "description": "An AI tool for image processing, video processing, and other functions.",
"main": "./out/main/index.js", "main": "./out/main/index.js",
"author": "laitool.cn", "author": "laitool.cn",
@ -84,6 +84,7 @@
"resources/image/style/**", "resources/image/style/**",
"resources/image/zhanwei.png", "resources/image/zhanwei.png",
"resources/scripts/model/**", "resources/scripts/model/**",
"resources/scripts/Lai.exe",
"resources/scripts/discordScript.js", "resources/scripts/discordScript.js",
"resources/tmp/**", "resources/tmp/**",
"resources/icon.ico" "resources/icon.ico"

View File

@ -4,7 +4,6 @@ import json
import os import os
import sys import sys
import clip import clip
import getgrame
import Push_back_Prompt import Push_back_Prompt
import public_tools import public_tools
import shotSplit import shotSplit
@ -56,6 +55,8 @@ if sys.argv[1] == "-c":
clip = clip.Clip(cript_directory, sys.argv[2], sys.argv[3]) clip = clip.Clip(cript_directory, sys.argv[2], sys.argv[3])
clip.MergeVideosAndClip() clip.MergeVideosAndClip()
pass pass
# 获取字体 # 获取字体
elif sys.argv[1] == "-f": elif sys.argv[1] == "-f":
# 获取本地已安装的字幕。然后返回 # 获取本地已安装的字幕。然后返回
@ -77,22 +78,17 @@ elif sys.argv[1] == "-p":
Push_back_Prompt.init(sys.argv[2], sys.argv[3], sys.argv[4]) Push_back_Prompt.init(sys.argv[2], sys.argv[3], sys.argv[4])
pass pass
# 剪映抽帧
elif sys.argv[1] == "-k":
# print("")
getgrame.init(sys.argv[2], sys.argv[3], sys.argv[4])
pass
elif sys.argv[1] == "-ka": elif sys.argv[1] == "-ka":
shotSplit.get_fram(sys.argv[2], sys.argv[3], sys.argv[4]) shotSplit.get_fram(sys.argv[2], sys.argv[3], sys.argv[4])
pass pass
# 智能分镜。字幕识别 # # 智能分镜。字幕识别
elif sys.argv[1] == "-a": # elif sys.argv[1] == "-a":
print("开始算法分镜:" + sys.argv[2] + " -- 输出文件夹:" + sys.argv[3]) # print("开始算法分镜:" + sys.argv[2] + " -- 输出文件夹:" + sys.argv[3])
shotSplit.init(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]) # shotSplit.init(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])
# 本地提取音频
elif sys.argv[1] == "-t": # # 本地提取音频
print("开始提取文字:" + sys.argv[2]) # elif sys.argv[1] == "-t":
shotSplit.GetTextTask(sys.argv[2], sys.argv[3], sys.argv[4]) # print("开始提取文字:" + sys.argv[2])
pass # shotSplit.GetTextTask(sys.argv[2], sys.argv[3], sys.argv[4])
# pass

View File

@ -1,36 +1,34 @@
# -*- mode: python ; coding: utf-8 -*- # -*- mode: python ; coding: utf-8 -*-
from PyInstaller.building.datastruct import Tree
from PyInstaller.utils.hooks import get_package_paths
PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1]
datas = [(PACKAGE_DIRECTORY, 'faster_whisper')]
a = Analysis( a = Analysis(
['Lai.py'], ['Lai.py'],
pathex=[], pathex=[],
binaries=[], binaries=[],
datas=datas, datas=[],
hiddenimports=[], hiddenimports=[],
hookspath=[], hookspath=[],
hooksconfig={}, hooksconfig={},
runtime_hooks=[], runtime_hooks=[],
excludes=[], excludes=[],
noarchive=False, noarchive=False,
optimize=0,
) )
pyz = PYZ(a.pure) pyz = PYZ(a.pure)
exe = EXE( exe = EXE(
pyz, pyz,
a.scripts, a.scripts,
a.binaries,
a.datas,
[], [],
exclude_binaries=True,
name='Lai', name='Lai',
debug=False, debug=False,
bootloader_ignore_signals=False, bootloader_ignore_signals=False,
strip=False, strip=False,
upx=True, upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True, console=True,
disable_windowed_traceback=False, disable_windowed_traceback=False,
argv_emulation=False, argv_emulation=False,
@ -38,12 +36,3 @@ exe = EXE(
codesign_identity=None, codesign_identity=None,
entitlements_file=None, entitlements_file=None,
) )
coll = COLLECT(
exe,
a.binaries,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='Lai',
)

View File

@ -1,11 +1,17 @@
# -*- mode: python ; coding: utf-8 -*- # -*- mode: python ; coding: utf-8 -*-
from PyInstaller.building.datastruct import Tree
from PyInstaller.utils.hooks import get_package_paths
PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1]
datas = [(PACKAGE_DIRECTORY, 'faster_whisper')]
a = Analysis( a = Analysis(
['lama_inpaint.py'], ['Lai.py'],
pathex=[], pathex=[],
binaries=[], binaries=[],
datas=[], datas=datas,
hiddenimports=[], hiddenimports=[],
hookspath=[], hookspath=[],
hooksconfig={}, hooksconfig={},
@ -18,16 +24,13 @@ pyz = PYZ(a.pure)
exe = EXE( exe = EXE(
pyz, pyz,
a.scripts, a.scripts,
a.binaries,
a.datas,
[], [],
name='lama_inpaint', exclude_binaries=True,
name='Lai',
debug=False, debug=False,
bootloader_ignore_signals=False, bootloader_ignore_signals=False,
strip=False, strip=False,
upx=True, upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True, console=True,
disable_windowed_traceback=False, disable_windowed_traceback=False,
argv_emulation=False, argv_emulation=False,
@ -35,3 +38,12 @@ exe = EXE(
codesign_identity=None, codesign_identity=None,
entitlements_file=None, entitlements_file=None,
) )
coll = COLLECT(
exe,
a.binaries,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='Lai',
)

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,2 @@
@echo off
REM Build local_whisper.exe with PyInstaller, compressing with UPX.
REM Fix: --upx-dir must point at the DIRECTORY that contains upx.exe,
REM not at the executable itself (and batch paths use single backslashes).
pyinstaller --upx-dir="C:\Users\27698\Desktop\upx-4.2.4-win64" local_whisper.py

View File

@ -0,0 +1,170 @@
# -*- coding: utf-8 -*-
import io
import os
import sys
import public_tools
from pathlib import Path
from huggingface_hub import hf_hub_download
from faster_whisper import WhisperModel
# Reopen stdout as UTF-8 so Chinese log lines survive being piped to the parent process.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
# Sample invocations kept for local debugging (injected when no CLI args are given):
# "C:\\Users\\27698\\Desktop\\LAITool\\resources\\scripts\\Lai.exe" -c "D:/来推项目集/7.4/娱乐:江湖大哥退休,去拍电影/scripts/output_crop_00001.json" "NVIDIA"
# if len(sys.argv) < 2:
#     sys.argv = [
#         "C:\\Users\\27698\\Desktop\\LAITool\\resources\\scripts\\Lai.exe",
#         "-w",
#         "C:\\Users\\27698\\Desktop\\测试\\test\\mjTestoutput_crop_00001.mp4",
#         "C:\\Users\\27698\\Desktop\\测试\\test\data\\frame",
#         "C:\\Users\\27698\\Desktop\\测试\\test\\tmp\\input_crop",
#         30,
#         "NVIDIA",
#     ]
print(sys.argv)
# Bail out early when no sub-command was supplied.
if len(sys.argv) < 2:
    print("Params: <runtime-config.json>")
    exit(0)
# Resolve the directory this script/executable lives in
# (PyInstaller sets sys.frozen on frozen builds).
if getattr(sys, "frozen", False):
    cript_directory = os.path.dirname(sys.executable)
elif __file__:
    cript_directory = os.path.dirname(__file__)
def GetText(out_folder, mp3_folder):
    """
    Transcribe every .mp3 found (recursively) under *mp3_folder* with
    faster-whisper large-v3 and write all transcripts to
    <out_folder>/文案.txt, one line per file.

    Model files are fetched from the Hugging Face hub on first use and
    cached between runs; transcription uses device="auto" with Chinese
    forced as the language and VAD filtering enabled.
    """
    text = []
    # Download (or load the cached copies of) all model files first.
    print("正在下载或加载模型")
    sys.stdout.flush()
    model_path = Path(
        hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="preprocessor_config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="tokenizer.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="vocabulary.json",
    )
    # All files live in the same cache directory as model.bin.
    model = WhisperModel(
        model_size_or_path=os.path.dirname(model_path),
        device="auto",
        local_files_only=True,
    )
    print("模型加载成功,开始识别")
    sys.stdout.flush()
    # Collect every .mp3 under mp3_folder (recursive walk).
    mp3_list = []
    for root, dirs, files in os.walk(mp3_folder):
        for file in files:
            if file.endswith(".mp3"):
                mp3_list.append(os.path.join(root, file))
    for mp in mp3_list:
        segments, info = model.transcribe(
            mp,
            beam_size=5,
            language="zh",
            vad_filter=True,
            vad_parameters=dict(min_silence_duration_ms=1000),
        )
        tmp_text = ""
        for segment in segments:
            # NOTE(review): '+ ""' appends nothing — a separator ("。") may
            # have been intended here; confirm against the old behavior.
            tmp_text += segment.text + ""
        print(mp + "识别完成")
        sys.stdout.flush()
        text.append(tmp_text)
    # Write everything out.
    print("文本全部识别成功,正在写出")
    sys.stdout.flush()
    tools = public_tools.PublicTools()
    tools.write_to_file(text, os.path.join(out_folder, "文案.txt"))
    print("写出完成")
    sys.stdout.flush()
def GetTextTask(out_folder, mp, name):
    """
    Transcribe the single audio/video file *mp* with faster-whisper
    large-v3 and write the transcript to <out_folder>/<name>.txt.

    Model files are fetched from the Hugging Face hub on first use and
    cached between runs; transcription uses device="auto" with Chinese
    forced as the language and VAD filtering enabled.
    """
    text = []
    # Download (or load the cached copies of) all model files first.
    print("正在下载或加载模型")
    sys.stdout.flush()
    model_path = Path(
        hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="preprocessor_config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="tokenizer.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="vocabulary.json",
    )
    # All files live in the same cache directory as model.bin.
    model = WhisperModel(
        model_size_or_path=os.path.dirname(model_path),
        device="auto",
        local_files_only=True,
    )
    print("模型加载成功,开始识别")
    sys.stdout.flush()
    segments, info = model.transcribe(
        mp,
        beam_size=5,
        language="zh",
        vad_filter=True,
        vad_parameters=dict(min_silence_duration_ms=1000),
    )
    tmp_text = ""
    for segment in segments:
        # NOTE(review): '+ ""' appends nothing — a separator may have been intended.
        tmp_text += segment.text + ""
    print(mp + "识别完成")
    sys.stdout.flush()
    text.append(tmp_text)
    # Write the transcript out.
    sys.stdout.flush()
    tools = public_tools.PublicTools()
    tools.write_to_file(text, os.path.join(out_folder, name + ".txt"))
    sys.stdout.flush()
# Local debug invocation kept for reference:
# GetTextTask(
#     "C:\\Users\\27698\\Desktop\\测试\\mjTest",
#     "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\frame\\00001.mp4",
#     "00001",
# )
# CLI dispatch: -ts transcribes every .mp3 under a folder,
# -t transcribes one file to <name>.txt; anything else prints usage.
if sys.argv[1] == "-ts":
    GetText(
        sys.argv[2],
        sys.argv[3],
    )
elif sys.argv[1] == "-t":
    GetTextTask(
        sys.argv[2],
        sys.argv[3],
        sys.argv[4],
    )
else:
    print("Params: <runtime-config.json>")
    exit(0)

View File

@ -0,0 +1,50 @@
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller spec for the standalone local_whisper transcriber
# (one-folder COLLECT build, UPX-compressed).
from PyInstaller.building.datastruct import Tree
from PyInstaller.utils.hooks import get_package_paths

# Bundle the installed faster_whisper package directory as data files so
# its on-disk assets ship next to the frozen executable.
PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1]
datas = [(PACKAGE_DIRECTORY, 'faster_whisper')]
a = Analysis(
    ['local_whisper.py'],
    pathex=[],
    binaries=[],
    # Fix: the collected faster_whisper data was computed above but never
    # passed in (was `datas=[]`), so the frozen app lacked the package files.
    datas=datas,
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)
exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,  # binaries/data go into the COLLECT folder below
    name='local_whisper',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
coll = COLLECT(
    exe,
    a.binaries,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='local_whisper',
)

View File

@ -0,0 +1,351 @@
# 读取文件的方法
import json
import os
import win32api
import win32con
import pywintypes
import shutil
import re
class PublicTools:
    """
    Shared utility helpers: filesystem cleanup, extension-filtered file
    listing, Windows font enumeration via the registry, RGB color
    re-ordering and simple line-based text output.
    """

    def delete_path(self, path):
        """
        Delete *path*, whether it is a regular file or a directory tree.

        Silently returns when the path does not exist. Raises RuntimeError
        for paths that are neither file nor directory (the original used a
        bare `raise` here, which raises RuntimeError with no context; the
        pointless `except Exception as e: raise e` wrappers are removed —
        errors propagate unchanged either way).
        """
        if not os.path.exists(path):
            return
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        else:
            # Neither a file nor a directory (special entry): refuse.
            raise RuntimeError("unsupported path type: %s" % path)

    def list_files_by_extension(self, folder_path, extension):
        """
        Recursively collect files under *folder_path* whose name ends with
        *extension* as given or upper-cased (e.g. ".mp3" / ".MP3").
        """
        file_list = []
        # endswith accepts a tuple — one test replaces the if/elif pair.
        suffixes = (extension, extension.upper())
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                if file.endswith(suffixes):
                    file_list.append(os.path.join(root, file))
        return file_list

    def get_fonts_from_registry(self, key_path):
        """
        Enumerate font display names registered under *key_path*.

        *key_path* is a full registry path beginning with either
        "HKEY_LOCAL_MACHINE\\..." or "HKEY_CURRENT_USER\\...". A trailing
        parenthesised qualifier such as " (TrueType)" is stripped from
        each name. Returns the (possibly empty) list of names.
        """
        font_names = []
        key = None  # fix: ensure `key` exists even if RegOpenKeyEx raises
        try:
            key = win32api.RegOpenKeyEx(
                (
                    win32con.HKEY_LOCAL_MACHINE
                    if "HKEY_LOCAL_MACHINE" in key_path
                    else win32con.HKEY_CURRENT_USER
                ),
                key_path.split("\\", 1)[1],
                0,
                win32con.KEY_READ,
            )
            i = 0
            while True:
                try:
                    value = win32api.RegEnumValue(key, i)
                    font_name = value[0]
                    # Drop a trailing "(...)" qualifier, e.g. "(TrueType)".
                    font_name = re.sub(r"\s*\([^)]*\)$", "", font_name)
                    font_names.append(font_name)
                    i += 1
                except pywintypes.error as e:
                    if e.winerror == 259:  # ERROR_NO_MORE_ITEMS
                        break
                    else:
                        raise
        finally:
            if key is not None:
                try:
                    win32api.RegCloseKey(key)
                except Exception:
                    # Best effort: never mask the original exception.
                    pass
        return font_names

    def get_installed_fonts(self):
        """
        Return the de-duplicated union of machine-wide and per-user
        installed font names read from the registry.
        """
        system_fonts = self.get_fonts_from_registry(
            "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
        )
        user_fonts = self.get_fonts_from_registry(
            "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
        )
        all_fonts = list(set(system_fonts + user_fonts))  # merge + dedupe
        return all_fonts

    def convert_rrggbb_to_bbggrr(self, rrggbb):
        """
        Convert a "#RRGGBB" color string to "BBGGRR" (without the '#').

        Returns the string "Invalid input" for anything that is not
        exactly 7 characters long (kept for caller compatibility).
        """
        if len(rrggbb) == 7:
            rr = rrggbb[1:3]
            gg = rrggbb[3:5]
            bb = rrggbb[5:7]
            return bb + gg + rr
        else:
            return "Invalid input"

    def write_to_file(self, arr, filename):
        """Write each item of *arr* to *filename* as its own UTF-8 line."""
        with open(filename, "w", encoding="utf-8") as f:
            for item in arr:
                f.write("%s\n" % item)
# Interactively prompt for an existing file path
def read_file(fileType):
    """
    Keep prompting until the user supplies a non-blank path that exists.

    Surrounding double quotes (as produced by Windows "Copy as path") are
    stripped before the existence check. Returns the cleaned path.
    """
    txt_path = remove_prefix_and_suffix(input(f"输入{fileType}文件路径:"), '"', '"')
    # Re-prompt on blank input (no quote stripping here, matching original behavior).
    while not txt_path.strip():
        txt_path = input(f"输入{fileType}文件路径:")
    # Re-prompt until the path actually exists on disk.
    while not os.path.exists(txt_path):
        print("文件路径不存在错误:")
        txt_path = remove_prefix_and_suffix(input(f"输入{fileType}文件路径:"), '"', '"')
    return txt_path
def format_time_ms(milliseconds):
    """
    Convert a duration in milliseconds to "H:MM:SS.ss" text
    (hours unpadded, minutes two digits, seconds with two decimals).
    """
    total_seconds = milliseconds / 1000
    # divmod splits off whole hours, then whole minutes from the remainder.
    hours, remainder = divmod(total_seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{int(hours)}:{int(minutes):02d}:{secs:05.2f}"
# Strip a matching prefix and/or suffix from a string
def remove_prefix_and_suffix(input_str, prefix_to_remove, suffix_to_remove):
    """
    Remove *prefix_to_remove* from the start and *suffix_to_remove* from
    the end of *input_str*, each at most once (prefix first).

    Bug fix: the original sliced with input_str[:-len(suffix)], which for
    an EMPTY suffix evaluated to input_str[:0] and wrongly returned "".
    Empty prefix/suffix are now harmless no-ops.
    """
    if prefix_to_remove and input_str.startswith(prefix_to_remove):
        input_str = input_str[len(prefix_to_remove):]
    if suffix_to_remove and input_str.endswith(suffix_to_remove):
        input_str = input_str[:-len(suffix_to_remove)]
    return input_str
# Does *parent_folder* directly contain a sub-directory with this name?
def check_if_folder_exists(parent_folder, target_folder_name):
    """
    Return True when *parent_folder* has an immediate sub-directory named
    *target_folder_name*, else False (non-recursive).
    """
    for entry in os.scandir(parent_folder):
        if entry.is_dir() and entry.name == target_folder_name:
            return True
    return False
# Is there a regular file with this name inside the folder?
def file_exists_in_folder(folder_path: str, file_name: str) -> bool:
    """Return True when *file_name* exists as a regular file in *folder_path*."""
    return os.path.isfile(os.path.join(folder_path, file_name))
# Microseconds -> seconds, rounded
def convert_to_seconds(number, count):
    """Convert *number* (microseconds) to seconds, rounded to *count* decimals."""
    return round(number / 1000000, count)
def is_empty(obj):
    """
    Return True for None and for empty str/list/dict instances;
    every other value (including 0 and False) is considered non-empty.
    """
    if obj is None:
        return True
    # Only these three container-ish types are inspected for emptiness.
    if isinstance(obj, (str, list, dict)):
        return len(obj) == 0
    return False
def opt_dict(obj, key, default=None):
    """
    Return obj[key] when *obj* is a usable mapping, the key is present and
    the value is non-empty (per is_empty); otherwise return *default*.
    """
    if obj is not None and key in obj:
        candidate = obj[key]
        if not is_empty(candidate):
            return candidate
    return default
def read_config(path, webui=True):
    """
    Load the runtime JSON at *path* and merge it with the referenced
    setting config (and, when *webui* is True, the webui config).

    Returns (webui_config, setting_config) when webui=True, otherwise
    just setting_config; returns None when required keys are missing.
    """
    with open(path, "r", encoding="utf-8") as f:
        runtime_config = json.load(f)
    if "config" not in runtime_config:
        print("no filed 'config' in json")
        return None
    config = runtime_config["config"]
    if "webui" not in config:
        print("no filed 'webui' in 'config'")
        return None
    # Probe a few relative locations for the setting file.
    setting_config_path = config["setting"]
    if not os.path.exists(setting_config_path):
        setting_config_path = "config/" + setting_config_path
    if not os.path.exists(setting_config_path):
        setting_config_path = "../" + setting_config_path
    # read config
    with open(setting_config_path, "r", encoding="utf-8") as f:
        setting_config = json.load(f)
    # Inject the runtime workspace root into the setting config.
    if "workspace" in setting_config:
        setting_config["workspace"]["parent"] = runtime_config["workspace"]
    else:
        setting_config["workspace"] = {"parent": runtime_config["workspace"]}
    setting_config["video"] = opt_dict(runtime_config, "video")
    # merge setting config
    # NOTE(review): the presence check is on `config` but the value read is
    # runtime_config["setting"] — looks inconsistent; confirm intent.
    if "setting" in config:
        setting_config.update(runtime_config["setting"])
    # webui config
    if webui:
        # Same relative-path probing as for the setting file.
        webui_config_path = config["webui"]
        if not os.path.exists(webui_config_path):
            webui_config_path = "config/webui/" + webui_config_path
        if not os.path.exists(webui_config_path):
            webui_config_path = "../" + webui_config_path
        with open(webui_config_path, "r", encoding="utf-8") as f:
            webui_config = json.load(f)
        # merge webui config overrides from the runtime file
        if "webui" in runtime_config:
            webui_config.update(runtime_config["webui"])
        return webui_config, setting_config
    return setting_config
# Sentinel tag mode: empty string means no tag mode is configured.
TAG_MODE_NONE = ""
# Workspace path layout
class Workspace:
    """
    Plain value object bundling every directory/file path of one
    processing workspace: the root, input/output folders, and the
    temporary crop/tag/mask locations used by the pipeline.
    """

    def __init__(
        self,
        root: str,
        input: str,
        output: str,
        input_crop: str,
        output_crop: str,
        input_tag: str,
        input_mask: str,
        input_crop_mask: str,
        crop_info: str,
    ):
        self.root = root                        # workspace root directory
        self.input = input                      # input folder
        self.output = output                    # output folder
        self.input_crop = input_crop            # tmp: cropped inputs
        self.output_crop = output_crop          # tmp: cropped outputs
        self.input_tag = input_tag              # tmp: tagging inputs
        self.input_mask = input_mask            # tmp: input masks
        self.input_crop_mask = input_crop_mask  # tmp: cropped input masks
        self.crop_info = crop_info              # tmp: crop metadata file
# Round up to the NEXT multiple of *mul*.
def round_up(num, mul):
    """
    Return the smallest multiple of *mul* strictly greater than *num*.

    Note: a value already on a multiple is pushed to the following one,
    e.g. round_up(10, 5) == 15 (behavior kept from the original).
    """
    quotient = num // mul
    return (quotient + 1) * mul
class SettingConfig:
    """
    Thin wrapper over the merged settings dict: exposes tag options and
    resolves the workspace directory layout relative to *workParent*.
    """

    def __init__(self, config: dict, workParent):
        self.config = config          # merged settings dict
        self.webui_work_api = None    # filled in later by callers
        self.workParent = workParent  # workspace root directory

    def to_dict(self):
        """Return the instance attributes as a plain dict."""
        return self.__dict__

    def get_tag_mode(self):
        """Tag mode from config["tag"]["mode"]; TAG_MODE_NONE when unset."""
        return opt_dict(opt_dict(self.config, "tag"), "mode", TAG_MODE_NONE)

    def get_tag_actions(self):
        """Tag action list from config["tag"]["actions"]; [] when unset."""
        return opt_dict(opt_dict(self.config, "tag"), "actions", [])

    def get_workspace_config(self) -> Workspace:
        """Build a Workspace from config["workspace"], defaulting every folder name."""
        ws_cfg = opt_dict(self.config, "workspace")
        tmp_cfg = opt_dict(ws_cfg, "tmp")
        parent = self.workParent
        tmp_root = os.path.join(parent, opt_dict(tmp_cfg, "parent", "tmp"))

        def below_tmp(key, fallback):
            # Resolve one tmp sub-path setting under tmp_root.
            return os.path.join(tmp_root, opt_dict(tmp_cfg, key, fallback))

        return Workspace(
            parent,
            os.path.join(parent, opt_dict(ws_cfg, "input", "input")),
            os.path.join(parent, opt_dict(ws_cfg, "output", "output")),
            below_tmp("input_crop", "input_crop"),
            below_tmp("output_crop", "output_crop"),
            # note: input_tag intentionally defaults to "input_crop"
            below_tmp("input_tag", "input_crop"),
            below_tmp("input_mask", "input_mask"),
            below_tmp("input_crop_mask", "input_crop_mask"),
            below_tmp("crop_info", "crop_info.txt"),
        )

    def enable_tag(self):
        """Whether tagging is enabled (config["tag"]["enable"]); defaults to True."""
        return opt_dict(opt_dict(self.config, "tag"), "enable", True)

View File

@ -0,0 +1,307 @@
# pip install scenedetect opencv-python -i https://pypi.tuna.tsinghua.edu.cn/simple
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
from scenedetect.stats_manager import StatsManager
from scenedetect.detectors.content_detector import ContentDetector
import os
import sys
import json
import subprocess
from huggingface_hub import hf_hub_download
from faster_whisper import WhisperModel
from pathlib import Path
import public_tools
# Compute scene-cut boundaries ("intelligent" shot splitting)
def find_scenes(video_path, sensitivity):
    """
    Detect scene cuts in *video_path* with PySceneDetect's ContentDetector.

    sensitivity: detector threshold; converted with float(), so string
    values coming from argv are accepted.

    Returns a list of [start_timecode, end_timecode] string pairs, one per
    detected scene. Progress is printed and flushed eagerly — presumably
    parsed live by the parent process; verify against the caller.
    """
    print(
        "正在计算分镜数据" + "sensitivity" + str(sensitivity) + "path : " + video_path
    )
    sys.stdout.flush()
    video_manager = VideoManager([video_path])
    stats_manager = StatsManager()
    scene_manager = SceneManager(stats_manager)
    # Content-aware cut detection.
    scene_manager.add_detector(ContentDetector(threshold=float(sensitivity)))
    shijian_list = []
    try:
        # Downscale for speed, then run detection over the whole video.
        video_manager.set_downscale_factor()
        video_manager.start()
        scene_manager.detect_scenes(frame_source=video_manager)
        scene_list = scene_manager.get_scene_list()
        print("分镜数据列表:")
        sys.stdout.flush()
        for i, scene in enumerate(scene_list):
            shijian_list.append([scene[0].get_timecode(), scene[1].get_timecode()])
            print(
                "Scene %2d: Start %s / Frame %d, End %s / Frame %d"
                % (
                    i + 1,
                    scene[0].get_timecode(),
                    scene[0].get_frames(),
                    scene[1].get_timecode(),
                    scene[1].get_frames(),
                )
            )
            sys.stdout.flush()
    finally:
        # Always release the decoder resources.
        video_manager.release()
    return shijian_list
# Ensure a directory exists
def createDir(file_dir):
    """
    Create directory *file_dir* if it does not already exist.

    Uses os.makedirs with exist_ok=True so missing parent directories are
    created too (os.mkdir would fail on a missing parent) and a concurrent
    creation does not raise.
    """
    os.makedirs(file_dir, exist_ok=True)
# Split one video into per-scene clips (+ one preview frame per clip)
def ClipVideo(video_path, out_folder, image_out_folder, sensitivity, gpu_type):
    """
    Cut *video_path* into one MP4 per detected scene with ffmpeg, then
    export a single mid-scene frame per clip as PNG.

    out_folder:       destination for numbered clips (00001.mp4, ...)
    image_out_folder: destination for matching frames (00001.png, ...)
    sensitivity:      threshold forwarded to find_scenes
    gpu_type:         "NVIDIA"/"AMD" selects a hardware h264 encoder;
                      anything else falls back to libx264

    Returns a list of dicts (start/end timecode, output path, clip name),
    one per scene.
    """
    shijian_list = find_scenes(video_path, sensitivity)  # [start, end] timecode pairs
    shijian_list_len = len(shijian_list)
    print("总共有%s个场景" % str(shijian_list_len))
    sys.stdout.flush()
    video_list = []
    for i in range(0, shijian_list_len):
        start_time_str = shijian_list[i][0]
        end_time_str = shijian_list[i][1]
        print("开始输出第" + str(i + 1) + "个分镜")
        video_name = "{:05d}".format(i + 1)  # zero-padded clip name
        out_video_file = os.path.join(out_folder, video_name + ".mp4")
        sys.stdout.flush()
        video_list.append(
            {
                "start_time_str": start_time_str,
                "end_time_str": end_time_str,
                "out_video_file": out_video_file,
                "video_name": video_name,
            }
        )
        # Cut this scene out of the source with ffmpeg.
        command = []
        command.append("ffmpeg")
        command.append("-i")
        command.append(video_path)
        command.append("-ss")
        command.append(start_time_str)
        command.append("-to")
        command.append(end_time_str)
        command.append("-c:v")
        # Pick the video encoder matching the user's GPU vendor.
        if gpu_type == "NVIDIA":
            command.append("h264_nvenc")
        elif gpu_type == "AMD":
            command.append("h264_amf")
        else:
            command.append("libx264")
        command.append("-preset")
        command.append("fast")
        command.append("-c:a")
        command.append("copy")
        command.append(out_video_file)
        command.append("-loglevel")
        command.append("error")
        subprocess.run(
            command,
            check=True,
            stderr=subprocess.PIPE,
        )
    print("分镜输出完成。开始抽帧")
    sys.stdout.flush()
    for vi in video_list:
        # Seek to the clip's midpoint and export a single frame as PNG.
        h, m, s = vi["start_time_str"].split(":")
        start_seconds = int(h) * 3600 + int(m) * 60 + float(s)
        h, m, s = vi["end_time_str"].split(":")
        end_seconds = int(h) * 3600 + int(m) * 60 + float(s)
        print("正在抽帧:" + vi["video_name"])
        sys.stdout.flush()
        subprocess.run(
            [
                "ffmpeg",
                "-ss",
                str((end_seconds - start_seconds) / 2),
                "-i",
                vi["out_video_file"],
                "-frames:v",
                "1",
                os.path.join(image_out_folder, vi["video_name"] + ".png"),
                "-loglevel",
                "error",
            ]
        )
    print("抽帧完成,开始识别文案")
    sys.stdout.flush()
    return video_list
def SplitAudio(video_out_folder, video_list):
    """
    Extract a 128k MP3 audio track from every clip in *video_list* with
    ffmpeg and return the list of MP3 paths written to *video_out_folder*.
    """
    # ffmpeg -i input_file.mp4 -vn -ab 128k output_file.mp3
    print("正在分离音频!!")
    mp3_list = []
    sys.stdout.flush()
    for entry in video_list:
        target = os.path.join(video_out_folder, entry["video_name"] + ".mp3")
        mp3_list.append(target)
        cmd = [
            "ffmpeg",
            "-i",
            entry["out_video_file"],
            "-vn",      # drop the video stream
            "-ab",
            "128k",     # audio bitrate
            target,
            "-loglevel",
            "error",
        ]
        subprocess.run(cmd, check=True)
    return mp3_list
def GetText(out_folder, mp3_list):
    """
    Transcribe every MP3 in *mp3_list* with faster-whisper large-v3 and
    write all transcripts to <out_folder>/文案.txt (one line per clip).

    Model files are fetched from the Hugging Face hub on first use and
    cached between runs; transcription uses device="auto" with Chinese
    forced as the language and VAD filtering enabled.
    """
    text = []
    # Download (or load the cached copies of) all model files first.
    print("正在下载或加载模型")
    sys.stdout.flush()
    model_path = Path(
        hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="preprocessor_config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="tokenizer.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="vocabulary.json",
    )
    # All files land in the same cache directory as model.bin.
    model = WhisperModel(
        model_size_or_path=os.path.dirname(model_path),
        device="auto",
        local_files_only=True,
    )
    print("模型加载成功,开始识别")
    sys.stdout.flush()
    for mp in mp3_list:
        segments, info = model.transcribe(
            mp,
            beam_size=5,
            language="zh",
            vad_filter=True,
            vad_parameters=dict(min_silence_duration_ms=1000),
        )
        tmp_text = ""
        for segment in segments:
            # NOTE(review): '+ ""' appends nothing — a separator may have been intended.
            tmp_text += segment.text + ""
        print(mp + "识别完成")
        sys.stdout.flush()
        text.append(tmp_text)
    # Write everything out.
    print("文本全部识别成功,正在写出")
    sys.stdout.flush()
    tools = public_tools.PublicTools()
    tools.write_to_file(text, os.path.join(out_folder, "文案.txt"))
    print("写出完成")
    sys.stdout.flush()
def GetTextTask(out_folder, mp, name):
    """
    Transcribe the single audio/video file *mp* with faster-whisper
    large-v3 and write the transcript to <out_folder>/<name>.txt.

    Model files are fetched from the Hugging Face hub on first use and
    cached between runs; transcription uses device="auto" with Chinese
    forced as the language and VAD filtering enabled.
    """
    text = []
    # Download (or load the cached copies of) all model files first.
    print("正在下载或加载模型")
    sys.stdout.flush()
    model_path = Path(
        hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="preprocessor_config.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="tokenizer.json",
    )
    hf_hub_download(
        repo_id="Systran/faster-whisper-large-v3",
        filename="vocabulary.json",
    )
    # All files land in the same cache directory as model.bin.
    model = WhisperModel(
        model_size_or_path=os.path.dirname(model_path),
        device="auto",
        local_files_only=True,
    )
    print("模型加载成功,开始识别")
    sys.stdout.flush()
    segments, info = model.transcribe(
        mp,
        beam_size=5,
        language="zh",
        vad_filter=True,
        vad_parameters=dict(min_silence_duration_ms=1000),
    )
    tmp_text = ""
    for segment in segments:
        # NOTE(review): '+ ""' appends nothing — a separator may have been intended.
        tmp_text += segment.text + ""
    print(mp + "识别完成")
    sys.stdout.flush()
    text.append(tmp_text)
    # Write the transcript out.
    sys.stdout.flush()
    tools = public_tools.PublicTools()
    tools.write_to_file(text, os.path.join(out_folder, name + ".txt"))
    sys.stdout.flush()
def get_fram(video_path, out_path, sensitivity):
    """
    Run scene detection on *video_path* and dump the resulting
    [start, end] timecode pairs as a JSON array to *out_path*.

    On any failure the error text is printed and the process exits with
    code 0 — NOTE(review): callers appear to rely on stdout text rather
    than the exit status; confirm before changing.
    """
    try:
        scenes = find_scenes(video_path, sensitivity)
        print("总共有%s个场景" % str(len(scenes)))
        print("开始输出json")
        print(scenes)
        # Persist the timecode pairs for the caller to pick up.
        with open(out_path, "w") as file:
            json.dump(scenes, file)
        print("输出完成")
    except Exception as e:
        print("出现错误" + str(e))
        exit(0)
def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type):
    """
    Full pipeline: split *video_path* into per-scene clips (plus one frame
    image per clip), rip each clip's audio, then transcribe everything to
    文案.txt in the parent directory of *video_out_folder*.
    """
    v_l = ClipVideo(
        video_path, video_out_folder, image_out_folder, sensitivity, gpu_type
    )
    # Split the audio track out of every clip.
    m_l = SplitAudio(video_out_folder, v_l)
    # Transcribe the audio to text.
    GetText(os.path.dirname(video_out_folder), m_l)

View File

@ -1,298 +0,0 @@
accelerate==0.30.1
addict==2.4.0
aiofiles==23.2.1
aiohttp==3.8.6
aiosignal==1.3.1
alibabacloud-bailian20230601==1.6.1
alibabacloud-credentials==0.3.3
alibabacloud-endpoint-util==0.0.3
alibabacloud-gateway-spi==0.0.1
alibabacloud-openapi-util==0.2.2
alibabacloud-tea==0.3.6
alibabacloud-tea-openapi==0.3.9
alibabacloud-tea-util==0.3.12
alibabacloud-tea-xml==0.0.2
aliyun-python-sdk-core==2.15.0
aliyun-python-sdk-kms==2.16.2
altair==5.3.0
altgraph==0.17.4
annotated-types==0.6.0
anthropic==0.26.1
antlr4-python3-runtime==4.9.3
anyio==4.3.0
APScheduler==3.10.4
arxiv==2.1.0
astor==0.8.1
asttokens==2.4.1
async-timeout==4.0.3
attrdict==2.0.1
attrs==23.2.0
av==11.0.0
azure-cognitiveservices-speech==1.37.0
Babel==2.15.0
backports.tarfile==1.1.1
baidu-aip==4.16.13
bce-python-sdk==0.9.11
beautifulsoup4==4.12.3
bidict==0.23.1
blinker==1.8.2
broadscope-bailian==1.3.1
cachetools==5.3.3
certifi==2024.2.2
cffi==1.16.0
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.3.2
chatgpt-tool-hub==0.5.0
cheroot==10.0.1
click==8.1.7
colorama==0.4.6
coloredlogs==15.0.1
comtypes==1.4.2
contourpy==1.2.1
controlnet-aux==0.0.3
crcmod==1.7
cryptography==42.0.5
cssselect==1.2.0
cssutils==2.11.0
ctranslate2==4.1.0
curl_cffi==0.6.4
cx-Logging==3.1.0
cx_Freeze==6.15.16
cycler==0.12.1
Cython==3.0.10
dashscope==1.19.2
datasets==2.18.0
decorator==4.4.2
diffusers==0.27.2
dill==0.3.8
dingtalk-stream==0.18.1
distlib==0.3.8
distro==1.9.0
dnspython==2.6.1
dulwich==0.22.1
easydict==1.13
edge-tts==6.1.12
einops==0.7.0
elevenlabs==1.0.3
email_validator==2.1.1
et-xmlfile==1.1.0
exceptiongroup==1.2.1
executing==2.0.1
fastapi==0.108.0
fastapi-cli==0.0.2
faster-whisper==1.0.1
feedparser==6.0.10
ffmpy==0.3.2
filelock==3.13.1
fire==0.6.0
Flask==3.0.3
flask-babel==4.0.0
flatbuffers==24.3.7
fonttools==4.53.0
frozenlist==1.4.1
fsspec==2024.2.0
future==1.0.0
gast==0.5.4
google-ai-generativelanguage==0.6.4
google-api-core==2.19.0
google-api-python-client==2.130.0
google-auth==2.29.0
google-auth-httplib2==0.2.0
google-generativeai==0.5.4
googleapis-common-protos==1.63.0
gradio==4.21.0
gradio_client==0.12.0
grpcio==1.64.0
grpcio-status==1.62.2
gTTS==2.5.1
h11==0.14.0
HTMLParser==0.0.2
httpcore==1.0.5
httplib2==0.22.0
httptools==0.6.1
httpx==0.27.0
huggingface-hub==0.23.2
humanfriendly==10.0
identify==2.5.36
idna==3.6
imageio==2.34.0
imageio-ffmpeg==0.4.9
imgaug==0.4.0
importlib_metadata==7.0.2
importlib_resources==6.4.0
install==1.3.5
IOPaint==1.3.3
ipython==8.24.0
itsdangerous==2.2.0
jaraco.context==5.3.0
jaraco.functools==4.0.1
jedi==0.19.1
Jinja2==3.1.3
jiter==0.4.0
jmespath==0.10.0
jsonschema==4.22.0
jsonschema-specifications==2023.12.1
kiwisolver==1.4.5
langid==1.1.6
lazy_loader==0.4
lief==0.14.1
linkai==0.0.6.0
lmdb==1.4.1
loguru==0.7.2
lxml==5.2.2
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.9.0
matplotlib-inline==0.1.7
mdurl==0.1.2
modelscope==1.13.1
more-itertools==10.2.0
moviepy==1.0.3
mpmath==1.3.0
multidict==6.0.5
multiprocess==0.70.16
networkx==3.2.1
nodeenv==1.8.0
Nuitka==2.1.2
numpy==1.24.2
omegaconf==2.3.0
onnxruntime==1.17.1
openai==0.27.8
opencv-contrib-python==4.6.0.66
opencv-python==4.6.0.66
opencv-python-headless==4.9.0.80
openpyxl==3.1.2
opt-einsum==3.3.0
optionaldict==0.1.2
ordered-set==4.1.0
orjson==3.10.3
oss2==2.18.4
packaging==24.0
paddleocr==2.7.3
paddlepaddle==2.6.1
pandas==2.2.1
parso==0.8.4
pdf2docx==0.5.8
pefile==2023.2.7
peft==0.7.1
piexif==1.1.3
pillow==10.3.0
platformdirs==4.2.0
pre-commit==3.7.1
premailer==3.10.0
proglog==0.1.10
prompt-toolkit==3.0.43
proto-plus==1.23.0
protobuf==3.20.2
psutil==5.9.8
pure-eval==0.2.2
pyarrow==15.0.1
pyarrow-hotfix==0.6
pyasn1==0.6.0
pyasn1_modules==0.4.0
pyclipper==1.3.0.post5
pycparser==2.21
pycryptodome==3.20.0
pydantic==2.5.3
pydantic_core==2.14.6
pydub==0.25.1
Pygments==2.18.0
pyinstaller==6.5.0
pyinstaller-hooks-contrib==2024.3
PyJWT==2.8.0
PyMuPDF==1.24.5
PyMuPDFb==1.24.3
pyOpenSSL==24.1.0
pyoxidizer==0.24.0
pyparsing==3.1.2
pypiwin32==223
pypng==0.20220715.0
PyQRCode==1.2.1
pyreadline3==3.4.1
pytesseract==0.3.10
python-dateutil==2.9.0.post0
python-docx==1.1.2
python-dotenv==1.0.1
python-engineio==4.9.1
python-multipart==0.0.9
python-socketio==5.7.2
pyttsx3==2.90
pytz==2024.1
pywin32==306
pywin32-ctypes==0.2.2
PyYAML==6.0.1
qrcode==7.4.2
rapidfuzz==3.9.3
rarfile==4.2
referencing==0.35.1
regex==2024.5.15
requests==2.31.0
rich==13.7.1
rpds-py==0.18.1
rsa==4.9
ruff==0.4.7
safetensors==0.4.3
scenedetect==0.6.3
scikit-image==0.23.2
scipy==1.12.0
semantic-version==2.10.0
sgmllib3k==1.0.0
shapely==2.0.4
shellingham==1.5.4
simple-websocket==1.0.0
simplejson==3.19.2
six==1.16.0
sniffio==1.3.1
sortedcontainers==2.4.0
soupsieve==2.5
SpeechRecognition==3.10.4
stack-data==0.6.3
starlette==0.32.0.post1
sympy==1.12
tenacity==8.2.3
termcolor==2.4.0
tifffile==2024.5.22
tiktoken==0.4.0
timm==1.0.3
tokenizers==0.19.1
tomli==2.0.1
tomlkit==0.12.0
toolz==0.12.1
torch==2.1.2+cu118
torchvision==0.16.2+cu118
tqdm==4.66.2
traitlets==5.14.3
transformers==4.41.2
typer==0.12.3
typer-config==1.4.0
typing_extensions==4.10.0
tzdata==2024.1
tzlocal==5.2
ujson==5.9.0
uritemplate==4.1.1
urllib3==2.2.1
utility==1.0
uvicorn==0.29.0
virtualenv==20.26.2
visualdl==2.5.3
watchfiles==0.21.0
wcwidth==0.2.13
web.py==0.62
websocket-client==1.2.0
websockets==11.0.3
wechatpy==1.8.18
Werkzeug==3.0.3
wikipedia==1.4.0
win32-setctime==1.1.0
wolframalpha==5.0.0
wsproto==1.2.0
xlrd==2.0.1
xmltodict==0.13.0
xxhash==3.4.1
yacs==0.1.8
yapf==0.40.2
yarl==1.9.4
zhipuai==2.1.0.20240521
zipp==3.18.1
zstandard==0.22.0

View File

@ -8,8 +8,8 @@ import os
import sys import sys
import json import json
import subprocess import subprocess
from huggingface_hub import hf_hub_download # from huggingface_hub import hf_hub_download
from faster_whisper import WhisperModel # from faster_whisper import WhisperModel
import public_tools import public_tools
from pathlib import Path from pathlib import Path
@ -174,111 +174,111 @@ def SplitAudio(video_out_folder, video_list):
return mp3_list return mp3_list
def GetText(out_folder, mp3_list): # def GetText(out_folder, mp3_list):
text = [] # text = []
# 先获取模型 # # 先获取模型
print("正在下载或加载模型") # print("正在下载或加载模型")
sys.stdout.flush() # sys.stdout.flush()
model_path = Path( # model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") # hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="config.json", # filename="config.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json", # filename="preprocessor_config.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json", # filename="tokenizer.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json", # filename="vocabulary.json",
) # )
model = WhisperModel( # model = WhisperModel(
model_size_or_path=os.path.dirname(model_path), # model_size_or_path=os.path.dirname(model_path),
device="auto", # device="auto",
local_files_only=True, # local_files_only=True,
) # )
print("模型加载成功,开始识别") # print("模型加载成功,开始识别")
sys.stdout.flush() # sys.stdout.flush()
for mp in mp3_list: # for mp in mp3_list:
segments, info = model.transcribe( # segments, info = model.transcribe(
mp, # mp,
beam_size=5, # beam_size=5,
language="zh", # language="zh",
vad_filter=True, # vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000), # vad_parameters=dict(min_silence_duration_ms=1000),
) # )
tmp_text = "" # tmp_text = ""
for segment in segments: # for segment in segments:
tmp_text += segment.text + "" # tmp_text += segment.text + "。"
print(mp + "识别完成") # print(mp + "识别完成")
sys.stdout.flush() # sys.stdout.flush()
text.append(tmp_text) # text.append(tmp_text)
# 数据写出 # # 数据写出
print("文本全部识别成功,正在写出") # print("文本全部识别成功,正在写出")
sys.stdout.flush() # sys.stdout.flush()
tools = public_tools.PublicTools() # tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, "文案.txt")) # tools.write_to_file(text, os.path.join(out_folder, "文案.txt"))
print("写出完成") # print("写出完成")
sys.stdout.flush() # sys.stdout.flush()
def GetTextTask(out_folder, mp, name): # def GetTextTask(out_folder, mp, name):
text = [] # text = []
# 先获取模型 # # 先获取模型
print("正在下载或加载模型") # print("正在下载或加载模型")
sys.stdout.flush() # sys.stdout.flush()
model_path = Path( # model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") # hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="config.json", # filename="config.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json", # filename="preprocessor_config.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json", # filename="tokenizer.json",
) # )
hf_hub_download( # hf_hub_download(
repo_id="Systran/faster-whisper-large-v3", # repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json", # filename="vocabulary.json",
) # )
model = WhisperModel( # model = WhisperModel(
model_size_or_path=os.path.dirname(model_path), # model_size_or_path=os.path.dirname(model_path),
device="auto", # device="auto",
local_files_only=True, # local_files_only=True,
) # )
print("模型加载成功,开始识别") # print("模型加载成功,开始识别")
sys.stdout.flush() # sys.stdout.flush()
segments, info = model.transcribe( # segments, info = model.transcribe(
mp, # mp,
beam_size=5, # beam_size=5,
language="zh", # language="zh",
vad_filter=True, # vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000), # vad_parameters=dict(min_silence_duration_ms=1000),
) # )
tmp_text = "" # tmp_text = ""
for segment in segments: # for segment in segments:
tmp_text += segment.text + "" # tmp_text += segment.text + "。"
print(mp + "识别完成") # print(mp + "识别完成")
sys.stdout.flush() # sys.stdout.flush()
text.append(tmp_text) # text.append(tmp_text)
# 数据写出 # # 数据写出
sys.stdout.flush() # sys.stdout.flush()
tools = public_tools.PublicTools() # tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, name + ".txt")) # tools.write_to_file(text, os.path.join(out_folder, name + ".txt"))
sys.stdout.flush() # sys.stdout.flush()
def get_fram(video_path, out_path, sensitivity): def get_fram(video_path, out_path, sensitivity):
@ -297,12 +297,12 @@ def get_fram(video_path, out_path, sensitivity):
exit(0) exit(0)
def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type): # def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type):
v_l = ClipVideo( # v_l = ClipVideo(
video_path, video_out_folder, image_out_folder, sensitivity, gpu_type # video_path, video_out_folder, image_out_folder, sensitivity, gpu_type
) # )
# 开始分离音频 # # 开始分离音频
m_l = SplitAudio(video_out_folder, v_l) # m_l = SplitAudio(video_out_folder, v_l)
# 开始识别字幕 # # 开始识别字幕
GetText(os.path.dirname(video_out_folder), m_l) # GetText(os.path.dirname(video_out_folder), m_l)

View File

@ -8,7 +8,7 @@ import { DEFINE_STRING } from "../../../define/define_string";
import path from 'path' import path from 'path'
import { BasicReverse } from './basicReverse' import { BasicReverse } from './basicReverse'
import { BookTaskDetailService } from '../../../define/db/service/Book/bookTaskDetailService' import { BookTaskDetailService } from '../../../define/db/service/Book/bookTaskDetailService'
import { TaskScheduler } from "../task/taskScheduler" import { LogScheduler } from "../task/logScheduler"
import { Book } from '../../../model/book' import { Book } from '../../../model/book'
import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum' import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum'
import { GeneralResponse } from '../../../model/generalResponse' import { GeneralResponse } from '../../../model/generalResponse'
@ -28,7 +28,7 @@ import { isEmpty } from 'lodash'
*/ */
export class ReverseBook { export class ReverseBook {
basicReverse: BasicReverse basicReverse: BasicReverse
taskScheduler: TaskScheduler logScheduler: LogScheduler
mjOpt: MJOpt = new MJOpt() mjOpt: MJOpt = new MJOpt()
sdOpt: SDOpt = new SDOpt() sdOpt: SDOpt = new SDOpt()
tagDefine: TagDefine tagDefine: TagDefine
@ -42,7 +42,7 @@ export class ReverseBook {
this.tagDefine = new TagDefine() this.tagDefine = new TagDefine()
this.subtitle = new Subtitle() this.subtitle = new Subtitle()
this.watermark = new Watermark() this.watermark = new Watermark()
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
this.bookServiceBasic = new BookServiceBasic() this.bookServiceBasic = new BookServiceBasic()
this.bookBasic = new BookBasic() this.bookBasic = new BookBasic()
} }
@ -301,7 +301,7 @@ export class ReverseBook {
await this.bookServiceBasic.AddBookBackTask(book.id, task_type, TaskExecuteType.AUTO, bookTaskDetail.bookTaskId, bookTaskDetail.id, DEFINE_STRING.BOOK.REVERSE_PROMPT_RETURN await this.bookServiceBasic.AddBookBackTask(book.id, task_type, TaskExecuteType.AUTO, bookTaskDetail.bookTaskId, bookTaskDetail.id, DEFINE_STRING.BOOK.REVERSE_PROMPT_RETURN
); );
// 添加返回日志 // 添加返回日志
await this.taskScheduler.AddLogToDB(book.id, book.type, `添加 ${task_type} 反推任务成功`, bookTaskDetail.bookTaskId, LoggerStatus.SUCCESS) await this.logScheduler.AddLogToDB(book.id, book.type, `添加 ${task_type} 反推任务成功`, bookTaskDetail.bookTaskId, LoggerStatus.SUCCESS)
} }
} catch (error) { } catch (error) {
throw error throw error

View File

@ -5,7 +5,7 @@ const { exec } = require('child_process')
const execAsync = util.promisify(exec) const execAsync = util.promisify(exec)
import { define } from '../../../define/define' import { define } from '../../../define/define'
import { BookService } from '../../../define/db/service/Book/bookService' import { BookService } from '../../../define/db/service/Book/bookService'
import { TaskScheduler } from '../task/taskScheduler' import { LogScheduler } from '../task/logScheduler'
import { LoggerStatus, LoggerType, OtherData } from '../../../define/enum/softwareEnum' import { LoggerStatus, LoggerType, OtherData } from '../../../define/enum/softwareEnum'
import { errorMessage, successMessage } from '../../Public/generalTools' import { errorMessage, successMessage } from '../../Public/generalTools'
import { CheckFileOrDirExist, CheckFolderExistsOrCreate } from '../../../define/Tools/file' import { CheckFileOrDirExist, CheckFolderExistsOrCreate } from '../../../define/Tools/file'
@ -35,11 +35,11 @@ export class BasicReverse {
bookTaskDetailService: BookTaskDetailService bookTaskDetailService: BookTaskDetailService
bookBackTaskListService: BookBackTaskListService bookBackTaskListService: BookBackTaskListService
taskScheduler: TaskScheduler logScheduler: LogScheduler
ffmpegOptions: FfmpegOptions ffmpegOptions: FfmpegOptions
constructor() { constructor() {
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
this.ffmpegOptions = new FfmpegOptions() this.ffmpegOptions = new FfmpegOptions()
} }
@ -109,7 +109,7 @@ export class BasicReverse {
if (taskRes.code == 0) { if (taskRes.code == 0) {
throw new Error(taskRes.message) throw new Error(taskRes.message)
} }
this.taskScheduler.AddLogToDB( this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`添加分镜任务成功`, `添加分镜任务成功`,
@ -149,7 +149,7 @@ export class BasicReverse {
let sensitivity = 30 let sensitivity = 30
// 开始之前,推送日志 // 开始之前,推送日志
let log_content = `开始进行分镜操作,视频地址:${oldVideoPath},敏感度:${sensitivity},正在调用程序进行处理` let log_content = `开始进行分镜操作,视频地址:${oldVideoPath},敏感度:${sensitivity},正在调用程序进行处理`
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
log_content, log_content,
@ -170,7 +170,7 @@ export class BasicReverse {
// 有错误输出 // 有错误输出
if (output.stderr != '') { if (output.stderr != '') {
let error_msg = `分镜成功,但有警告提示:${output.stderr}` let error_msg = `分镜成功,但有警告提示:${output.stderr}`
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
error_msg, error_msg,
@ -187,7 +187,7 @@ export class BasicReverse {
BookTaskStatus.STORYBOARD_FAIL, BookTaskStatus.STORYBOARD_FAIL,
error_message error_message
) )
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
error_message, error_message,
@ -205,7 +205,7 @@ export class BasicReverse {
BookTaskStatus.STORYBOARD_FAIL, BookTaskStatus.STORYBOARD_FAIL,
error_msg error_msg
) )
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
error_msg, error_msg,
@ -237,7 +237,7 @@ export class BasicReverse {
this.bookTaskService.UpdateBookTaskStatus(bookTaskId, BookTaskStatus.STORYBOARD_DONE) this.bookTaskService.UpdateBookTaskStatus(bookTaskId, BookTaskStatus.STORYBOARD_DONE)
// 分镜成功,推送日志 // 分镜成功,推送日志
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`分镜成功,分镜数据如下:${frameJsonData}`, `分镜成功,分镜数据如下:${frameJsonData}`,
@ -310,7 +310,7 @@ export class BasicReverse {
if (bookTaskDetail.data.length <= 0) { if (bookTaskDetail.data.length <= 0) {
// 传入的分镜数据为空,需要重新获取 // 传入的分镜数据为空,需要重新获取
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`没有传入分镜数据,开始调用分镜方法`, `没有传入分镜数据,开始调用分镜方法`,
@ -339,7 +339,7 @@ export class BasicReverse {
this.bookTaskService.UpdateBookTaskStatus(bookTask.id, BookTaskStatus.SPLIT) this.bookTaskService.UpdateBookTaskStatus(bookTask.id, BookTaskStatus.SPLIT)
// 有分镜数据,开始处理 // 有分镜数据,开始处理
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`成功获取分镜数据,开始添加裁剪视频任务`, `成功获取分镜数据,开始添加裁剪视频任务`,
@ -363,7 +363,7 @@ export class BasicReverse {
} }
} }
// 添加日志 // 添加日志
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`添加视频裁剪任务成功`, `添加视频裁剪任务成功`,
@ -424,7 +424,7 @@ export class BasicReverse {
// 小改小说批次的状态 // 小改小说批次的状态
this.bookTaskService.UpdateBookTaskStatus(bookTaskDetail.bookTaskId, BookTaskStatus.SPLIT_DONE) this.bookTaskService.UpdateBookTaskStatus(bookTaskDetail.bookTaskId, BookTaskStatus.SPLIT_DONE)
// 结束,分镜完毕,推送日志,返回成功 // 结束,分镜完毕,推送日志,返回成功
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookTaskDetail.bookId, bookTaskDetail.bookId,
book.type, book.type,
`${bookTaskDetail.name}_视频裁剪完成`, `${bookTaskDetail.name}_视频裁剪完成`,
@ -490,7 +490,7 @@ export class BasicReverse {
}) })
} }
if (bookTaskRes.data.bookTasks.length <= 0 || bookTaskRes.data.total <= 0) { if (bookTaskRes.data.bookTasks.length <= 0 || bookTaskRes.data.total <= 0) {
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`没有找到对应的小说批次任务数据请检查bookId是否正确`, `没有找到对应的小说批次任务数据请检查bookId是否正确`,
@ -508,7 +508,7 @@ export class BasicReverse {
bookTaskId: bookTask.id bookTaskId: bookTask.id
}) })
if (bookTaskDetails.data.length <= 0) { if (bookTaskDetails.data.length <= 0) {
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`没有找到对应的小说批次任务数据请检查bookId是否正确或者手动执行`, `没有找到对应的小说批次任务数据请检查bookId是否正确或者手动执行`,
@ -531,7 +531,7 @@ export class BasicReverse {
throw new Error(taskRes.message) throw new Error(taskRes.message)
} }
// 添加日志 // 添加日志
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`添加音频 ${taskRes.data.name} 分离任务成功`, `添加音频 ${taskRes.data.name} 分离任务成功`,
@ -588,7 +588,7 @@ export class BasicReverse {
}) })
// 推送成功消息 // 推送成功消息
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
task.bookId, task.bookId,
book.type, book.type,
`${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`, `${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`,
@ -656,7 +656,7 @@ export class BasicReverse {
throw new Error(taskRes.message) throw new Error(taskRes.message)
} }
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`添加 ${taskRes.data.name} 抽帧任务成功`, `添加 ${taskRes.data.name} 抽帧任务成功`,
@ -701,7 +701,7 @@ export class BasicReverse {
}) })
// 推送成功消息 // 推送成功消息
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
book.id, book.id,
book.type, book.type,
`${bookTaskDetail.name}抽帧成功,输出地址:${outputFramePath}`, `${bookTaskDetail.name}抽帧成功,输出地址:${outputFramePath}`,
@ -797,7 +797,7 @@ export class BasicReverse {
} }
} }
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`添加提取字幕任务成功`, `添加提取字幕任务成功`,
@ -821,9 +821,9 @@ export class BasicReverse {
// 判断是不是用本地的wisper服务 // 判断是不是用本地的wisper服务
if (isWisper) { if (isWisper) {
// 开始调用wisper // 开始调用wisper
// 使用异步的方法调用一个python程序然后写入到指定的json文件中k // 使用异步的方法调用一个python程序然后写入到指定的json文件中
let out_dir = path.dirname(bookTaskDetail.videoPath) let out_dir = path.dirname(bookTaskDetail.videoPath)
// #TODO -t 被移除
let command = `"${path.join(define.scripts_path, 'Lai.exe')}" "-t" "${out_dir}" "${bookTaskDetail.audioPath let command = `"${path.join(define.scripts_path, 'Lai.exe')}" "-t" "${out_dir}" "${bookTaskDetail.audioPath
}" "${bookTaskDetail.name}"` }" "${bookTaskDetail.name}"`
const output = await execAsync(command, { const output = await execAsync(command, {
@ -833,7 +833,7 @@ export class BasicReverse {
// 有错误输出 // 有错误输出
if (output.stderr != '') { if (output.stderr != '') {
let error_msg = `提取字幕成功,但有警告提示:${output.stderr}` let error_msg = `提取字幕成功,但有警告提示:${output.stderr}`
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
book.id, book.id,
book.type, book.type,
error_msg, error_msg,
@ -856,7 +856,7 @@ export class BasicReverse {
}) })
// 提取字幕成功,推送日志 // 提取字幕成功,推送日志
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
book.id, book.id,
book.type, book.type,
`${bookTaskDetail.name} 提取字幕成功`, `${bookTaskDetail.name} 提取字幕成功`,

View File

@ -7,7 +7,7 @@ import { FfmpegOptions } from "../ffmpegOptions";
import { CheckFileOrDirExist, CopyFileOrFolder, DeleteFolderAllFile } from "../../../define/Tools/file"; import { CheckFileOrDirExist, CopyFileOrFolder, DeleteFolderAllFile } from "../../../define/Tools/file";
import fs from 'fs'; import fs from 'fs';
import { Book } from "../../../model/book"; import { Book } from "../../../model/book";
import { TaskScheduler } from '../task/taskScheduler'; import { LogScheduler } from '../task/logScheduler';
import { BookBasic } from "./BooKBasic"; import { BookBasic } from "./BooKBasic";
import { LoggerStatus, OtherData } from "../../../define/enum/softwareEnum"; import { LoggerStatus, OtherData } from "../../../define/enum/softwareEnum";
import { BasicReverse } from "./basicReverse"; import { BasicReverse } from "./basicReverse";
@ -15,18 +15,17 @@ import { BasicReverse } from "./basicReverse";
export class BookFrame { export class BookFrame {
bookServiceBasic: BookServiceBasic bookServiceBasic: BookServiceBasic
ffmpegOptions: FfmpegOptions ffmpegOptions: FfmpegOptions
taskScheduler: TaskScheduler logScheduler: LogScheduler
basicReverse: BasicReverse basicReverse: BasicReverse
bookBasic: BookBasic bookBasic: BookBasic
constructor() { constructor() {
this.bookServiceBasic = new BookServiceBasic(); this.bookServiceBasic = new BookServiceBasic();
this.ffmpegOptions = new FfmpegOptions(); this.ffmpegOptions = new FfmpegOptions();
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
this.bookBasic = new BookBasic() this.bookBasic = new BookBasic()
this.basicReverse = new BasicReverse() this.basicReverse = new BasicReverse()
} }
/** /**
* *
* @param bookTaskDetailId ID * @param bookTaskDetailId ID
@ -138,7 +137,7 @@ export class BookFrame {
}) })
} catch (error) { } catch (error) {
// 传入的分镜数据为空,需要重新获取 // 传入的分镜数据为空,需要重新获取
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
`没有传入分镜数据,请先进行分镜计算`, `没有传入分镜数据,请先进行分镜计算`,
@ -164,7 +163,7 @@ export class BookFrame {
} }
let res = await this.basicReverse.FrameDataToCutVideoData(item, shortClipData[i]); let res = await this.basicReverse.FrameDataToCutVideoData(item, shortClipData[i]);
} }
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
bookId, bookId,
book.type, book.type,
"所有的视频裁剪完成,开始抽帧", "所有的视频裁剪完成,开始抽帧",

View File

@ -173,6 +173,12 @@ export class BookVideo {
} }
/**
* 稿
* @param id
* @param operateBookType
* @returns
*/
async AddJianyingDraft(id: string, operateBookType: OperateBookType): Promise<GeneralResponse.ErrorItem | GeneralResponse.SuccessItem> { async AddJianyingDraft(id: string, operateBookType: OperateBookType): Promise<GeneralResponse.ErrorItem | GeneralResponse.SuccessItem> {
try { try {
await this.InitService(); await this.InitService();

View File

@ -13,7 +13,7 @@ import { MJSetting } from "../../../model/Setting/mjSetting";
import { GeneralResponse } from "../../../model/generalResponse" import { GeneralResponse } from "../../../model/generalResponse"
import { LoggerStatus, ResponseMessageType } from "../../../define/enum/softwareEnum"; import { LoggerStatus, ResponseMessageType } from "../../../define/enum/softwareEnum";
import { ImageStyle } from "../Book/imageStyle"; import { ImageStyle } from "../Book/imageStyle";
import { TaskScheduler } from "../task/taskScheduler"; import { LogScheduler } from "../task/logScheduler";
import { Tools } from "../../../main/tools" import { Tools } from "../../../main/tools"
import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic"; import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic";
import { PresetService } from "../presetService"; import { PresetService } from "../presetService";
@ -28,14 +28,14 @@ export class MJOpt {
mjApi: MJApi; mjApi: MJApi;
mjSetting: MJSetting.MjSetting mjSetting: MJSetting.MjSetting
imageStyle: ImageStyle; imageStyle: ImageStyle;
taskScheduler: TaskScheduler; logScheduler: LogScheduler;
tools: Tools; tools: Tools;
bookServiceBasic: BookServiceBasic bookServiceBasic: BookServiceBasic
presetService: PresetService presetService: PresetService
softWareServiceBasic: SoftWareServiceBasic softWareServiceBasic: SoftWareServiceBasic
constructor() { constructor() {
this.imageStyle = new ImageStyle() this.imageStyle = new ImageStyle()
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
this.tools = new Tools() this.tools = new Tools()
this.bookServiceBasic = new BookServiceBasic(); this.bookServiceBasic = new BookServiceBasic();
this.presetService = new PresetService() this.presetService = new PresetService()
@ -528,7 +528,7 @@ export class MJOpt {
let taskRes = await this.bookServiceBasic.AddBookBackTask(element.bookId, BookBackTaskType.MJ_IMAGE, TaskExecuteType.AUTO, element.bookTaskId, element.id, responseMessageName let taskRes = await this.bookServiceBasic.AddBookBackTask(element.bookId, BookBackTaskType.MJ_IMAGE, TaskExecuteType.AUTO, element.bookTaskId, element.id, responseMessageName
); );
// 添加返回日志 // 添加返回日志
await this.taskScheduler.AddLogToDB(element.bookId, BookBackTaskType.MJ_IMAGE, `添加 ${element.name} MJ生成任务成功`, element.bookTaskId, LoggerStatus.SUCCESS) await this.logScheduler.AddLogToDB(element.bookId, BookBackTaskType.MJ_IMAGE, `添加 ${element.name} MJ生成任务成功`, element.bookTaskId, LoggerStatus.SUCCESS)
} }
// 全部完毕 // 全部完毕
return successMessage(null, "MJ添加生成图片任务成功", "MJOpt_AddGenerateImageTask") return successMessage(null, "MJ添加生成图片任务成功", "MJOpt_AddGenerateImageTask")

View File

@ -48,8 +48,10 @@ export class SDOpt {
const id = ids[i]; const id = ids[i];
let scene = await this.presetService.GetScenePresetDetailById(id) let scene = await this.presetService.GetScenePresetDetailById(id)
if (scene.code == 1) { if (scene.code == 1) {
if (scene.data) {
// 这边开始拼接 // 这边开始拼接
result += scene.data.prompt + ', ' result += scene.data.prompt + ', '
}
} else { } else {
throw new Error(scene.message) throw new Error(scene.message)
} }
@ -68,10 +70,12 @@ export class SDOpt {
const id = ids[i]; const id = ids[i];
let character = await this.presetService.GetCharacterPresetDetailById(id) let character = await this.presetService.GetCharacterPresetDetailById(id)
if (character.code == 1) { if (character.code == 1) {
if (character.data) {
result += character.data.prompt + ', ' result += character.data.prompt + ', '
if (character.data.lora && character.data.lora != '无' && character.data.loraWeight) { if (character.data.lora && character.data.lora != '无' && character.data.loraWeight) {
result += `, <lora:${character.data.lora}:${character.data.lora_weight}>` result += `, <lora:${character.data.lora}:${character.data.lora_weight}>`
} }
}
} else { } else {
throw new Error(character.message) throw new Error(character.message)
} }

View File

@ -69,7 +69,7 @@ class BookServiceBasic {
GetBookTaskDetailDataById = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailDataById(bookTaskDetailId); GetBookTaskDetailDataById = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailDataById(bookTaskDetailId);
GetBookTaskDetailData = async (condition: Book.QueryBookTaskDetailCondition, returnEmpty: boolean = false) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailData(condition, returnEmpty); GetBookTaskDetailData = async (condition: Book.QueryBookTaskDetailCondition, returnEmpty: boolean = false) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailData(condition, returnEmpty);
UpdateBookTaskDetail = async (bookTaskDetailId: string, data: Book.SelectBookTaskDetail) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetail(bookTaskDetailId, data); UpdateBookTaskDetail = async (bookTaskDetailId: string, data: Book.SelectBookTaskDetail) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetail(bookTaskDetailId, data);
UpdateBookTaskStatus = async (bookTaskDetailId: string, status: BookTaskStatus) => await this.bookTaskDetailServiceBasic.UpdateBookTaskStatus(bookTaskDetailId, status); UpdateBookTaskStatus = async (bookTaskDetailId: string, status: BookTaskStatus,errorMsg? : string) => await this.bookTaskDetailServiceBasic.UpdateBookTaskStatus(bookTaskDetailId, status,errorMsg);
DeleteBookTaskDetailReversePromptById = async (bookTaskDetailId: string, reversePromptId: string) => await this.bookTaskDetailServiceBasic.DeleteBookTaskDetailReversePromptById(bookTaskDetailId); DeleteBookTaskDetailReversePromptById = async (bookTaskDetailId: string, reversePromptId: string) => await this.bookTaskDetailServiceBasic.DeleteBookTaskDetailReversePromptById(bookTaskDetailId);
DeleteBoookTaskDetailGenerateImage = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.DeleteBoookTaskDetailGenerateImage(bookTaskDetailId); DeleteBoookTaskDetailGenerateImage = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.DeleteBoookTaskDetailGenerateImage(bookTaskDetailId);
UpdateBookTaskDetailReversePrompt = async (bookTaskDetailId: string, reversePromptId: string, data: Book.ReversePrompt) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetailReversePrompt(bookTaskDetailId, reversePromptId, data); UpdateBookTaskDetailReversePrompt = async (bookTaskDetailId: string, reversePromptId: string, data: Book.ReversePrompt) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetailReversePrompt(bookTaskDetailId, reversePromptId, data);

View File

@ -16,7 +16,7 @@ import fs from 'fs'
import { GeneralResponse } from '../../../model/generalResponse' import { GeneralResponse } from '../../../model/generalResponse'
import { BookServiceBasic } from '../ServiceBasic/bookServiceBasic' import { BookServiceBasic } from '../ServiceBasic/bookServiceBasic'
import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum' import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum'
import { TaskScheduler } from '../task/taskScheduler' import { LogScheduler } from '../task/logScheduler'
import { SubtitleModel } from '../../../model/subtitle' import { SubtitleModel } from '../../../model/subtitle'
import { BookTaskStatus, OperateBookType } from '../../../define/enum/bookEnum' import { BookTaskStatus, OperateBookType } from '../../../define/enum/bookEnum'
import axios from 'axios' import axios from 'axios'
@ -24,8 +24,9 @@ import { GptService } from '../GPT/gpt'
import FormData from 'form-data' import FormData from 'form-data'
import { RetryWithBackoff } from '../../../define/Tools/common' import { RetryWithBackoff } from '../../../define/Tools/common'
import { DEFINE_STRING } from '../../../define/define_string' import { DEFINE_STRING } from '../../../define/define_string'
import { DraftTimeLineJson } from '../jianying/jianyingService'
const util = require('util') const util = require('util')
const { exec } = require('child_process') const { spawn, exec } = require('child_process')
const execAsync = util.promisify(exec) const execAsync = util.promisify(exec)
const fspromises = fs.promises const fspromises = fs.promises
@ -36,12 +37,12 @@ const fspromises = fs.promises
export class Subtitle { export class Subtitle {
ffmpegOptions: FfmpegOptions ffmpegOptions: FfmpegOptions
bookServiceBasic: BookServiceBasic bookServiceBasic: BookServiceBasic
taskScheduler: TaskScheduler logScheduler: LogScheduler
gptService: GptService gptService: GptService
constructor() { constructor() {
this.bookServiceBasic = new BookServiceBasic() this.bookServiceBasic = new BookServiceBasic()
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
this.ffmpegOptions = new FfmpegOptions() this.ffmpegOptions = new FfmpegOptions()
this.gptService = new GptService() this.gptService = new GptService()
} }
@ -74,28 +75,6 @@ export class Subtitle {
return frameTimes return frameTimes
} }
/**
*
* @param bookId ID
* @param bookTaskId ID
* @returns
*/
async GetBookAllData(bookId: string, bookTaskId: string = null): Promise<{ book: Book.SelectBook, bookTask: Book.SelectBookTask, bookTaskDetails: Book.SelectBookTaskDetail[] }> {
let { book, bookTask } = await this.bookServiceBasic.GetBookAndTask(bookId, bookTaskId ? bookTaskId : 'output_00001')
if (isEmpty(book.subtitlePosition)) {
throw new Error("请先设置小说的字幕位置")
}
// 获取所有的分镜数据
let bookTaskDetails = await this.bookServiceBasic.GetBookTaskDetailData({
bookId: bookId,
bookTaskId: bookTask.id
})
if (bookTaskDetails.length <= 0) {
throw new Error("没有找到对应的分镜数据,请先执行对应的操作")
}
return { book, bookTask, bookTaskDetails }
}
/** /**
* *
* @param content * @param content
@ -123,7 +102,7 @@ export class Subtitle {
}, DEFINE_STRING.BOOK.GET_COPYWRITING_RETURN) }, DEFINE_STRING.BOOK.GET_COPYWRITING_RETURN)
// 添加日志 // 添加日志
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
book.id, book.id,
book.type, book.type,
`${bookTaskDetail.name} 识别文案成功`, `${bookTaskDetail.name} 识别文案成功`,
@ -562,7 +541,7 @@ export class Subtitle {
}) })
// 推送成功消息 // 推送成功消息
await this.taskScheduler.AddLogToDB( await this.logScheduler.AddLogToDB(
book.id, book.id,
book.type, book.type,
`${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`, `${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`,
@ -667,4 +646,73 @@ export class Subtitle {
} }
} }
//#endregion //#endregion
//#region 本地Whisper识别字幕相关操作
async GetTextByLocalWhisper(frameTimeList: DraftTimeLineJson[], outDir: string, mp3Dir: string, localWhisperPath?: string): Promise<void> {
try {
let localWhisperPathExePath = localWhisperPath
if (isEmpty(localWhisperPathExePath)) {
localWhisperPathExePath = path.join(define.scripts_path, 'localWhisper/local_whisper.exe')
}
return new Promise((resolve, reject) => {
let child = spawn(
localWhisperPathExePath,
['-ts', outDir, mp3Dir],
{ encoding: 'utf-8' }
);
child.on('error', (error) => {
console.log('error=', error)
this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败输出失败信息如下" + error.message))
reject(new Error(error.message))
})
child.stdout.on('data', (data) => {
console.log(data.toString())
this.logScheduler.ReturnLogger(successMessage(data.toString(), "使用localWhisper识别字幕输出"))
})
child.stderr.on('data', (data) => {
console.log('stderr=', data.toString())
this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败输出失败信息如下stderr = " + data.toString()))
reject(new Error(data.toString()))
})
child.on('close', async (data) => {
console.log('data=', data.toString())
this.logScheduler.ReturnLogger(successMessage(data.toString(), "使用localWhisper识别字幕完成"))
let textPath = path.join(outDir, '文案.txt')
if (!await CheckFileOrDirExist(textPath)) {
throw new Error('没有找到识别输出的文案文件')
}
let text = await fspromises.readFile(textPath, 'utf-8')
let textLines = text.split(/\r?\n/)
let lastLine = textLines[textLines.length - 1]
// 丢掉最后一行
textLines = textLines.slice(0, -1)
if (textLines.length != frameTimeList.length) {
throw new Error('分镜和识别文案数量不一致')
}
// 保存文案
for (let i = 0; i < textLines.length; i++) {
const element = textLines[i];
frameTimeList[i].text = element
}
// 写出
await fspromises.writeFile(path.join(global.config.project_path, '文案.txt'), textLines.join('\n'), 'utf-8')
if (data == 0) {
this.logScheduler.ReturnLogger(successMessage(null, "使用localWhisper识别字幕完成"))
} else {
this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败失败信息请查看日志"))
}
resolve();
})
})
} catch (error) {
this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败失败信息如下" + error.message))
throw error
}
}
//#endregion
} }

View File

@ -11,7 +11,7 @@ import fs from 'fs'
import { CheckFileOrDirExist } from "../../../define/Tools/file"; import { CheckFileOrDirExist } from "../../../define/Tools/file";
import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic"; import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic";
import { Subtitle } from "./subtitle"; import { Subtitle } from "./subtitle";
import { TaskScheduler } from "../task/taskScheduler"; import { LogScheduler } from "../task/logScheduler";
import { BookTaskStatus, BookType, OperateBookType } from "../../../define/enum/bookEnum"; import { BookTaskStatus, BookType, OperateBookType } from "../../../define/enum/bookEnum";
import { Book } from "../../../model/book"; import { Book } from "../../../model/book";
import { TimeStringToMilliseconds } from "../../../define/Tools/time"; import { TimeStringToMilliseconds } from "../../../define/Tools/time";
@ -20,7 +20,7 @@ export class SubtitleService {
softWareServiceBasic: SoftWareServiceBasic softWareServiceBasic: SoftWareServiceBasic
bookServiceBasic: BookServiceBasic bookServiceBasic: BookServiceBasic
subtitle: Subtitle subtitle: Subtitle
taskScheduler: TaskScheduler logScheduler: LogScheduler
constructor() { constructor() {
this.softWareServiceBasic = new SoftWareServiceBasic(); this.softWareServiceBasic = new SoftWareServiceBasic();
this.bookServiceBasic = new BookServiceBasic(); this.bookServiceBasic = new BookServiceBasic();

View File

@ -98,8 +98,6 @@ export class FfmpegOptions {
/** /**
* FFmpeg裁剪视频 * FFmpeg裁剪视频
* @param {*} book
* @param {*} bookTask
* @param {*} startTime * @param {*} startTime
* @param {*} endTime * @param {*} endTime
* @param {*} videoPath * @param {*} videoPath
@ -225,7 +223,7 @@ export class FfmpegOptions {
// 判断分镜是不是和数据库中的数据匹配的上 // 判断分镜是不是和数据库中的数据匹配的上
let res = await new Promise((resolve, reject) => { let res = await new Promise((resolve, reject) => {
Ffmpeg(videoPath) Ffmpeg(videoPath)
.inputOptions([`-ss ${MillisecondsToTimeString(frameTime)}`]) .inputOptions([`-ss ${MillisecondsToTimeString(Math.ceil(frameTime))}`])
.output(outFramePath) .output(outFramePath)
.frames(1) .frames(1)
.on('end', async function () { .on('end', async function () {

View File

@ -0,0 +1,216 @@
import path from 'path';
import { CheckFileOrDirExist, DeleteFolderAllFile } from '../../../define/Tools/file';
import fs from "fs";
import { ValidateJson } from '../../../define/Tools/validate';
import { FfmpegOptions } from '../ffmpegOptions';
/**
 * One storyboard/timeline entry (a "scene") shared by the JianYing draft
 * parser and the video storyboarding pipeline.
 */
export type DraftTimeLineJson = {
    name: string;            // zero-padded scene name, e.g. "00001"
    startTime: number;       // scene start on the timeline (ms in the video pipeline; draft-native units when read from a draft — TODO confirm)
    endTime: number;         // scene end on the timeline (same unit as startTime)
    durationTime: number;    // endTime - startTime
    middleTime: number;      // scene midpoint, used to pick the key frame
    videoPath: string;       // source video file this scene comes from
    text: string;            // narration text aligned to this scene
    framePath: string;       // extracted key-frame image path
    subVideoPath?: string;   // cut video clip for this scene (set by the video pipeline)
    audioPath?: string;      // extracted audio clip for this scene (set by the video pipeline)
}
/**
 * Reads a JianYing (CapCut CN) draft, aligns the text track to the video
 * track, extracts one key frame per scene, and writes the narration text and
 * the scene timeline JSON into the project directory.
 */
class JianyingService {
    // One entry per segment on the draft's first video track.
    draftTimeLine: DraftTimeLineJson[];
    // Parsed draft_content.json (JianYing's own, undocumented format).
    draftJson: any;
    ffmpegOptions: FfmpegOptions
    constructor() {
        this.draftTimeLine = [];
        this.ffmpegOptions = new FfmpegOptions();
    }
    /**
     * Entry point: parse the draft, build the scene timeline, write 文案.txt,
     * extract key frames, and dump draftFrameData.json.
     * @param draftDir directory of the JianYing draft (must contain draft_content.json)
     * @param projectDir project output directory
     * @param packagePath not used inside this method (kept for the caller's signature — TODO confirm)
     * @throws when the draft file is missing/invalid or lacks a video/text track
     */
    async GetDraftFrameAndText(draftDir: string, projectDir: string, packagePath: string) {
        try {
            // Locate and validate the draft JSON file.
            let draftJsonPath = path.resolve(draftDir, "draft_content.json");
            if (!await CheckFileOrDirExist(draftJsonPath)) {
                throw new Error("剪映草稿文件不存在,请先检查");
            }
            let draftJsonString = await fs.promises.readFile(draftJsonPath, "utf-8");
            if (!ValidateJson(draftJsonString)) {
                throw new Error("剪映草稿文件格式错误,请检查");
            }
            this.draftJson = JSON.parse(draftJsonString);
            // Reset the project's tmp folder if it already exists.
            let projectTmp = path.resolve(projectDir, "tmp");
            if (await CheckFileOrDirExist(projectTmp)) {
                await DeleteFolderAllFile(projectTmp);
            }
            // Output folder for the extracted frames.
            // NOTE(review): the path is computed but never created here —
            // presumably FfmpegGetFrame creates it on demand; confirm.
            let projectInput = path.resolve(projectTmp, "input_crop");
            console.log("projectInput", projectInput);
            // The draft must contain at least one video track and one text track.
            let draftTracks = this.draftJson.tracks;
            if (!draftTracks) {
                throw new Error("剪映草稿文件格式错误,没有轨道,请检查");
            }
            let hasVideo = draftTracks.some((track: any) => track.type === "video");
            let hasText = draftTracks.some((track: any) => track.type === "text");
            if (!(this.draftJson.tracks && hasVideo && hasText)) {
                throw new Error("没有检测到剪映草稿文件中的video和text轨道请检查");
            }
            // Build the scene timeline from the first video track's segments.
            let videoNodes = draftTracks.filter((track: any) => track.type === "video")[0].segments;
            this.GetVideoTime(videoNodes);
            // Attach narration text from the first text track's segments.
            let textNodes = draftTracks.filter((track: any) => track.type === "text")[0].segments;
            this.GetTextTime(textNodes);
            console.log("场景数:", this.draftTimeLine.length);
            // Write the per-scene narration text, one line per scene.
            let txtData = this.draftTimeLine.map((item) => item.text);
            let txtPath = path.resolve(projectDir, "文案.txt");
            await fs.promises.writeFile(txtPath, txtData.join("\n"), "utf-8");
            // Extract one key frame per scene.
            await this.GetDraftFrame(projectInput);
            // Persist the full timeline for downstream steps.
            let jsonPath = path.resolve(projectDir, "draftFrameData.json");
            await fs.promises.writeFile(jsonPath, JSON.stringify(this.draftTimeLine), "utf-8");
            console.log("GetDraftFrameAndText", jsonPath);
        } catch (error) {
            throw error;
        }
    }
    /**
     * Find the first node whose property `type` equals `value`.
     * @param nodes nodes to search (e.g. draft material lists)
     * @param type property name to match on (e.g. "id")
     * @param value value the property must equal
     * @returns the first matching node
     * @throws when no node matches
     */
    private FindNode(nodes: any[], type: string, value: any) {
        let res = nodes.filter((node: any) => node[type] === value);
        if (res.length === 0) {
            throw new Error("没有找到对应的节点");
        }
        return res[0];
    }
    /**
     * Whether a text segment lies entirely inside a scene's time range.
     * @param draftTimeObject the scene to test against
     * @param textStartTime text segment start
     * @param textEndTile text segment end (sic — original spelling kept)
     * @returns true when [textStartTime, textEndTile] is within the scene
     */
    private TextIsInTimeLine(draftTimeObject: DraftTimeLineJson, textStartTime: number, textEndTile: number) {
        return textStartTime >= draftTimeObject.startTime && textEndTile <= draftTimeObject.endTime;
    }
    /**
     * Extract the middle-time key frame of every scene into `projectInput`
     * and record it as the scene's framePath.
     * @param projectInput output directory for the frame images
     * @throws when ffmpeg reports failure (res.code == 0 means failure in
     *         this codebase's GeneralResponse convention)
     */
    private async GetDraftFrame(projectInput: string): Promise<void> {
        for (let i = 0; i < this.draftTimeLine.length; i++) {
            const element = this.draftTimeLine[i];
            let outImagePath = path.resolve(projectInput, (i + 1).toString().padStart(5, "0") + ".png");
            // Extract one frame at the scene midpoint.
            // NOTE(review): middleTime is divided by 1000 here — assumes the
            // draft stores times at 1000x the unit FfmpegGetFrame expects; confirm.
            let frameRes = await this.ffmpegOptions.FfmpegGetFrame(element.middleTime / 1000, element.videoPath, outImagePath);
            if (frameRes.code == 0) {
                throw new Error(frameRes.message);
            }
            this.draftTimeLine[i].framePath = outImagePath;
            console.log("已经抽取第", i + 1, "帧");
        }
    }
    /**
     * Walk the text segments in timeline order and concatenate every segment
     * that falls inside the current scene into that scene's `text`. When a
     * segment falls outside the current scene, the accumulated text is
     * flushed and accumulation restarts for the next scene.
     *
     * NOTE(review): if the last text segment lands in the else-branch (i.e.
     * starts a new scene), its text is never flushed — only the final
     * in-range segment triggers the `i == textNodes.length - 1` flush.
     * Confirm whether trailing text can be dropped here.
     * @param textNodes segments of the draft's text track
     */
    private GetTextTime(textNodes: any[]): void {
        let tempText = "";
        let count = 0;   // index of the scene currently being filled
        for (let i = 0; i < textNodes.length; i++) {
            const element = textNodes[i];
            let textStartTime = element.target_timerange.start;
            let textEndTime = textStartTime + element.target_timerange.duration;
            let textMaterialId = element.material_id;
            // Resolve the segment's text content via its material id.
            let textMaterialNode = this.FindNode(this.draftJson.materials.texts, "id", textMaterialId);
            let textContent = textMaterialNode.content;
            let textContentJson = JSON.parse(textContent);
            let text = textContentJson.text + "。";
            // All scenes filled: discard any remaining text segments.
            if (count > this.draftTimeLine.length - 1) {
                break;
            }
            if (this.TextIsInTimeLine(this.draftTimeLine[count], textStartTime, textEndTime)) {
                tempText += text;
                // Last segment overall: flush into the current scene.
                if (i == textNodes.length - 1) {
                    this.draftTimeLine[count].text = tempText;
                }
            } else {
                // Segment left the current scene: flush and move on.
                this.draftTimeLine[count].text = tempText;
                tempText = text;
                count += 1;
            }
        }
    }
    /**
     * Build one DraftTimeLineJson entry per video segment, resolving each
     * segment's source file via its material id.
     * @param videoNodes segments of the draft's video track
     */
    private GetVideoTime(videoNodes: any[]): void {
        for (let i = 0; i < videoNodes.length; i++) {
            const element = videoNodes[i];
            let startTime = element.target_timerange.start;
            let endTime = startTime + element.target_timerange.duration;
            let durationTime = element.target_timerange.duration;
            let middleTime = startTime + ((endTime - startTime) / 2);
            let videoId = element.material_id;
            let materialNode = this.FindNode(this.draftJson.materials.videos, "id", videoId);
            let videoPath = materialNode.path;
            this.draftTimeLine.push({
                name: (i + 1).toString().padStart(5, "0"),
                startTime,
                endTime,
                durationTime,
                middleTime,
                videoPath,
                text: "",
                // NOTE(review): undefined despite the declared `framePath: string`
                // — filled in later by GetDraftFrame; confirm strict-mode impact.
                framePath: undefined
            })
        }
    }
}
export default JianyingService;

View File

@ -4,7 +4,7 @@ import { LoggerStatus, OtherData } from '../../../define/enum/softwareEnum'
import { successMessage, errorMessage } from '../../Public/generalTools' import { successMessage, errorMessage } from '../../Public/generalTools'
import { GeneralResponse } from '../../../model/generalResponse' import { GeneralResponse } from '../../../model/generalResponse'
export class TaskScheduler { export class LogScheduler {
constructor() { } constructor() { }
/** /**
* *
@ -20,7 +20,7 @@ export class TaskScheduler {
type: string, type: string,
content: string, content: string,
bookTaskId: string, bookTaskId: string,
status = LoggerStatus.DOING status: LoggerStatus = LoggerStatus.DOING
): Promise<GeneralResponse.ErrorItem | GeneralResponse.SuccessItem> { ): Promise<GeneralResponse.ErrorItem | GeneralResponse.SuccessItem> {
try { try {
let log = { let log = {
@ -38,7 +38,16 @@ export class TaskScheduler {
return res return res
} catch (error) { } catch (error) {
return errorMessage(error.message, 'TaskScheduler_AddLogToDB') return errorMessage(error.message, 'LogScheduler_AddLogToDB')
} }
} }
/**
*
* @param {*} log
* @returns
*/
ReturnLogger(log: GeneralResponse.ErrorItem | GeneralResponse.SuccessItem) {
global.newWindow[0].win.webContents.send(DEFINE_STRING.SYSTEM.RETURN_LOGGER, log)
}
} }

View File

@ -0,0 +1,333 @@
import path from 'path';
import { CheckFileOrDirExist, DeleteFolderAllFile } from '../../../define/Tools/file';
import fs from 'fs';
import { LogScheduler } from '../task/logScheduler';
import { successMessage } from '../../Public/generalTools';
import { define } from '../../../define/define';
import util from 'util';
import { exec } from 'child_process';
import { ValidateJson } from '../../../define/Tools/validate';
import { DraftTimeLineJson } from '../jianying/jianyingService';
const execAsync = util.promisify(exec)
import { FfmpegOptions } from '../ffmpegOptions';
import { TimeStringToMilliseconds } from '../../../define/Tools/time';
import { isEmpty } from 'lodash';
import { Subtitle } from '../Subtitle/subtitle';
/**
 * A planned short clip: one storyboard scene's timing expressed relative to
 * the temporary chunk video it will be cut from (milliseconds).
 */
type VideoHandleShortVideoTimeLine = {
    name: string,            // zero-padded scene name, e.g. "00001"
    startTime: number;       // scene start relative to `videoPath`
    endTime: number;         // scene end relative to `videoPath`
    videoPath: string,       // temporary chunk video the scene is cut from
    duration: number         // scene length in the original video
}
/**
 * Full storyboarding pipeline for a raw video:
 *   1. detect scene cuts with the bundled Lai.exe,
 *   2. split the video into medium-size chunks, then per-scene clips,
 *   3. extract one key frame and one audio clip per scene,
 *   4. recognize subtitles per scene with local_whisper,
 *   5. persist the resulting timeline JSON.
 *
 * Convention used throughout: a GeneralResponse with `code == 0` signals
 * failure (consistent with the rest of this codebase).
 */
class VideoHandle {
    logScheduler: LogScheduler;     // pushes log lines to the renderer window
    ffmpegOptions: FfmpegOptions;   // ffmpeg wrappers (cut / frame / audio)
    subtitle: Subtitle;             // local_whisper subtitle recognition
    constructor() {
        this.logScheduler = new LogScheduler()
        this.ffmpegOptions = new FfmpegOptions();
        this.subtitle = new Subtitle();
    }
    /**
     * Run the whole pipeline for `videoPath`.
     * @param videoPath source video file
     * @param sensitivity scene-cut sensitivity forwarded to Lai.exe
     * @throws on any failed step (missing tools, ffmpeg errors, mismatched
     *         scene counts, whisper failure)
     */
    public async StartStoryboarding(videoPath: string, sensitivity: number) {
        // Reset (or create) the per-scene working folder.
        let framePath = path.resolve(global.config.project_path, "data/frame");
        if (await CheckFileOrDirExist(framePath)) {
            await DeleteFolderAllFile(framePath);
        } else {
            await fs.promises.mkdir(framePath, { recursive: true })
        }
        // Reset (or create) the key-frame output folder.
        let inputPath = path.resolve(global.config.project_path, "tmp/input_crop");
        if (await CheckFileOrDirExist(inputPath)) {
            await DeleteFolderAllFile(inputPath);
        } else {
            await fs.promises.mkdir(inputPath, { recursive: true })
        }
        // The bundled local_whisper binary must be installed.
        let localwhisperPath = path.resolve(define.scripts_path, "localWhisper/local_whisper.exe");
        if (!await CheckFileOrDirExist(localwhisperPath)) {
            throw new Error('localwhisper 不存在请查看文档安装localwhisper插件环境');
        }
        // Remove any previous timeline output.
        let frameJson = path.resolve(global.config.project_path, "data/frameTimeLine.json");
        if (await CheckFileOrDirExist(frameJson)) {
            await fs.promises.unlink(frameJson);
        }
        // 1. Scene detection.
        let frameTimeList = await this.ComputedFrameTime(videoPath, frameJson, sensitivity);
        // 2a. Plan the cut points: scenes grouped into ~30s chunk videos.
        let shortVideo = [] as VideoHandleShortVideoTimeLine[];
        shortVideo = await this.VideoShortClip(frameTimeList, videoPath, 0.5 * 60 * 1000);
        // The plan must cover every detected scene.
        if (shortVideo.length != frameTimeList.length) {
            throw new Error('分镜数据和切割视频数据不一致,请检查');
        }
        // 2b. Cut one clip per scene out of the chunk videos.
        console.log(shortVideo);
        let subVideoPath = await this.CutViodeToShortClip(shortVideo, frameTimeList) as string[];
        // 3a. One key frame per scene.
        await this.GetFrameFromCutVideo(shortVideo, inputPath, subVideoPath, frameTimeList);
        // 3b. One audio clip per scene.
        await this.SplitAudio(frameTimeList, framePath);
        // 4. Subtitle recognition (output and audio both live in framePath).
        await this.subtitle.GetTextByLocalWhisper(frameTimeList, framePath, framePath, localwhisperPath);
        console.log(frameTimeList);
        // 5. Persist the completed timeline.
        await fs.promises.writeFile(frameJson, JSON.stringify(frameTimeList), 'utf-8');
    }
    /**
     * Extract the audio track of every scene clip into an mp3 next to it and
     * record it as the scene's audioPath.
     * @param frameTimeList scenes; each must already have a subVideoPath
     * @param framePath output directory for the mp3 files
     * @throws when a clip is missing or ffmpeg extraction fails
     */
    async SplitAudio(frameTimeList: DraftTimeLineJson[], framePath: string) {
        for (let i = 0; i < frameTimeList.length; i++) {
            const element = frameTimeList[i];
            if (isEmpty(element.subVideoPath)) {
                throw new Error('没有找到待分离的视频数据,请检查');
            }
            if (!await CheckFileOrDirExist(element.subVideoPath)) {
                throw new Error(`视频片段 ${element.subVideoPath} 不存在,请检查`);
            }
            let audioPath = path.resolve(framePath, `${element.name}.mp3`);
            // Overwrite any previous run's output.
            if (await CheckFileOrDirExist(audioPath)) {
                await fs.promises.unlink(audioPath);
            }
            let res = await this.ffmpegOptions.FfmpegExtractAudio(element.subVideoPath, audioPath)
            if (res.code == 0) {
                throw new Error(res.message);
            }
            if (!await CheckFileOrDirExist(audioPath)) {
                throw new Error(`分离音频 ${audioPath} 失败,没有找到分离后的音频文件,请检查`);
            }
            element.audioPath = audioPath;
        }
    }
    /**
     * Extract the middle frame of every scene (relative to its chunk video)
     * into `inputPath` and record it as the scene's framePath.
     * @param shortVideo per-scene timings relative to their chunk videos
     * @param inputPath output directory for the frame images
     * @param subVideoPath per-scene clip paths (only used for a length check here)
     * @param frameTimeList scenes to update with framePath
     * @returns the list of written image paths
     * @throws on count mismatch or ffmpeg failure
     */
    async GetFrameFromCutVideo(shortVideo: VideoHandleShortVideoTimeLine[], inputPath: string, subVideoPath: string[], frameTimeList: DraftTimeLineJson[]) {
        if (shortVideo.length != subVideoPath.length) {
            throw new Error('视频片段和分镜数据不一致');
        }
        if (shortVideo.length != frameTimeList.length) {
            throw new Error('视频片段和分镜数据不一致');
        }
        let imagePath = [] as string[];
        for (let i = 0; i < shortVideo.length; i++) {
            const element = shortVideo[i];
            if (!frameTimeList[i]) {
                throw new Error('分镜数据和切割视频数据不一致,请检查');
            }
            // Midpoint of the scene, in the chunk video's own time base.
            let middleTime = element.startTime + ((element.endTime - element.startTime) / 2);
            let outImagePath = path.resolve(inputPath, `${element.name}.png`);
            let res = await this.ffmpegOptions.FfmpegGetFrame(middleTime, element.videoPath, outImagePath);
            if (res.code == 0) {
                throw new Error(res.message);
            }
            imagePath.push(outImagePath);
            // ffmpeg may "succeed" without writing a file — verify it exists.
            if (!await CheckFileOrDirExist(outImagePath)) {
                throw new Error(`抽取的图片 ${outImagePath} 不存在,请检查`);
            }
            frameTimeList[i].framePath = outImagePath;
        }
        return imagePath;
    }
    /**
     * Cut one mp4 per scene out of the chunk videos and record each as the
     * scene's subVideoPath. (Method name typo "Viode" kept — renaming would
     * break callers.)
     * @param shortVideo per-scene timings relative to their chunk videos
     * @param frameTimeList scenes to update with subVideoPath
     * @returns the list of written clip paths
     * @throws on ffmpeg failure or missing output
     */
    async CutViodeToShortClip(shortVideo: VideoHandleShortVideoTimeLine[], frameTimeList: DraftTimeLineJson[]): Promise<string[]> {
        let subVideoPaths = [] as string[];
        for (let i = 0; i < shortVideo.length; i++) {
            const element = shortVideo[i];
            if (!frameTimeList[i]) {
                throw new Error('分镜数据和切割视频数据不一致,请检查');
            }
            let subVideoPath = path.resolve(global.config.project_path, `data/frame/${element.name}.mp4`);
            // Cut this scene out of its chunk video.
            let res = await this.ffmpegOptions.FfmpegCutVideo(
                element.startTime,
                element.endTime,
                element.videoPath,
                subVideoPath
            )
            subVideoPaths.push(subVideoPath);
            if (res.code == 0) {
                throw new Error(res.message);
            }
            if (!await CheckFileOrDirExist(subVideoPath)) {
                throw new Error(`截取视频片段 ${subVideoPath} 不存在,请检查`);
            }
            frameTimeList[i].subVideoPath = subVideoPath;
        }
        return subVideoPaths;
    }
    /**
     * Group consecutive scenes into chunk videos of roughly `duration` ms and
     * cut those chunks out of the source video. Each returned entry holds a
     * scene's timing RE-BASED onto the chunk that contains it, so later
     * per-scene cuts only need to read a short chunk instead of the whole
     * source.
     * @param frameTimeList timeline entries (startTime/endTime in ms)
     * @param videoPath source video file
     * @param duration target chunk length in ms (caller uses 30s)
     * @returns one entry per scene, same order as frameTimeList
     * @throws when a chunk cut fails
     */
    public async VideoShortClip(frameTimeList: any[], videoPath: string, duration: number): Promise<VideoHandleShortVideoTimeLine[]> {
        let shortVideo = [] as VideoHandleShortVideoTimeLine[];
        let durationTime = 0; // accumulated scene time in the current chunk
        // let duration = 5 * 60 * 1000; // 5 minutes
        let tempCount = 0;    // chunk index, used in the chunk file name
        let shotVideoPath = path.resolve(global.config.project_path, `data/temp_frame_${tempCount}.mp4`); // current chunk output path
        let startTime = 0;    // chunk start in the source video (ms)
        let endTime = 0;      // chunk end in the source video (ms)
        let lastEndTime = 0;  // source-time offset of the current chunk's start
        for (let i = 0; i < frameTimeList.length; i++) {
            const item = frameTimeList[i];
            // Scene timing re-based onto the chunk being accumulated.
            let temRes = {
                name: (i + 1).toString().padStart(5, "0"),
                startTime: item.startTime - lastEndTime,
                endTime: item.endTime - lastEndTime,
                videoPath: shotVideoPath,
                duration: item.endTime - item.startTime
            }
            endTime = item.endTime;
            durationTime += item.endTime - item.startTime;
            if (durationTime > duration) { // chunk is full: cut it now
                let res = await this.ffmpegOptions.FfmpegCutVideo(
                    startTime,
                    endTime,
                    videoPath,
                    shotVideoPath
                )
                if (res.code == 0) {
                    throw new Error(res.message);
                }
                // Start a new chunk beginning where this one ended.
                lastEndTime = item.endTime;
                tempCount++;
                durationTime = 0;
                startTime = endTime;
                endTime = 0;
                shotVideoPath = path.resolve(global.config.project_path, `data/temp_frame_${tempCount}.mp4`);
            }
            shortVideo.push(temRes)
        }
        // Cut the final, partially-filled chunk (if any scenes remain in it).
        if (durationTime > 0) {
            let res = await this.ffmpegOptions.FfmpegCutVideo(
                startTime,
                endTime,
                videoPath,
                shotVideoPath
            )
            if (res.code == 0) {
                throw new Error(res.message);
            }
        }
        // Persist the plan for debugging/inspection.
        let shortVideoJson = path.resolve(global.config.project_path, "data/shortVideo.json");
        await fs.promises.writeFile(shortVideoJson, JSON.stringify(shortVideo), 'utf-8');
        return shortVideo;
    }
    /**
     * Run the bundled Lai.exe scene detector over the video and convert its
     * JSON output ([start, end] time strings) into DraftTimeLineJson entries
     * with millisecond times.
     * @param videoPath source video file
     * @param frameJson path where Lai.exe writes its scene-cut JSON
     * @param sensitivity scene-cut sensitivity forwarded to Lai.exe
     * @returns one timeline entry per detected scene
     * @throws when the video is missing, the detector produces no/invalid
     *         output, or zero scenes were found
     */
    public async ComputedFrameTime(videoPath: string, frameJson: string, sensitivity: number): Promise<DraftTimeLineJson[]> {
        if (!await CheckFileOrDirExist(videoPath)) {
            throw new Error('视频文件不存在,请检查');
        }
        this.logScheduler.ReturnLogger(successMessage(null, "前置检查结束,开始进行分镜", "VideoHandle_StartStoryboarding"));
        // Invoke the external scene detector ("-ka" mode).
        let command = `"${path.join(
            define.scripts_path,
            'Lai.exe'
        )}" "-ka" "${videoPath}" "${frameJson}" "${sensitivity}"`
        const output = await execAsync(command, {
            maxBuffer: 1024 * 1024 * 10,
            encoding: 'utf-8'
        })
        // stderr output is treated as a warning, not a failure.
        if (output.stderr != '') {
            let error_msg = `分镜成功,但有警告提示:${output.stderr}`
            this.logScheduler.ReturnLogger(successMessage(null, error_msg, "VideoHandle_StartStoryboarding"));
        }
        // The detector must have produced the expected output file.
        let josnIsExist = await CheckFileOrDirExist(frameJson);
        if (!josnIsExist) {
            let error_message = `分镜失败,没有找到对应的分镜输出文件:${frameJson}`
            this.logScheduler.ReturnLogger(successMessage(null, error_message, "VideoHandle_StartStoryboarding"));
            throw new Error(error_message);
        }
        let frameJsonDataString = await fs.promises.readFile(frameJson, 'utf-8');
        let res = ValidateJson(frameJsonDataString);
        if (!res) {
            throw new Error('分镜数据不是有效的JSON格式请检查');
        }
        let frameJsonData = JSON.parse(frameJsonDataString);
        if (frameJsonData.length <= 0) {
            let error_msg = `分镜失败,没有找到对应的分镜数据`
            this.logScheduler.ReturnLogger(successMessage(null, error_msg, "VideoHandle_StartStoryboarding"));
            throw new Error(error_msg);
        }
        let result = [] as DraftTimeLineJson[];
        // Convert each [startString, endString] pair into a timeline entry.
        for (let i = 0; i < frameJsonData.length; i++) {
            const element = frameJsonData[i];
            let st = TimeStringToMilliseconds(element[0]);
            let et = TimeStringToMilliseconds(element[1]);
            let tempObject = {
                name: (i + 1).toString().padStart(5, "0"),
                startTime: st,
                endTime: et,
                middleTime: st + ((et - st) / 2),
                videoPath: videoPath,
                framePath: '',
                text: "",
                durationTime: et - st
            } as DraftTimeLineJson;
            result.push(tempObject);
        }
        return result;
    }
}
export default VideoHandle;

View File

@ -13,7 +13,7 @@ import { define } from '../../define/define'
import { LOGGER_DEFINE } from '../../define/logger_define' import { LOGGER_DEFINE } from '../../define/logger_define'
import axios from 'axios' import axios from 'axios'
import { Base64ToFile, GetImageBase64 } from '../../define/Tools/image' import { Base64ToFile, GetImageBase64 } from '../../define/Tools/image'
import { TaskScheduler } from './task/taskScheduler'; import { LogScheduler } from './task/logScheduler';
import { LoggerStatus, OtherData, ResponseMessageType } from '../../define/enum/softwareEnum'; import { LoggerStatus, OtherData, ResponseMessageType } from '../../define/enum/softwareEnum';
import { basicApi } from '../../api/apiBasic'; import { basicApi } from '../../api/apiBasic';
import { FfmpegOptions } from './ffmpegOptions'; import { FfmpegOptions } from './ffmpegOptions';
@ -28,7 +28,7 @@ import { BookTaskService } from '../../define/db/service/Book/bookTaskService';
export class Watermark { export class Watermark {
softwareService: SoftwareService softwareService: SoftwareService
taskScheduler: TaskScheduler; logScheduler: LogScheduler;
bookService: BookService bookService: BookService
bookTaskDetailService: BookTaskDetailService bookTaskDetailService: BookTaskDetailService
bookTaskService: BookTaskService bookTaskService: BookTaskService
@ -39,8 +39,8 @@ export class Watermark {
if (!this.softwareService) { if (!this.softwareService) {
this.softwareService = await SoftwareService.getInstance() this.softwareService = await SoftwareService.getInstance()
} }
if (!this.taskScheduler) { if (!this.logScheduler) {
this.taskScheduler = new TaskScheduler() this.logScheduler = new LogScheduler()
} }
if (!this.bookService) { if (!this.bookService) {
this.bookService = await BookService.getInstance() this.bookService = await BookService.getInstance()
@ -449,7 +449,7 @@ export class Watermark {
}, DEFINE_STRING.BOOK.REMOVE_WATERMARK_RETURN) }, DEFINE_STRING.BOOK.REMOVE_WATERMARK_RETURN)
this.taskScheduler.AddLogToDB(book.id, book.type, `${element.name} 去除水印完成`, element.bookTaskId, LoggerStatus.SUCCESS) this.logScheduler.AddLogToDB(book.id, book.type, `${element.name} 去除水印完成`, element.bookTaskId, LoggerStatus.SUCCESS)
} }
// 全部完毕 // 全部完毕
if (operateBookType == OperateBookType.BOOKTASKDETAIL) { if (operateBookType == OperateBookType.BOOKTASKDETAIL) {

File diff suppressed because it is too large Load Diff

View File

@ -73,9 +73,8 @@ const api = {
}, },
// 分镜语音识别消息 // 分镜语音识别消息
StartStoryboarding: async (value) => { StartStoryboarding: async (value) =>
let res = await ipcRenderer.invoke(DEFINE_STRING.START_STORY_BOARDING, value) await ipcRenderer.invoke(DEFINE_STRING.START_STORY_BOARDING, value),
},
// 获取设置的初始数据 // 获取设置的初始数据
getSettingDafultData: async (callback) => getSettingDafultData: async (callback) =>

View File

@ -37,7 +37,6 @@
label-placement="left" label-placement="left"
inline inline
:model="frameValue" :model="frameValue"
:rules="rules"
size="medium" size="medium"
> >
<n-form-item label="分割敏感度" path="sensitivity" style="width: 300px"> <n-form-item label="分割敏感度" path="sensitivity" style="width: 300px">
@ -68,7 +67,7 @@
<n-code style="padding: 0; font-size: small" :code="code" language="js" word-wrap /> <n-code style="padding: 0; font-size: small" :code="code" language="js" word-wrap />
</template> </template>
<script> <script setup>
import { defineComponent, ref, onMounted, toRaw } from 'vue' import { defineComponent, ref, onMounted, toRaw } from 'vue'
import { import {
NSelect, NSelect,
@ -83,39 +82,26 @@ import {
NForm, NForm,
NFormItem, NFormItem,
NSlider, NSlider,
NInput NInput,
useDialog
} from 'naive-ui' } from 'naive-ui'
import { DEFINE_STRING } from '../../../../define/define_string' import { DEFINE_STRING } from '../../../../define/define_string'
export default defineComponent({
components: {
NSelect,
NButton,
NSpin,
NDivider,
NCode,
NTabs,
NTabPane,
NSpace,
NForm,
NFormItem,
NSlider,
NInput
},
setup() {
let options = []
let out_dir = ref(null)
let selectedValue = ref(null)
let show = ref(false)
let code = ref('')
const message = useMessage()
let storyLoading = ref(false)
let frameValue = ref({ let options = []
let out_dir = ref(null)
let selectedValue = ref(null)
let show = ref(false)
let code = ref('')
const message = useMessage()
let storyLoading = ref(false)
let dialog = useDialog()
let frameValue = ref({
sensitivity: 30, sensitivity: 30,
video_path: null video_path: null
}) })
onMounted(() => { onMounted(() => {
window.api.getDraftFileList((value) => { window.api.getDraftFileList((value) => {
value.forEach((element) => { value.forEach((element) => {
let obj = { let obj = {
@ -138,9 +124,9 @@ export default defineComponent({
} }
code.value = code.value + '\n' + value.data code.value = code.value + '\n' + value.data
}) })
}) })
async function getFrameFunc(e) { async function getFrameFunc(e) {
out_dir = window.config.project_path out_dir = window.config.project_path
if (selectedValue.value == null || selectedValue.value == undefined) { if (selectedValue.value == null || selectedValue.value == undefined) {
message.error('请选择剪映草稿和输出草稿') message.error('请选择剪映草稿和输出草稿')
@ -158,9 +144,9 @@ export default defineComponent({
code.value = value.data code.value = value.data
show.value = false show.value = false
}) })
} }
function selectExportFolder(e) { function selectExportFolder(e) {
window.api.selectFolder(null, (value) => { window.api.selectFolder(null, (value) => {
if (value.length <= 0) { if (value.length <= 0) {
message.error('必须选择输出文件夹位置') message.error('必须选择输出文件夹位置')
@ -168,12 +154,12 @@ export default defineComponent({
} }
out_dir.value = value[0] out_dir.value = value[0]
}) })
} }
/** /**
* 选择指定的视频文件 * 选择指定的视频文件
*/ */
async function GetVideoFile() { async function GetVideoFile() {
await window.api.SelectFile(['mp4'], (value) => { await window.api.SelectFile(['mp4'], (value) => {
if (value.code == 0) { if (value.code == 0) {
message.error(value.message) message.error(value.message)
@ -181,12 +167,12 @@ export default defineComponent({
} }
frameValue.value.video_path = value.value frameValue.value.video_path = value.value
}) })
} }
/** /**
* 开始分镜执行分镜任务 * 开始分镜执行分镜任务
*/ */
async function StartStoryboarding() { async function StartStoryboarding() {
storyLoading.value = true storyLoading.value = true
if (frameValue.value.video_path == null) { if (frameValue.value.video_path == null) {
message.error('选择分镜的视频地址') message.error('选择分镜的视频地址')
@ -202,42 +188,36 @@ export default defineComponent({
return return
} }
await window.api.StartStoryboarding(toRaw(frameValue.value)) let res = await window.api.StartStoryboarding(toRaw(frameValue.value))
if (res.code == 0) {
dialog.error({
title: '抽帧错误输出',
content: res.message
})
return
} else {
dialog.success({
title: '抽帧成功',
content: '视频分镜,抽帧,问题提取都已完成'
})
} }
}
/** /**
* 打开环境安装网站 * 打开环境安装网站
*/ */
function OpenTeachDoc() { function OpenTeachDoc() {
window.api.OpenUrl( window.api.OpenUrl(
'https://pvwu1oahp5m.feishu.cn/docx/VrBVd2KUDosmNfxat3OceWuInjd?from=from_copylink' 'https://pvwu1oahp5m.feishu.cn/docx/VrBVd2KUDosmNfxat3OceWuInjd?from=from_copylink'
) )
} }
function openExportFolder() { function openExportFolder() {
window.system.OpenFolder({ window.system.OpenFolder({
folderPath: 'tmp/input_crop', folderPath: 'tmp/input_crop',
baseProject: true baseProject: true
}) })
} }
return {
selectedValue,
options,
out_dir,
getFrameFunc,
selectExportFolder,
show,
code,
frameValue,
GetVideoFile,
StartStoryboarding,
OpenTeachDoc,
storyLoading,
openExportFolder
}
}
})
</script> </script>
<style> <style>

View File

@ -47,7 +47,6 @@ export default defineComponent({
onMounted(async () => { onMounted(async () => {
// //
window.api.setEventListen([DEFINE_STRING.SYSTEM.RETURN_LOGGER], (value) => { window.api.setEventListen([DEFINE_STRING.SYSTEM.RETURN_LOGGER], (value) => {
debuger
if (value.code == 0) { if (value.code == 0) {
message.error('添加日志输出失败,但是不影响使用') message.error('添加日志输出失败,但是不影响使用')
logger.value += value.message + '\n' logger.value += value.message + '\n'

View File

@ -1,9 +1,8 @@
<template> <template>
<div style="width: 100%; height: 100%"> <div style="width: 100%; height: 100%">
<div <div style="font-size: 15px; display: flex; justify-content: center; width: 100%; height: 100%">
id="showmessage" <div id="showmessage"></div>
style="font-size: 15px; display: flex; justify-content: center; width: 100%; height: 100%" </div>
></div>
</div> </div>
</template> </template>
@ -34,6 +33,7 @@ async function GetRemoteSystemInformation() {
iframe.style.padding = '0' iframe.style.padding = '0'
iframe.style.height = '98vh' // Adjust the height as needed iframe.style.height = '98vh' // Adjust the height as needed
showMessageDiv.innerHTML = '' showMessageDiv.innerHTML = ''
showMessageDiv.style.width = '100%'
showMessageDiv.appendChild(iframe) showMessageDiv.appendChild(iframe)
} else { } else {
showMessageDiv.innerHTML = remoteHomePage showMessageDiv.innerHTML = remoteHomePage