From f4d042f69903b889cb25db33bc1728d88e264935 Mon Sep 17 00:00:00 2001
From: lq1405 <2769838458@qq.com>
Date: Sun, 20 Oct 2024 23:19:22 +0800
Subject: [PATCH] V 3.1.7
 1. Remove the bundled local whisper (now requires a separate install)
 2. Refactor the release's underlying dependencies and remove external dependencies
 3. Fix the home page's incompatibility with dark mode
 4. Fix the error when merging SD prompts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .gitignore | 10 +
 package-lock.json | 4 +-
 package.json | 5 +-
 resources/scripts/Lai.py | 28 +-
 resources/scripts/Lai.spec | 23 +-
 .../scripts/{lama_inpaint.spec => Lai_2.spec} | 26 +-
 .../scripts/__pycache__/clip.cpython-310.pyc | Bin 11432 -> 11432 bytes
 .../iamge_to_video.cpython-310.pyc | Bin 8949 -> 8949 bytes
 .../__pycache__/shotSplit.cpython-310.pyc | Bin 6127 -> 4048 bytes
 resources/scripts/db/book.realm.lock | Bin 1416 -> 1416 bytes
 resources/scripts/db/software.realm.lock | Bin 1416 -> 1416 bytes
 resources/scripts/db/tts.realm.lock | Bin 1416 -> 1416 bytes
 resources/scripts/localWhisper/install.bat | 2 +
 .../scripts/localWhisper/local_whisper.py | 170 ++
 .../scripts/localWhisper/local_whisper.spec | 50 +
 .../scripts/localWhisper/public_tools.py | 351 ++++
 resources/scripts/localWhisper/shotSplit.py | 307 +++
 resources/scripts/requirements.txt | 298 ---
 resources/scripts/shotSplit.py | 222 +--
 src/main/Service/Book/ReverseBook.ts | 8 +-
 src/main/Service/Book/basicReverse.ts | 48 +-
 src/main/Service/Book/bookFrame.ts | 11 +-
 src/main/Service/Book/bookVideo.ts | 6 +
 src/main/Service/MJ/mj.ts | 8 +-
 src/main/Service/SD/sd.ts | 14 +-
 .../Service/ServiceBasic/bookServiceBasic.ts | 2 +-
 src/main/Service/Subtitle/subtitle.ts | 104 +-
 src/main/Service/Subtitle/subtitleService.ts | 4 +-
 src/main/Service/ffmpegOptions.ts | 4 +-
 src/main/Service/jianying/jianyingService.ts | 216 ++
 .../{taskScheduler.ts => logScheduler.ts} | 15 +-
 src/main/Service/videoService/videoHandle.ts | 333 ++++
 src/main/Service/watermark.ts | 10 +-
 src/main/func.js | 1740 ++++++++---------
 src/preload/index.js | 5 +-
 .../src/components/Backstep/GetFrame.vue | 284 ++-
 .../ManageBook/ManageBookShowLogger.vue | 1 -
 .../src/components/Home/ShowMessage.vue | 8 +-
 38 files changed, 2749 insertions(+), 1568 deletions(-)
 rename resources/scripts/{lama_inpaint.spec => Lai_2.spec} (61%)
 create mode 100644 resources/scripts/localWhisper/install.bat
 create mode 100644 resources/scripts/localWhisper/local_whisper.py
 create mode 100644 resources/scripts/localWhisper/local_whisper.spec
 create mode 100644 resources/scripts/localWhisper/public_tools.py
 create mode 100644 resources/scripts/localWhisper/shotSplit.py
 delete mode 100644 resources/scripts/requirements.txt
 create mode 100644 src/main/Service/jianying/jianyingService.ts
 rename src/main/Service/task/{taskScheduler.ts => logScheduler.ts} (78%)
 create mode 100644 src/main/Service/videoService/videoHandle.ts

diff --git a/.gitignore b/.gitignore
index 98a05a8..8cb6184 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,13 @@ resources/config*
 *.log*
resources/scripts/db/book.realm.lock resources/scripts/db/software.realm.lock +resources/scripts/localWhisper/__pycache__/* +resources/scripts/laitool/.venv +resources/scripts/laitool/build +resources/scripts/laitool/dist +resources/scripts/localWhisper/build/* +resources/scripts/__pycache__/* +resources/scripts/db/* +resources/scripts/localWhisper/.venv/* +resources/scripts/localWhisper/_internal/* +resources/scripts/localWhisper/local_whisper.exe diff --git a/package-lock.json b/package-lock.json index b53f840..7b3a63f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "laitool", - "version": "3.1.6", + "version": "3.1.7", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "laitool", - "version": "3.1.6", + "version": "3.1.7", "hasInstallScript": true, "dependencies": { "@alicloud/alimt20181012": "^1.2.0", diff --git a/package.json b/package.json index 4ec01eb..1aea370 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "laitool", - "version": "3.1.6", + "version": "3.1.7", "description": "An AI tool for image processing, video processing, and other functions.", "main": "./out/main/index.js", "author": "laitool.cn", @@ -84,6 +84,7 @@ "resources/image/style/**", "resources/image/zhanwei.png", "resources/scripts/model/**", + "resources/scripts/Lai.exe", "resources/scripts/discordScript.js", "resources/tmp/**", "resources/icon.ico" @@ -92,4 +93,4 @@ "icon": "./resources/icon.ico" } } -} +} \ No newline at end of file diff --git a/resources/scripts/Lai.py b/resources/scripts/Lai.py index be3a4d4..bdadd4e 100644 --- a/resources/scripts/Lai.py +++ b/resources/scripts/Lai.py @@ -4,7 +4,6 @@ import json import os import sys import clip -import getgrame import Push_back_Prompt import public_tools import shotSplit @@ -56,6 +55,8 @@ if sys.argv[1] == "-c": clip = clip.Clip(cript_directory, sys.argv[2], sys.argv[3]) clip.MergeVideosAndClip() pass + + # 获取字体 elif sys.argv[1] == "-f": # 获取本地已安装的字幕。然后返回 @@ -77,22 +78,17 @@ elif sys.argv[1] == "-p": Push_back_Prompt.init(sys.argv[2], sys.argv[3], sys.argv[4]) pass -# 剪映抽帧 -elif sys.argv[1] == "-k": - # print("") - getgrame.init(sys.argv[2], sys.argv[3], sys.argv[4]) - pass - elif sys.argv[1] == "-ka": shotSplit.get_fram(sys.argv[2], sys.argv[3], sys.argv[4]) pass -# 智能分镜。字幕识别 -elif sys.argv[1] == "-a": - print("开始算法分镜:" + sys.argv[2] + " -- 输出文件夹:" + sys.argv[3]) - shotSplit.init(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]) -# 本地提取音频 -elif sys.argv[1] == "-t": - print("开始提取文字:" + sys.argv[2]) - shotSplit.GetTextTask(sys.argv[2], sys.argv[3], sys.argv[4]) - pass +# # 智能分镜。字幕识别 +# elif sys.argv[1] == "-a": +# print("开始算法分镜:" + sys.argv[2] + " -- 输出文件夹:" + sys.argv[3]) +# shotSplit.init(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6]) + +# # 本地提取音频 +# elif sys.argv[1] == "-t": +# print("开始提取文字:" + sys.argv[2]) +# shotSplit.GetTextTask(sys.argv[2], sys.argv[3], sys.argv[4]) +# pass diff --git a/resources/scripts/Lai.spec b/resources/scripts/Lai.spec index b8e6262..92f47af 100644 --- a/resources/scripts/Lai.spec +++ b/resources/scripts/Lai.spec @@ -1,36 +1,34 @@ # -*- mode: python ; coding: utf-8 -*- -from PyInstaller.building.datastruct import Tree -from PyInstaller.utils.hooks import get_package_paths - -PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1] -datas = [(PACKAGE_DIRECTORY, 'faster_whisper')] - a = Analysis( ['Lai.py'], pathex=[], binaries=[], - datas=datas, + datas=[], hiddenimports=[], hookspath=[], hooksconfig={}, runtime_hooks=[], 
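# Build note (a sketch inferred from the Lai.spec / Lai.py changes above): the EXE now
# bundles a.binaries and a.datas directly (one-file style, no COLLECT step), and the
# faster_whisper data files are no longer shipped with it. The flags that stay wired up
# in Lai.py are roughly:
#   Lai.exe -c <arg1> <arg2>                       # merge videos and clip (args per clip.Clip)
#   Lai.exe -f                                     # list locally installed fonts
#   Lai.exe -p <arg1> <arg2> <arg3>                # push-back prompt (args per Push_back_Prompt.init)
#   Lai.exe -ka <video> <out_json> <sensitivity>   # scene-split timecodes only
# The whisper-backed "-a" / "-t" branches are commented out here and move to the new
# localWhisper/local_whisper.py tool added later in this patch.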
excludes=[], noarchive=False, + optimize=0, ) pyz = PYZ(a.pure) exe = EXE( pyz, a.scripts, + a.binaries, + a.datas, [], - exclude_binaries=True, name='Lai', debug=False, bootloader_ignore_signals=False, strip=False, upx=True, + upx_exclude=[], + runtime_tmpdir=None, console=True, disable_windowed_traceback=False, argv_emulation=False, @@ -38,12 +36,3 @@ exe = EXE( codesign_identity=None, entitlements_file=None, ) -coll = COLLECT( - exe, - a.binaries, - a.datas, - strip=False, - upx=True, - upx_exclude=[], - name='Lai', -) diff --git a/resources/scripts/lama_inpaint.spec b/resources/scripts/Lai_2.spec similarity index 61% rename from resources/scripts/lama_inpaint.spec rename to resources/scripts/Lai_2.spec index 521ec45..b8e6262 100644 --- a/resources/scripts/lama_inpaint.spec +++ b/resources/scripts/Lai_2.spec @@ -1,11 +1,17 @@ # -*- mode: python ; coding: utf-8 -*- +from PyInstaller.building.datastruct import Tree +from PyInstaller.utils.hooks import get_package_paths + +PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1] +datas = [(PACKAGE_DIRECTORY, 'faster_whisper')] + a = Analysis( - ['lama_inpaint.py'], + ['Lai.py'], pathex=[], binaries=[], - datas=[], + datas=datas, hiddenimports=[], hookspath=[], hooksconfig={}, @@ -18,16 +24,13 @@ pyz = PYZ(a.pure) exe = EXE( pyz, a.scripts, - a.binaries, - a.datas, [], - name='lama_inpaint', + exclude_binaries=True, + name='Lai', debug=False, bootloader_ignore_signals=False, strip=False, upx=True, - upx_exclude=[], - runtime_tmpdir=None, console=True, disable_windowed_traceback=False, argv_emulation=False, @@ -35,3 +38,12 @@ exe = EXE( codesign_identity=None, entitlements_file=None, ) +coll = COLLECT( + exe, + a.binaries, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='Lai', +) diff --git a/resources/scripts/__pycache__/clip.cpython-310.pyc b/resources/scripts/__pycache__/clip.cpython-310.pyc index c422debf68fb5f508824fb506968e594f2f306d6..8d6060975786dd31b1fcfda90cd581b5a6e85b44 100644 GIT binary patch delta 20 acmZ1xxgwG~pO=@50SI`zwr}K~s{;T!1qE~f delta 20 acmZ1xxgwG~pO=@50SNxf)NbUSs{;T$?FGI7 diff --git a/resources/scripts/__pycache__/iamge_to_video.cpython-310.pyc b/resources/scripts/__pycache__/iamge_to_video.cpython-310.pyc index 9b2cfa71d6d67744c3b1fc733989624142625427..96360e9a6742773e474690bfd6881ab05111f917 100644 GIT binary patch delta 20 acmezB`qh;?pO=@50SFqFZ`{cJN(lf)=mxU@ delta 20 acmezB`qh;?pO=@50SNxf)NbT{r33&*8wPU# diff --git a/resources/scripts/__pycache__/shotSplit.cpython-310.pyc b/resources/scripts/__pycache__/shotSplit.cpython-310.pyc index 39e12bbdeab1be888a6acdc1bd02f50178799241..2e86e997b55b9c3f9257260a096926a0db4b1590 100644 GIT binary patch delta 805 zcmY+B&ubGw6vuax-DI-aB%3Db4}YXF{$kOzXtA`R7)3%sdJvQfdkINr6IVCcFxdq3 z5(Q5l#IPq@K|xV3o&xGYPyPdX687FgPwm;0?`;BYX5jN?-kUe`zHbf|f6vG5SWFgR zynB{v+`ll0pCYdYmo`h1rRWM1SjdVs<9eJ3EX?S0K~JyEOpUKMTJ703OM^Un4he&B4+OT1JaU z(21&e6rmtu08ir8R>SZ@b(=S<4nKi`lNb;m+GdL<-BtNw5ndLwG*5xiWxMNC>bAuU zuB&FV+Au0d9-hWj+Guwx&f~VhGw!yW&rE}p=MY%AUnLayM_!&#J{A2y%I>%5$3Dt< zA}%!BOZ*%roJZu*87%Z#fND(_uUuVu#0wZ%0Nm3EUj(bkBdL=rk7V%`%t#_u5k<_A zbhT$P!yXH{jM{h!B07Z{Dwf%CJklF&+EwSl=&!~fMq@Y0x;vZxOXl6fbXmooA8YqD zx17-me&;Cm$!xLR#F)5pC$ki~15qvQzMg*B_y5EfbQ|-qZck}2DiF6yMpyUayb1xtyew(o!fEqy>s7D&;7L5Kzj|5MWhUHlDS!>Duegu9GHO z<3c#1mLe)kNQeqzfuIqWP=y2t1QLG$d_&r=;Hde;rwTQQb-aqVa$=A_DLJxv5E(72N+cj;YRR-CM! 
zg*Me$SMJukp)EnR7X`hCCTR-FUYe#EDEnv^%|h8v*U@e$bF_!{LODSDXg`#LG)D)Z z9HPp8AwRf;Q^=DrHEL7KeaJlV^tm^Go>-vd}yy=aYjl6yq?|99AwTn@;alz88shI;(CVJy(Kune4K&89`Xu zjiy_gcP0jsSA)1DV*RRSA#vy3>p=`9$EvU-Hl`5L2+&VpT>zoN%u2}$V?~#hO`mN* z#pS4wCtSP2dYX%gt-G?&knd%E*si%X-zd5cwOE+7%Vx~a< z+zpWpH9t@7Y3u{JfD3*8!}U$T|8Dqu*APJ{33bsID48Wh@X3TYFSP`aWM82om8g7N zoF#RM*A%RQ4mGh3Io=_wD{{<_PpI>aL?E>YW-Uz~m+CS&O09Jikb8uux7U?Gp3njX z^O)U`mH&juo&>$*Uo{VlPD5ehW7)}9J*a0d3B#YA2F_}%u& zQ@?z1?94acEY7{rK01%WsaE^#W6K-&PkTOO>h_}P`4(#oPeo^F*fCkj8lK#-oG9aQ z+BRla7Q`^ltg36+G*pW=WWQOqmXigyQnX9kCOo&YJWvG#<`yi^bD7bp$@uO;t76Ys zEVAsHbPMKK4ctughEO(ZzS|gCko0cu(eS|7fKf0}!WgwC?SiFe9q8s5=-o5iiZgw7 z6Gq5`SqW_J_4d(se*OB@_SZ++9~?XL>C5f850@0M3dNc6r8ANsED$|cwkzNPVklS! ztufQL-HK6ecnh+g7_-bWcOR+_9O4=rMO~XTDY}J4(laPmO$OmZ)eD8QFsShJkr&P) z5kwx_pS-#__j(PF+2R|oES~(R{nDwW*;A1cG|?QRt!cmfX8V<|YQVG&ZrqyN<{$QJ zI9Q~L23zR9Il}t^c1sw3sii;`H9ab*&cMW7r_PywP?HeHYr>V#d0-NJ*!lts;ChlX(In6N+L+HKSlYU zO0)!vKU75{Dv@~&TU4U`F2_}pB`FvwMN|p({qWBaRs%N$3D*C@hs1+R%ZUfBG9(`9 zUx_E8q5n=ii9kl)X{->BatY#5fN;qFAe_g5aL6SHM?%6uzUg4mMkmck=V>MoX!%6;?uhoWEGu-Urd(E(AVV^r}mfhNDYm!|9wY zD3Ath|2jBG9zH3f)2u1|B6}XH`Ss1ynb#Vey!eY_1{(RpLhl-{l}dJ{R5XFA;mOCj zPR~Rama%eQMcg-me&V=#jy=XWG4hq(go9la(iFBGK|ue}WbeNyOVr)KXEn#V9cB%$ f4}g@I0_p(rSouXV)MYiV#??`EP~EBCl*;}M4Ux}U diff --git a/resources/scripts/db/book.realm.lock b/resources/scripts/db/book.realm.lock index 34a99da654d664692b1bdd8b562c6ebc1cf07270..208c139b4e3417207308e79979db4842ce2ff04c 100644 GIT binary patch literal 1416 zcmZQ%8ie8Rd_Lz-S1JhQN>t0RYd(23`OF literal 1416 zcmZQ%lDLnwB2pU~7@c!1sB&uHpSyuhyR51P7;&Dj0>6HT4OBJAq4;OPYtZvt+tSpCb4 zrf$Pt?B=nesnfWKT^%=?x()o;?d3&Nci|lRDVg<|u6FO+-?MT3G VuN9iQ4gC;%5hjhYMneD*0szHHBd`Df diff --git a/resources/scripts/db/tts.realm.lock b/resources/scripts/db/tts.realm.lock index 4fbc855285f3e5826b50d54a3429992c6eefe2c4..8d052bed9551d990a8f05825423c4a2c6b3b468e 100644 GIT binary patch delta 22 acmeC+?%?KSWaMCwVB%u{0mhBI)vN#?b^@gU delta 22 ZcmeC+?%?KSWaMCwVB%u{gN?k^tN") + exit(0) + +if getattr(sys, "frozen", False): + cript_directory = os.path.dirname(sys.executable) +elif __file__: + cript_directory = os.path.dirname(__file__) + + +def GetText(out_folder, mp3_folder): + text = [] + # 先获取模型 + print("正在下载或加载模型") + sys.stdout.flush() + model_path = Path( + hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="preprocessor_config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="tokenizer.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="vocabulary.json", + ) + model = WhisperModel( + model_size_or_path=os.path.dirname(model_path), + device="auto", + local_files_only=True, + ) + print("模型加载成功,开始识别") + sys.stdout.flush() + # 拿到指定文件夹里面的所有的MP3文件 + mp3_list = [] + for root, dirs, files in os.walk(mp3_folder): + for file in files: + if file.endswith(".mp3"): + mp3_list.append(os.path.join(root, file)) + + for mp in mp3_list: + segments, info = model.transcribe( + mp, + beam_size=5, + language="zh", + vad_filter=True, + vad_parameters=dict(min_silence_duration_ms=1000), + ) + tmp_text = "" + for segment in segments: + tmp_text += segment.text + "。" + print(mp + "识别完成") + sys.stdout.flush() + text.append(tmp_text) + + # 数据写出 + print("文本全部识别成功,正在写出") + sys.stdout.flush() + tools = public_tools.PublicTools() + tools.write_to_file(text, 
os.path.join(out_folder, "文案.txt")) + print("写出完成") + sys.stdout.flush() + + +def GetTextTask(out_folder, mp, name): + text = [] + # 先获取模型 + print("正在下载或加载模型") + sys.stdout.flush() + model_path = Path( + hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="preprocessor_config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="tokenizer.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="vocabulary.json", + ) + model = WhisperModel( + model_size_or_path=os.path.dirname(model_path), + device="auto", + local_files_only=True, + ) + print("模型加载成功,开始识别") + sys.stdout.flush() + segments, info = model.transcribe( + mp, + beam_size=5, + language="zh", + vad_filter=True, + vad_parameters=dict(min_silence_duration_ms=1000), + ) + tmp_text = "" + for segment in segments: + tmp_text += segment.text + "。" + print(mp + "识别完成") + sys.stdout.flush() + text.append(tmp_text) + + # 数据写出 + sys.stdout.flush() + tools = public_tools.PublicTools() + tools.write_to_file(text, os.path.join(out_folder, name + ".txt")) + sys.stdout.flush() + + +# GetTextTask( +# "C:\\Users\\27698\\Desktop\\测试\\mjTest", +# "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\frame\\00001.mp4", +# "00001", +# ) + +if sys.argv[1] == "-ts": + GetText( + sys.argv[2], + sys.argv[3], + ) +elif sys.argv[1] == "-t": + GetTextTask( + sys.argv[2], + sys.argv[3], + sys.argv[4], + ) +else: + print("Params: ") + exit(0) diff --git a/resources/scripts/localWhisper/local_whisper.spec b/resources/scripts/localWhisper/local_whisper.spec new file mode 100644 index 0000000..de719d4 --- /dev/null +++ b/resources/scripts/localWhisper/local_whisper.spec @@ -0,0 +1,50 @@ +# -*- mode: python ; coding: utf-8 -*- + +from PyInstaller.building.datastruct import Tree +from PyInstaller.utils.hooks import get_package_paths + +PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1] +datas = [(PACKAGE_DIRECTORY, 'faster_whisper')] + + +a = Analysis( + ['local_whisper.py'], + pathex=[], + binaries=[], + datas=[], + hiddenimports=[], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + [], + exclude_binaries=True, + name='local_whisper', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) +coll = COLLECT( + exe, + a.binaries, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='local_whisper', +) diff --git a/resources/scripts/localWhisper/public_tools.py b/resources/scripts/localWhisper/public_tools.py new file mode 100644 index 0000000..90776a2 --- /dev/null +++ b/resources/scripts/localWhisper/public_tools.py @@ -0,0 +1,351 @@ +# 读取文件的方法 +import json +import os +import win32api +import win32con +import pywintypes +import shutil +import re + + +class PublicTools: + """ + 一些公用的基础方法 + """ + + def delete_path(self, path): + """ + 删除指定路径的文件或者是文件夹 + """ + # 检查路径是否存在 + if not os.path.exists(path): + return + + # 检查路径是文件还是文件夹 + if os.path.isfile(path): + # 是文件,执行删除 + try: + os.remove(path) + except Exception as e: + raise e + elif os.path.isdir(path): + # 是文件夹,执行删除 + try: + shutil.rmtree(path) + except Exception as e: + 
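# Command-line sketch for the new local_whisper tool defined above (flags per its
# sys.argv dispatch; the .exe name matches the packaged build referenced elsewhere
# in this patch, and the paths are illustrative):
#   local_whisper.exe -ts <out_folder> <mp3_folder>        # transcribe every .mp3 under <mp3_folder>, write 文案.txt
#   local_whisper.exe -t  <out_folder> <mp3_file> <name>   # transcribe a single file, write <name>.txt
# Any other first argument prints the usage hint and exits.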
raise e + else: + raise + + def list_files_by_extension(self, folder_path, extension): + """ + 读取指定文件夹下面的所有的指定拓展文件命的文件列表 + """ + file_list = [] + for root, dirs, files in os.walk(folder_path): + for file in files: + if file.endswith(extension): + file_list.append(os.path.join(root, file)) + elif file.endswith(extension.upper()): + file_list.append(os.path.join(root, file)) + return file_list + + def get_fonts_from_registry(self, key_path): + """ + 获取注册表中安装的字体文件 + """ + font_names = [] + try: + key = win32api.RegOpenKeyEx( + ( + win32con.HKEY_LOCAL_MACHINE + if "HKEY_LOCAL_MACHINE" in key_path + else win32con.HKEY_CURRENT_USER + ), + key_path.split("\\", 1)[1], + 0, + win32con.KEY_READ, + ) + i = 0 + while True: + try: + value = win32api.RegEnumValue(key, i) + font_name = value[0] + # 使用正则表达式移除括号及其内容 + font_name = re.sub(r"\s*\([^)]*\)$", "", font_name) + font_names.append(font_name) + i += 1 + except pywintypes.error as e: + if e.winerror == 259: # 没有更多的数据 + break + else: + raise + finally: + try: + win32api.RegCloseKey(key) + except: + pass + return font_names + + def get_installed_fonts(self): + """ + 获取字体文件名称并返回 + """ + system_fonts = self.get_fonts_from_registry( + "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts" + ) + user_fonts = self.get_fonts_from_registry( + "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows NT\\CurrentVersion\\Fonts" + ) + all_fonts = list(set(system_fonts + user_fonts)) # 合并并去重 + return all_fonts + + # 将RRGGBB转换为BBGGRR + def convert_rrggbb_to_bbggrr(self, rrggbb): + """ + 将RRGGBB转换为BBGGRR + """ + if len(rrggbb) == 7: + rr = rrggbb[1:3] + gg = rrggbb[3:5] + bb = rrggbb[5:7] + return bb + gg + rr + else: + return "Invalid input" + + def write_to_file(self, arr, filename): + with open(filename, "w",encoding='utf-8') as f: + for item in arr: + f.write("%s\n" % item) + + +# 读取文件 +def read_file(fileType): + txt_path = input(f"输入{fileType}文件路径:") + txt_path = remove_prefix_and_suffix(txt_path, '"', '"') + while txt_path.strip() == "": + txt_path = input(f"输入{fileType}文件路径:") + + while os.path.exists(txt_path) == False: + print("文件路径不存在错误:") + txt_path = input(f"输入{fileType}文件路径:") + txt_path = remove_prefix_and_suffix(txt_path, '"', '"') + return txt_path + + +def format_time_ms(milliseconds): + """ + 时间转换将ms->小时:分钟:秒.毫秒格式 + """ + seconds = milliseconds / 1000 + # 计算小时、分钟和秒 + hours = int(seconds // 3600) + minutes = int((seconds % 3600) // 60) + seconds = seconds % 60 + # 格式化字符串 + # 使用`%02d`确保小时和分钟总是显示为两位数,`%.2f`确保秒数显示两位小数 + formatted_time = f"{hours}:{minutes:02d}:{seconds:05.2f}" + return formatted_time + + +# 删除满足条件的开头和结尾 +def remove_prefix_and_suffix(input_str, prefix_to_remove, suffix_to_remove): + if input_str.startswith(prefix_to_remove): + # 删除开头 + input_str = input_str[len(prefix_to_remove) :] + + if input_str.endswith(suffix_to_remove): + # 删除结尾 + input_str = input_str[: -len(suffix_to_remove)] + + return input_str + + +# 判断文件夹下面是不是有特定的文件夹 +def check_if_folder_exists(parent_folder, target_folder_name): + # 获取文件夹列表 + subfolders = [f.name for f in os.scandir(parent_folder) if f.is_dir()] + + # 检查特定文件夹是否存在 + if target_folder_name in subfolders: + return True + else: + return False + + +# 检查指定文件夹中是否存在特定文件。 +def file_exists_in_folder(folder_path: str, file_name: str) -> bool: + # 构建完整的文件路径 + file_path = os.path.join(folder_path, file_name) + + # 返回文件是否存在 + return os.path.isfile(file_path) + + +# 秒数转换,保留一位小数 +def convert_to_seconds(number, count): + seconds = number / 1000000 + rounded_number = round(seconds, count) + return rounded_number 
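# Quick sanity examples for the helpers above (outputs follow directly from the code;
# inputs are illustrative):
#   format_time_ms(3723500)                              -> "1:02:03.50"   # ms -> H:MM:SS.ss
#   PublicTools().convert_rrggbb_to_bbggrr("#FFCC00")    -> "00CCFF"       # RRGGBB -> BBGGRR
#   convert_to_seconds(1_500_000, 1)                     -> 1.5            # microseconds -> seconds
#   remove_prefix_and_suffix('"C:\\clip.mp4"', '"', '"') -> 'C:\\clip.mp4' # strip wrapping quotes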
+ + +def is_empty(obj): + if obj is None: + return True + elif isinstance(obj, str): + return len(obj) == 0 + elif isinstance(obj, list): + return len(obj) == 0 + elif isinstance(obj, dict): + return len(obj) == 0 + return False + + +def opt_dict(obj, key, default=None): + if obj is None: + return default + if key in obj: + v = obj[key] + if not is_empty(v): + return v + return default + + +def read_config(path, webui=True): + with open(path, "r", encoding="utf-8") as f: + runtime_config = json.load(f) + + if "config" not in runtime_config: + print("no filed 'config' in json") + return None + + config = runtime_config["config"] + if "webui" not in config: + print("no filed 'webui' in 'config'") + return None + + setting_config_path = config["setting"] + if not os.path.exists(setting_config_path): + setting_config_path = "config/" + setting_config_path + if not os.path.exists(setting_config_path): + setting_config_path = "../" + setting_config_path + + # read config + with open(setting_config_path, "r", encoding="utf-8") as f: + setting_config = json.load(f) + + # set workspace parent:根目录 + if "workspace" in setting_config: + setting_config["workspace"]["parent"] = runtime_config["workspace"] + else: + setting_config["workspace"] = {"parent": runtime_config["workspace"]} + setting_config["video"] = opt_dict(runtime_config, "video") + + # merge setting config + if "setting" in config: + setting_config.update(runtime_config["setting"]) + + # webui config + if webui: + webui_config_path = config["webui"] + if not os.path.exists(webui_config_path): + webui_config_path = "config/webui/" + webui_config_path + if not os.path.exists(webui_config_path): + webui_config_path = "../" + webui_config_path + + with open(webui_config_path, "r", encoding="utf-8") as f: + webui_config = json.load(f) + + # merge webui config + if "webui" in runtime_config: + webui_config.update(runtime_config["webui"]) + + return webui_config, setting_config + return setting_config + + +TAG_MODE_NONE = "" + + +# 工作路径 +class Workspace: + + def __init__( + self, + root: str, + input: str, + output: str, + input_crop: str, + output_crop: str, + input_tag: str, + input_mask: str, + input_crop_mask: str, + crop_info: str, + ): + self.root = root + self.input = input + self.output = output + self.input_crop = input_crop + self.output_crop = output_crop + self.input_tag = input_tag + self.input_mask = input_mask + self.input_crop_mask = input_crop_mask + self.crop_info = crop_info + + +# 定义一个倍数函数 +def round_up(num, mul): + return (num // mul + 1) * mul + + +class SettingConfig: + + def __init__(self, config: dict, workParent): + self.config = config + self.webui_work_api = None + self.workParent = workParent + + def to_dict(self): + return self.__dict__ + + def get_tag_mode(self): + tag_cfg = opt_dict(self.config, "tag") + return opt_dict(tag_cfg, "mode", TAG_MODE_NONE) + + def get_tag_actions(self): + tag_cfg = opt_dict(self.config, "tag") + return opt_dict(tag_cfg, "actions", []) + + def get_workspace_config(self) -> Workspace: + workspace_config = opt_dict(self.config, "workspace") + tmp_config = opt_dict(workspace_config, "tmp") + + input = opt_dict(workspace_config, "input", "input") + output = opt_dict(workspace_config, "output", "output") + workspace_parent = self.workParent + + tmp_parent = opt_dict(tmp_config, "parent", "tmp") + input_crop = opt_dict(tmp_config, "input_crop", "input_crop") + output_crop = opt_dict(tmp_config, "output_crop", "output_crop") + input_tag = opt_dict(tmp_config, "input_tag", "input_crop") + 
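# Note the fallback just above: when "input_tag" is not configured it defaults to the
# "input_crop" directory rather than a separate input_tag folder, so tag input comes
# from the cropped images unless overridden in the config.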
input_mask = opt_dict(tmp_config, "input_mask", "input_mask") + input_crop_mask = opt_dict(tmp_config, "input_crop_mask", "input_crop_mask") + crop_info = opt_dict(tmp_config, "crop_info", "crop_info.txt") + + tmp_path = os.path.join(workspace_parent, tmp_parent) + + return Workspace( + workspace_parent, + os.path.join(workspace_parent, input), + os.path.join(workspace_parent, output), + os.path.join(tmp_path, input_crop), + os.path.join(tmp_path, output_crop), + os.path.join(tmp_path, input_tag), + os.path.join(tmp_path, input_mask), + os.path.join(tmp_path, input_crop_mask), + os.path.join(tmp_path, crop_info), + ) + + def enable_tag(self): + tag_cfg = opt_dict(self.config, "tag") + return opt_dict(tag_cfg, "enable", True) diff --git a/resources/scripts/localWhisper/shotSplit.py b/resources/scripts/localWhisper/shotSplit.py new file mode 100644 index 0000000..e07a270 --- /dev/null +++ b/resources/scripts/localWhisper/shotSplit.py @@ -0,0 +1,307 @@ +# pip install scenedetect opencv-python -i https://pypi.tuna.tsinghua.edu.cn/simple + +from scenedetect.video_manager import VideoManager +from scenedetect.scene_manager import SceneManager +from scenedetect.stats_manager import StatsManager +from scenedetect.detectors.content_detector import ContentDetector +import os +import sys +import json +import subprocess +from huggingface_hub import hf_hub_download +from faster_whisper import WhisperModel +from pathlib import Path +import public_tools + +# 获取智能画面分割的时间或者秒数 +def find_scenes(video_path, sensitivity): + print( + "正在计算分镜数据" + "sensitivity:" + str(sensitivity) + "path : " + video_path + ) + sys.stdout.flush() + video_manager = VideoManager([video_path]) + stats_manager = StatsManager() + scene_manager = SceneManager(stats_manager) + + # 使用contect-detector + scene_manager.add_detector(ContentDetector(threshold=float(sensitivity))) + + shijian_list = [] + + try: + video_manager.set_downscale_factor() + video_manager.start() + scene_manager.detect_scenes(frame_source=video_manager) + scene_list = scene_manager.get_scene_list() + print("分镜数据列表:") + sys.stdout.flush() + for i, scene in enumerate(scene_list): + shijian_list.append([scene[0].get_timecode(), scene[1].get_timecode()]) + print( + "Scene %2d: Start %s / Frame %d, End %s / Frame %d" + % ( + i + 1, + scene[0].get_timecode(), + scene[0].get_frames(), + scene[1].get_timecode(), + scene[1].get_frames(), + ) + ) + sys.stdout.flush() + finally: + video_manager.release() + + return shijian_list + + +# 如果不存在就创建 +def createDir(file_dir): + # 如果不存在文件夹,就创建 + if not os.path.isdir(file_dir): + os.mkdir(file_dir) + + +# 切分一个视频 +def ClipVideo(video_path, out_folder, image_out_folder, sensitivity, gpu_type): + shijian_list = find_scenes(video_path, sensitivity) # 多组时间列表 + shijian_list_len = len(shijian_list) + + print("总共有%s个场景" % str(shijian_list_len)) + sys.stdout.flush() + video_list = [] + for i in range(0, shijian_list_len): + start_time_str = shijian_list[i][0] + end_time_str = shijian_list[i][1] + + print("开始输出第" + str(i + 1) + "个分镜") + video_name = "{:05d}".format(i + 1) + out_video_file = os.path.join(out_folder, video_name + ".mp4") + sys.stdout.flush() + video_list.append( + { + "start_time_str": start_time_str, + "end_time_str": end_time_str, + "out_video_file": out_video_file, + "video_name": video_name, + } + ) + + # 使用 ffmpeg 裁剪视频 + command = [] + command.append("ffmpeg") + command.append("-i") + command.append(video_path) + command.append("-ss") + command.append(start_time_str) + command.append("-to") + command.append(end_time_str) + 
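# Taken together with the video-codec flag chosen just below, the argv assembled here
# corresponds roughly to this command line (timecodes/paths illustrative, NVIDIA branch shown):
#   ffmpeg -i input.mp4 -ss 00:00:00.000 -to 00:00:04.120 -c:v h264_nvenc -preset fast -c:a copy 00001.mp4 -loglevel error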
command.append("-c:v") + + if gpu_type == "NVIDIA": + command.append("h264_nvenc") + elif gpu_type == "AMD": + command.append("h264_amf") + else: + command.append("libx264") + + command.append("-preset") + command.append("fast") + command.append("-c:a") + command.append("copy") + command.append(out_video_file) + command.append("-loglevel") + command.append("error") + + subprocess.run( + command, + check=True, + stderr=subprocess.PIPE, + ) + + print("分镜输出完成。开始抽帧") + sys.stdout.flush() + for vi in video_list: + h, m, s = vi["start_time_str"].split(":") + start_seconds = int(h) * 3600 + int(m) * 60 + float(s) + + h, m, s = vi["end_time_str"].split(":") + end_seconds = int(h) * 3600 + int(m) * 60 + float(s) + print("正在抽帧:" + vi["video_name"]) + sys.stdout.flush() + subprocess.run( + [ + "ffmpeg", + "-ss", + str((end_seconds - start_seconds) / 2), + "-i", + vi["out_video_file"], + "-frames:v", + "1", + os.path.join(image_out_folder, vi["video_name"] + ".png"), + "-loglevel", + "error", + ] + ) + + print("抽帧完成,开始识别文案") + sys.stdout.flush() + return video_list + + +def SplitAudio(video_out_folder, video_list): + # ffmpeg -i input_file.mp4 -vn -ab 128k output_file.mp3 + print("正在分离音频!!") + mp3_list = [] + sys.stdout.flush() + for v in video_list: + mp3_path = os.path.join(video_out_folder, v["video_name"] + ".mp3") + mp3_list.append(mp3_path) + subprocess.run( + [ + "ffmpeg", + "-i", + v["out_video_file"], + "-vn", + "-ab", + "128k", + mp3_path, + "-loglevel", + "error", + ], + check=True, + ) + return mp3_list + + +def GetText(out_folder, mp3_list): + text = [] + # 先获取模型 + print("正在下载或加载模型") + sys.stdout.flush() + model_path = Path( + hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="preprocessor_config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="tokenizer.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="vocabulary.json", + ) + model = WhisperModel( + model_size_or_path=os.path.dirname(model_path), + device="auto", + local_files_only=True, + ) + print("模型加载成功,开始识别") + sys.stdout.flush() + for mp in mp3_list: + segments, info = model.transcribe( + mp, + beam_size=5, + language="zh", + vad_filter=True, + vad_parameters=dict(min_silence_duration_ms=1000), + ) + tmp_text = "" + for segment in segments: + tmp_text += segment.text + "。" + print(mp + "识别完成") + sys.stdout.flush() + text.append(tmp_text) + + # 数据写出 + print("文本全部识别成功,正在写出") + sys.stdout.flush() + tools = public_tools.PublicTools() + tools.write_to_file(text, os.path.join(out_folder, "文案.txt")) + print("写出完成") + sys.stdout.flush() + + +def GetTextTask(out_folder, mp, name): + text = [] + # 先获取模型 + print("正在下载或加载模型") + sys.stdout.flush() + model_path = Path( + hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="preprocessor_config.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="tokenizer.json", + ) + hf_hub_download( + repo_id="Systran/faster-whisper-large-v3", + filename="vocabulary.json", + ) + model = WhisperModel( + model_size_or_path=os.path.dirname(model_path), + device="auto", + local_files_only=True, + ) + 
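# The directory handed to model_size_or_path is the Hugging Face cache folder that the
# hf_hub_download() calls above populated, so with local_files_only=True no network access
# is needed once the files are cached; device="auto" lets the CTranslate2 backend use CUDA
# when available and fall back to CPU. The transcribe() call that follows keeps the same
# settings as GetText above: beam_size=5, language="zh", and Silero-VAD filtering that
# skips silences of roughly a second or more (min_silence_duration_ms=1000).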
print("模型加载成功,开始识别") + sys.stdout.flush() + segments, info = model.transcribe( + mp, + beam_size=5, + language="zh", + vad_filter=True, + vad_parameters=dict(min_silence_duration_ms=1000), + ) + tmp_text = "" + for segment in segments: + tmp_text += segment.text + "。" + print(mp + "识别完成") + sys.stdout.flush() + text.append(tmp_text) + + # 数据写出 + sys.stdout.flush() + tools = public_tools.PublicTools() + tools.write_to_file(text, os.path.join(out_folder, name + ".txt")) + sys.stdout.flush() + + +def get_fram(video_path, out_path, sensitivity): + try: + shijian_list = find_scenes(video_path, sensitivity) # 多组时间列表 + print("总共有%s个场景" % str(len(shijian_list))) + print("开始输出json") + print(shijian_list) + # 将数组中的消息写道json文件中 + with open(out_path, "w") as file: + # 将数组写入到指定的json文件 + json.dump(shijian_list, file) + print("输出完成") + except Exception as e: + print("出现错误" + str(e)) + exit(0) + + +def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type): + v_l = ClipVideo( + video_path, video_out_folder, image_out_folder, sensitivity, gpu_type + ) + + # 开始分离音频 + m_l = SplitAudio(video_out_folder, v_l) + # 开始识别字幕 + GetText(os.path.dirname(video_out_folder), m_l) diff --git a/resources/scripts/requirements.txt b/resources/scripts/requirements.txt deleted file mode 100644 index b45888f..0000000 --- a/resources/scripts/requirements.txt +++ /dev/null @@ -1,298 +0,0 @@ -accelerate==0.30.1 -addict==2.4.0 -aiofiles==23.2.1 -aiohttp==3.8.6 -aiosignal==1.3.1 -alibabacloud-bailian20230601==1.6.1 -alibabacloud-credentials==0.3.3 -alibabacloud-endpoint-util==0.0.3 -alibabacloud-gateway-spi==0.0.1 -alibabacloud-openapi-util==0.2.2 -alibabacloud-tea==0.3.6 -alibabacloud-tea-openapi==0.3.9 -alibabacloud-tea-util==0.3.12 -alibabacloud-tea-xml==0.0.2 -aliyun-python-sdk-core==2.15.0 -aliyun-python-sdk-kms==2.16.2 -altair==5.3.0 -altgraph==0.17.4 -annotated-types==0.6.0 -anthropic==0.26.1 -antlr4-python3-runtime==4.9.3 -anyio==4.3.0 -APScheduler==3.10.4 -arxiv==2.1.0 -astor==0.8.1 -asttokens==2.4.1 -async-timeout==4.0.3 -attrdict==2.0.1 -attrs==23.2.0 -av==11.0.0 -azure-cognitiveservices-speech==1.37.0 -Babel==2.15.0 -backports.tarfile==1.1.1 -baidu-aip==4.16.13 -bce-python-sdk==0.9.11 -beautifulsoup4==4.12.3 -bidict==0.23.1 -blinker==1.8.2 -broadscope-bailian==1.3.1 -cachetools==5.3.3 -certifi==2024.2.2 -cffi==1.16.0 -cfgv==3.4.0 -chardet==5.2.0 -charset-normalizer==3.3.2 -chatgpt-tool-hub==0.5.0 -cheroot==10.0.1 -click==8.1.7 -colorama==0.4.6 -coloredlogs==15.0.1 -comtypes==1.4.2 -contourpy==1.2.1 -controlnet-aux==0.0.3 -crcmod==1.7 -cryptography==42.0.5 -cssselect==1.2.0 -cssutils==2.11.0 -ctranslate2==4.1.0 -curl_cffi==0.6.4 -cx-Logging==3.1.0 -cx_Freeze==6.15.16 -cycler==0.12.1 -Cython==3.0.10 -dashscope==1.19.2 -datasets==2.18.0 -decorator==4.4.2 -diffusers==0.27.2 -dill==0.3.8 -dingtalk-stream==0.18.1 -distlib==0.3.8 -distro==1.9.0 -dnspython==2.6.1 -dulwich==0.22.1 -easydict==1.13 -edge-tts==6.1.12 -einops==0.7.0 -elevenlabs==1.0.3 -email_validator==2.1.1 -et-xmlfile==1.1.0 -exceptiongroup==1.2.1 -executing==2.0.1 -fastapi==0.108.0 -fastapi-cli==0.0.2 -faster-whisper==1.0.1 -feedparser==6.0.10 -ffmpy==0.3.2 -filelock==3.13.1 -fire==0.6.0 -Flask==3.0.3 -flask-babel==4.0.0 -flatbuffers==24.3.7 -fonttools==4.53.0 -frozenlist==1.4.1 -fsspec==2024.2.0 -future==1.0.0 -gast==0.5.4 -google-ai-generativelanguage==0.6.4 -google-api-core==2.19.0 -google-api-python-client==2.130.0 -google-auth==2.29.0 -google-auth-httplib2==0.2.0 -google-generativeai==0.5.4 -googleapis-common-protos==1.63.0 -gradio==4.21.0 
-gradio_client==0.12.0 -grpcio==1.64.0 -grpcio-status==1.62.2 -gTTS==2.5.1 -h11==0.14.0 -HTMLParser==0.0.2 -httpcore==1.0.5 -httplib2==0.22.0 -httptools==0.6.1 -httpx==0.27.0 -huggingface-hub==0.23.2 -humanfriendly==10.0 -identify==2.5.36 -idna==3.6 -imageio==2.34.0 -imageio-ffmpeg==0.4.9 -imgaug==0.4.0 -importlib_metadata==7.0.2 -importlib_resources==6.4.0 -install==1.3.5 -IOPaint==1.3.3 -ipython==8.24.0 -itsdangerous==2.2.0 -jaraco.context==5.3.0 -jaraco.functools==4.0.1 -jedi==0.19.1 -Jinja2==3.1.3 -jiter==0.4.0 -jmespath==0.10.0 -jsonschema==4.22.0 -jsonschema-specifications==2023.12.1 -kiwisolver==1.4.5 -langid==1.1.6 -lazy_loader==0.4 -lief==0.14.1 -linkai==0.0.6.0 -lmdb==1.4.1 -loguru==0.7.2 -lxml==5.2.2 -markdown-it-py==3.0.0 -MarkupSafe==2.1.5 -matplotlib==3.9.0 -matplotlib-inline==0.1.7 -mdurl==0.1.2 -modelscope==1.13.1 -more-itertools==10.2.0 -moviepy==1.0.3 -mpmath==1.3.0 -multidict==6.0.5 -multiprocess==0.70.16 -networkx==3.2.1 -nodeenv==1.8.0 -Nuitka==2.1.2 -numpy==1.24.2 -omegaconf==2.3.0 -onnxruntime==1.17.1 -openai==0.27.8 -opencv-contrib-python==4.6.0.66 -opencv-python==4.6.0.66 -opencv-python-headless==4.9.0.80 -openpyxl==3.1.2 -opt-einsum==3.3.0 -optionaldict==0.1.2 -ordered-set==4.1.0 -orjson==3.10.3 -oss2==2.18.4 -packaging==24.0 -paddleocr==2.7.3 -paddlepaddle==2.6.1 -pandas==2.2.1 -parso==0.8.4 -pdf2docx==0.5.8 -pefile==2023.2.7 -peft==0.7.1 -piexif==1.1.3 -pillow==10.3.0 -platformdirs==4.2.0 -pre-commit==3.7.1 -premailer==3.10.0 -proglog==0.1.10 -prompt-toolkit==3.0.43 -proto-plus==1.23.0 -protobuf==3.20.2 -psutil==5.9.8 -pure-eval==0.2.2 -pyarrow==15.0.1 -pyarrow-hotfix==0.6 -pyasn1==0.6.0 -pyasn1_modules==0.4.0 -pyclipper==1.3.0.post5 -pycparser==2.21 -pycryptodome==3.20.0 -pydantic==2.5.3 -pydantic_core==2.14.6 -pydub==0.25.1 -Pygments==2.18.0 -pyinstaller==6.5.0 -pyinstaller-hooks-contrib==2024.3 -PyJWT==2.8.0 -PyMuPDF==1.24.5 -PyMuPDFb==1.24.3 -pyOpenSSL==24.1.0 -pyoxidizer==0.24.0 -pyparsing==3.1.2 -pypiwin32==223 -pypng==0.20220715.0 -PyQRCode==1.2.1 -pyreadline3==3.4.1 -pytesseract==0.3.10 -python-dateutil==2.9.0.post0 -python-docx==1.1.2 -python-dotenv==1.0.1 -python-engineio==4.9.1 -python-multipart==0.0.9 -python-socketio==5.7.2 -pyttsx3==2.90 -pytz==2024.1 -pywin32==306 -pywin32-ctypes==0.2.2 -PyYAML==6.0.1 -qrcode==7.4.2 -rapidfuzz==3.9.3 -rarfile==4.2 -referencing==0.35.1 -regex==2024.5.15 -requests==2.31.0 -rich==13.7.1 -rpds-py==0.18.1 -rsa==4.9 -ruff==0.4.7 -safetensors==0.4.3 -scenedetect==0.6.3 -scikit-image==0.23.2 -scipy==1.12.0 -semantic-version==2.10.0 -sgmllib3k==1.0.0 -shapely==2.0.4 -shellingham==1.5.4 -simple-websocket==1.0.0 -simplejson==3.19.2 -six==1.16.0 -sniffio==1.3.1 -sortedcontainers==2.4.0 -soupsieve==2.5 -SpeechRecognition==3.10.4 -stack-data==0.6.3 -starlette==0.32.0.post1 -sympy==1.12 -tenacity==8.2.3 -termcolor==2.4.0 -tifffile==2024.5.22 -tiktoken==0.4.0 -timm==1.0.3 -tokenizers==0.19.1 -tomli==2.0.1 -tomlkit==0.12.0 -toolz==0.12.1 -torch==2.1.2+cu118 -torchvision==0.16.2+cu118 -tqdm==4.66.2 -traitlets==5.14.3 -transformers==4.41.2 -typer==0.12.3 -typer-config==1.4.0 -typing_extensions==4.10.0 -tzdata==2024.1 -tzlocal==5.2 -ujson==5.9.0 -uritemplate==4.1.1 -urllib3==2.2.1 -utility==1.0 -uvicorn==0.29.0 -virtualenv==20.26.2 -visualdl==2.5.3 -watchfiles==0.21.0 -wcwidth==0.2.13 -web.py==0.62 -websocket-client==1.2.0 -websockets==11.0.3 -wechatpy==1.8.18 -Werkzeug==3.0.3 -wikipedia==1.4.0 -win32-setctime==1.1.0 -wolframalpha==5.0.0 -wsproto==1.2.0 -xlrd==2.0.1 -xmltodict==0.13.0 -xxhash==3.4.1 -yacs==0.1.8 -yapf==0.40.2 
-yarl==1.9.4 -zhipuai==2.1.0.20240521 -zipp==3.18.1 -zstandard==0.22.0 diff --git a/resources/scripts/shotSplit.py b/resources/scripts/shotSplit.py index d177c4a..e2a9f9a 100644 --- a/resources/scripts/shotSplit.py +++ b/resources/scripts/shotSplit.py @@ -8,8 +8,8 @@ import os import sys import json import subprocess -from huggingface_hub import hf_hub_download -from faster_whisper import WhisperModel +# from huggingface_hub import hf_hub_download +# from faster_whisper import WhisperModel import public_tools from pathlib import Path @@ -174,111 +174,111 @@ def SplitAudio(video_out_folder, video_list): return mp3_list -def GetText(out_folder, mp3_list): - text = [] - # 先获取模型 - print("正在下载或加载模型") - sys.stdout.flush() - model_path = Path( - hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="config.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="preprocessor_config.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="tokenizer.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="vocabulary.json", - ) - model = WhisperModel( - model_size_or_path=os.path.dirname(model_path), - device="auto", - local_files_only=True, - ) - print("模型加载成功,开始识别") - sys.stdout.flush() - for mp in mp3_list: - segments, info = model.transcribe( - mp, - beam_size=5, - language="zh", - vad_filter=True, - vad_parameters=dict(min_silence_duration_ms=1000), - ) - tmp_text = "" - for segment in segments: - tmp_text += segment.text + "。" - print(mp + "识别完成") - sys.stdout.flush() - text.append(tmp_text) +# def GetText(out_folder, mp3_list): + # text = [] + # # 先获取模型 + # print("正在下载或加载模型") + # sys.stdout.flush() + # model_path = Path( + # hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="config.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="preprocessor_config.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="tokenizer.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="vocabulary.json", + # ) + # model = WhisperModel( + # model_size_or_path=os.path.dirname(model_path), + # device="auto", + # local_files_only=True, + # ) + # print("模型加载成功,开始识别") + # sys.stdout.flush() + # for mp in mp3_list: + # segments, info = model.transcribe( + # mp, + # beam_size=5, + # language="zh", + # vad_filter=True, + # vad_parameters=dict(min_silence_duration_ms=1000), + # ) + # tmp_text = "" + # for segment in segments: + # tmp_text += segment.text + "。" + # print(mp + "识别完成") + # sys.stdout.flush() + # text.append(tmp_text) - # 数据写出 - print("文本全部识别成功,正在写出") - sys.stdout.flush() - tools = public_tools.PublicTools() - tools.write_to_file(text, os.path.join(out_folder, "文案.txt")) - print("写出完成") - sys.stdout.flush() + # # 数据写出 + # print("文本全部识别成功,正在写出") + # sys.stdout.flush() + # tools = public_tools.PublicTools() + # tools.write_to_file(text, os.path.join(out_folder, "文案.txt")) + # print("写出完成") + # sys.stdout.flush() -def GetTextTask(out_folder, mp, name): - text = [] - # 先获取模型 - print("正在下载或加载模型") - sys.stdout.flush() - model_path = Path( - hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") - ) - hf_hub_download( - 
repo_id="Systran/faster-whisper-large-v3", - filename="config.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="preprocessor_config.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="tokenizer.json", - ) - hf_hub_download( - repo_id="Systran/faster-whisper-large-v3", - filename="vocabulary.json", - ) - model = WhisperModel( - model_size_or_path=os.path.dirname(model_path), - device="auto", - local_files_only=True, - ) - print("模型加载成功,开始识别") - sys.stdout.flush() - segments, info = model.transcribe( - mp, - beam_size=5, - language="zh", - vad_filter=True, - vad_parameters=dict(min_silence_duration_ms=1000), - ) - tmp_text = "" - for segment in segments: - tmp_text += segment.text + "。" - print(mp + "识别完成") - sys.stdout.flush() - text.append(tmp_text) +# def GetTextTask(out_folder, mp, name): + # text = [] + # # 先获取模型 + # print("正在下载或加载模型") + # sys.stdout.flush() + # model_path = Path( + # hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin") + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="config.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="preprocessor_config.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="tokenizer.json", + # ) + # hf_hub_download( + # repo_id="Systran/faster-whisper-large-v3", + # filename="vocabulary.json", + # ) + # model = WhisperModel( + # model_size_or_path=os.path.dirname(model_path), + # device="auto", + # local_files_only=True, + # ) + # print("模型加载成功,开始识别") + # sys.stdout.flush() + # segments, info = model.transcribe( + # mp, + # beam_size=5, + # language="zh", + # vad_filter=True, + # vad_parameters=dict(min_silence_duration_ms=1000), + # ) + # tmp_text = "" + # for segment in segments: + # tmp_text += segment.text + "。" + # print(mp + "识别完成") + # sys.stdout.flush() + # text.append(tmp_text) - # 数据写出 - sys.stdout.flush() - tools = public_tools.PublicTools() - tools.write_to_file(text, os.path.join(out_folder, name + ".txt")) - sys.stdout.flush() + # # 数据写出 + # sys.stdout.flush() + # tools = public_tools.PublicTools() + # tools.write_to_file(text, os.path.join(out_folder, name + ".txt")) + # sys.stdout.flush() def get_fram(video_path, out_path, sensitivity): @@ -297,12 +297,12 @@ def get_fram(video_path, out_path, sensitivity): exit(0) -def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type): - v_l = ClipVideo( - video_path, video_out_folder, image_out_folder, sensitivity, gpu_type - ) +# def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type): +# v_l = ClipVideo( +# video_path, video_out_folder, image_out_folder, sensitivity, gpu_type +# ) - # 开始分离音频 - m_l = SplitAudio(video_out_folder, v_l) - # 开始识别字幕 - GetText(os.path.dirname(video_out_folder), m_l) +# # 开始分离音频 +# m_l = SplitAudio(video_out_folder, v_l) +# # 开始识别字幕 +# GetText(os.path.dirname(video_out_folder), m_l) diff --git a/src/main/Service/Book/ReverseBook.ts b/src/main/Service/Book/ReverseBook.ts index 348da79..55bb375 100644 --- a/src/main/Service/Book/ReverseBook.ts +++ b/src/main/Service/Book/ReverseBook.ts @@ -8,7 +8,7 @@ import { DEFINE_STRING } from "../../../define/define_string"; import path from 'path' import { BasicReverse } from './basicReverse' import { BookTaskDetailService } from '../../../define/db/service/Book/bookTaskDetailService' -import { TaskScheduler } from "../task/taskScheduler" +import { LogScheduler 
} from "../task/logScheduler" import { Book } from '../../../model/book' import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum' import { GeneralResponse } from '../../../model/generalResponse' @@ -28,7 +28,7 @@ import { isEmpty } from 'lodash' */ export class ReverseBook { basicReverse: BasicReverse - taskScheduler: TaskScheduler + logScheduler: LogScheduler mjOpt: MJOpt = new MJOpt() sdOpt: SDOpt = new SDOpt() tagDefine: TagDefine @@ -42,7 +42,7 @@ export class ReverseBook { this.tagDefine = new TagDefine() this.subtitle = new Subtitle() this.watermark = new Watermark() - this.taskScheduler = new TaskScheduler() + this.logScheduler = new LogScheduler() this.bookServiceBasic = new BookServiceBasic() this.bookBasic = new BookBasic() } @@ -301,7 +301,7 @@ export class ReverseBook { await this.bookServiceBasic.AddBookBackTask(book.id, task_type, TaskExecuteType.AUTO, bookTaskDetail.bookTaskId, bookTaskDetail.id, DEFINE_STRING.BOOK.REVERSE_PROMPT_RETURN ); // 添加返回日志 - await this.taskScheduler.AddLogToDB(book.id, book.type, `添加 ${task_type} 反推任务成功`, bookTaskDetail.bookTaskId, LoggerStatus.SUCCESS) + await this.logScheduler.AddLogToDB(book.id, book.type, `添加 ${task_type} 反推任务成功`, bookTaskDetail.bookTaskId, LoggerStatus.SUCCESS) } } catch (error) { throw error diff --git a/src/main/Service/Book/basicReverse.ts b/src/main/Service/Book/basicReverse.ts index d63c88a..51354f7 100644 --- a/src/main/Service/Book/basicReverse.ts +++ b/src/main/Service/Book/basicReverse.ts @@ -5,7 +5,7 @@ const { exec } = require('child_process') const execAsync = util.promisify(exec) import { define } from '../../../define/define' import { BookService } from '../../../define/db/service/Book/bookService' -import { TaskScheduler } from '../task/taskScheduler' +import { LogScheduler } from '../task/logScheduler' import { LoggerStatus, LoggerType, OtherData } from '../../../define/enum/softwareEnum' import { errorMessage, successMessage } from '../../Public/generalTools' import { CheckFileOrDirExist, CheckFolderExistsOrCreate } from '../../../define/Tools/file' @@ -35,11 +35,11 @@ export class BasicReverse { bookTaskDetailService: BookTaskDetailService bookBackTaskListService: BookBackTaskListService - taskScheduler: TaskScheduler + logScheduler: LogScheduler ffmpegOptions: FfmpegOptions constructor() { - this.taskScheduler = new TaskScheduler() + this.logScheduler = new LogScheduler() this.ffmpegOptions = new FfmpegOptions() } @@ -109,7 +109,7 @@ export class BasicReverse { if (taskRes.code == 0) { throw new Error(taskRes.message) } - this.taskScheduler.AddLogToDB( + this.logScheduler.AddLogToDB( bookId, book.type, `添加分镜任务成功`, @@ -149,7 +149,7 @@ export class BasicReverse { let sensitivity = 30 // 开始之前,推送日志 let log_content = `开始进行分镜操作,视频地址:${oldVideoPath},敏感度:${sensitivity},正在调用程序进行处理` - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, log_content, @@ -170,7 +170,7 @@ export class BasicReverse { // 有错误输出 if (output.stderr != '') { let error_msg = `分镜成功,但有警告提示:${output.stderr}` - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, error_msg, @@ -187,7 +187,7 @@ export class BasicReverse { BookTaskStatus.STORYBOARD_FAIL, error_message ) - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, error_message, @@ -205,7 +205,7 @@ export class BasicReverse { BookTaskStatus.STORYBOARD_FAIL, error_msg ) - await this.taskScheduler.AddLogToDB( + await 
this.logScheduler.AddLogToDB( bookId, book.type, error_msg, @@ -237,7 +237,7 @@ export class BasicReverse { this.bookTaskService.UpdateBookTaskStatus(bookTaskId, BookTaskStatus.STORYBOARD_DONE) // 分镜成功,推送日志 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `分镜成功,分镜数据如下:${frameJsonData}`, @@ -310,7 +310,7 @@ export class BasicReverse { if (bookTaskDetail.data.length <= 0) { // 传入的分镜数据为空,需要重新获取 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `没有传入分镜数据,开始调用分镜方法`, @@ -339,7 +339,7 @@ export class BasicReverse { this.bookTaskService.UpdateBookTaskStatus(bookTask.id, BookTaskStatus.SPLIT) // 有分镜数据,开始处理 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `成功获取分镜数据,开始添加裁剪视频任务`, @@ -363,7 +363,7 @@ export class BasicReverse { } } // 添加日志 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `添加视频裁剪任务成功`, @@ -424,7 +424,7 @@ export class BasicReverse { // 小改小说批次的状态 this.bookTaskService.UpdateBookTaskStatus(bookTaskDetail.bookTaskId, BookTaskStatus.SPLIT_DONE) // 结束,分镜完毕,推送日志,返回成功 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookTaskDetail.bookId, book.type, `${bookTaskDetail.name}_视频裁剪完成`, @@ -490,7 +490,7 @@ export class BasicReverse { }) } if (bookTaskRes.data.bookTasks.length <= 0 || bookTaskRes.data.total <= 0) { - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `没有找到对应的小说批次任务数据,请检查bookId是否正确`, @@ -508,7 +508,7 @@ export class BasicReverse { bookTaskId: bookTask.id }) if (bookTaskDetails.data.length <= 0) { - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `没有找到对应的小说批次任务数据,请检查bookId是否正确,或者手动执行`, @@ -531,7 +531,7 @@ export class BasicReverse { throw new Error(taskRes.message) } // 添加日志 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `添加音频 ${taskRes.data.name} 分离任务成功`, @@ -588,7 +588,7 @@ export class BasicReverse { }) // 推送成功消息 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( task.bookId, book.type, `${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`, @@ -656,7 +656,7 @@ export class BasicReverse { throw new Error(taskRes.message) } - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `添加 ${taskRes.data.name} 抽帧任务成功`, @@ -701,7 +701,7 @@ export class BasicReverse { }) // 推送成功消息 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( book.id, book.type, `${bookTaskDetail.name}抽帧成功,输出地址:${outputFramePath}`, @@ -797,7 +797,7 @@ export class BasicReverse { } } - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `添加提取字幕任务成功`, @@ -821,9 +821,9 @@ export class BasicReverse { // 判断是不是用本地的wisper服务 if (isWisper) { // 开始调用wisper - // 使用异步的方法调用一个python程序,然后写入到指定的json文件中k + // 使用异步的方法调用一个python程序,然后写入到指定的json文件中 let out_dir = path.dirname(bookTaskDetail.videoPath) - + // #TODO -t 被移除 let command = `"${path.join(define.scripts_path, 'Lai.exe')}" "-t" "${out_dir}" "${bookTaskDetail.audioPath }" "${bookTaskDetail.name}"` const output = await execAsync(command, { @@ -833,7 +833,7 @@ export class BasicReverse { // 有错误输出 if (output.stderr != '') { let error_msg = `提取字幕成功,但有警告提示:${output.stderr}` - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( book.id, book.type, error_msg, @@ -856,7 +856,7 @@ export class 
BasicReverse { }) // 提取字幕成功,推送日志 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( book.id, book.type, `${bookTaskDetail.name} 提取字幕成功`, diff --git a/src/main/Service/Book/bookFrame.ts b/src/main/Service/Book/bookFrame.ts index 2c9966c..502e0d9 100644 --- a/src/main/Service/Book/bookFrame.ts +++ b/src/main/Service/Book/bookFrame.ts @@ -7,7 +7,7 @@ import { FfmpegOptions } from "../ffmpegOptions"; import { CheckFileOrDirExist, CopyFileOrFolder, DeleteFolderAllFile } from "../../../define/Tools/file"; import fs from 'fs'; import { Book } from "../../../model/book"; -import { TaskScheduler } from '../task/taskScheduler'; +import { LogScheduler } from '../task/logScheduler'; import { BookBasic } from "./BooKBasic"; import { LoggerStatus, OtherData } from "../../../define/enum/softwareEnum"; import { BasicReverse } from "./basicReverse"; @@ -15,18 +15,17 @@ import { BasicReverse } from "./basicReverse"; export class BookFrame { bookServiceBasic: BookServiceBasic ffmpegOptions: FfmpegOptions - taskScheduler: TaskScheduler + logScheduler: LogScheduler basicReverse: BasicReverse bookBasic: BookBasic constructor() { this.bookServiceBasic = new BookServiceBasic(); this.ffmpegOptions = new FfmpegOptions(); - this.taskScheduler = new TaskScheduler() + this.logScheduler = new LogScheduler() this.bookBasic = new BookBasic() this.basicReverse = new BasicReverse() } - /** * 替换指定分镜的视频当前帧 * @param bookTaskDetailId 指定的小说分镜ID @@ -138,7 +137,7 @@ export class BookFrame { }) } catch (error) { // 传入的分镜数据为空,需要重新获取 - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, `没有传入分镜数据,请先进行分镜计算`, @@ -164,7 +163,7 @@ export class BookFrame { } let res = await this.basicReverse.FrameDataToCutVideoData(item, shortClipData[i]); } - await this.taskScheduler.AddLogToDB( + await this.logScheduler.AddLogToDB( bookId, book.type, "所有的视频裁剪完成,开始抽帧", diff --git a/src/main/Service/Book/bookVideo.ts b/src/main/Service/Book/bookVideo.ts index d50e6a7..35f5c09 100644 --- a/src/main/Service/Book/bookVideo.ts +++ b/src/main/Service/Book/bookVideo.ts @@ -173,6 +173,12 @@ export class BookVideo { } + /** + * 添加剪映草稿 + * @param id + * @param operateBookType + * @returns + */ async AddJianyingDraft(id: string, operateBookType: OperateBookType): Promise { try { await this.InitService(); diff --git a/src/main/Service/MJ/mj.ts b/src/main/Service/MJ/mj.ts index d440b13..5d931e5 100644 --- a/src/main/Service/MJ/mj.ts +++ b/src/main/Service/MJ/mj.ts @@ -13,7 +13,7 @@ import { MJSetting } from "../../../model/Setting/mjSetting"; import { GeneralResponse } from "../../../model/generalResponse" import { LoggerStatus, ResponseMessageType } from "../../../define/enum/softwareEnum"; import { ImageStyle } from "../Book/imageStyle"; -import { TaskScheduler } from "../task/taskScheduler"; +import { LogScheduler } from "../task/logScheduler"; import { Tools } from "../../../main/tools" import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic"; import { PresetService } from "../presetService"; @@ -28,14 +28,14 @@ export class MJOpt { mjApi: MJApi; mjSetting: MJSetting.MjSetting imageStyle: ImageStyle; - taskScheduler: TaskScheduler; + logScheduler: LogScheduler; tools: Tools; bookServiceBasic: BookServiceBasic presetService: PresetService softWareServiceBasic: SoftWareServiceBasic constructor() { this.imageStyle = new ImageStyle() - this.taskScheduler = new TaskScheduler() + this.logScheduler = new LogScheduler() this.tools = new Tools() this.bookServiceBasic = new BookServiceBasic(); 
this.presetService = new PresetService() @@ -528,7 +528,7 @@ export class MJOpt { let taskRes = await this.bookServiceBasic.AddBookBackTask(element.bookId, BookBackTaskType.MJ_IMAGE, TaskExecuteType.AUTO, element.bookTaskId, element.id, responseMessageName ); // 添加返回日志 - await this.taskScheduler.AddLogToDB(element.bookId, BookBackTaskType.MJ_IMAGE, `添加 ${element.name} MJ生成任务成功`, element.bookTaskId, LoggerStatus.SUCCESS) + await this.logScheduler.AddLogToDB(element.bookId, BookBackTaskType.MJ_IMAGE, `添加 ${element.name} MJ生成任务成功`, element.bookTaskId, LoggerStatus.SUCCESS) } // 全部完毕 return successMessage(null, "MJ添加生成图片任务成功", "MJOpt_AddGenerateImageTask") diff --git a/src/main/Service/SD/sd.ts b/src/main/Service/SD/sd.ts index 253c0cd..a019aa5 100644 --- a/src/main/Service/SD/sd.ts +++ b/src/main/Service/SD/sd.ts @@ -48,8 +48,10 @@ export class SDOpt { const id = ids[i]; let scene = await this.presetService.GetScenePresetDetailById(id) if (scene.code == 1) { - // 这边开始拼接 - result += scene.data.prompt + ', ' + if (scene.data) { + // 这边开始拼接 + result += scene.data.prompt + ', ' + } } else { throw new Error(scene.message) } @@ -68,9 +70,11 @@ export class SDOpt { const id = ids[i]; let character = await this.presetService.GetCharacterPresetDetailById(id) if (character.code == 1) { - result += character.data.prompt + ', ' - if (character.data.lora && character.data.lora != '无' && character.data.loraWeight) { - result += `, ` + if (character.data) { + result += character.data.prompt + ', ' + if (character.data.lora && character.data.lora != '无' && character.data.loraWeight) { + result += `, ` + } } } else { throw new Error(character.message) diff --git a/src/main/Service/ServiceBasic/bookServiceBasic.ts b/src/main/Service/ServiceBasic/bookServiceBasic.ts index d631370..1479af5 100644 --- a/src/main/Service/ServiceBasic/bookServiceBasic.ts +++ b/src/main/Service/ServiceBasic/bookServiceBasic.ts @@ -69,7 +69,7 @@ class BookServiceBasic { GetBookTaskDetailDataById = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailDataById(bookTaskDetailId); GetBookTaskDetailData = async (condition: Book.QueryBookTaskDetailCondition, returnEmpty: boolean = false) => await this.bookTaskDetailServiceBasic.GetBookTaskDetailData(condition, returnEmpty); UpdateBookTaskDetail = async (bookTaskDetailId: string, data: Book.SelectBookTaskDetail) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetail(bookTaskDetailId, data); - UpdateBookTaskStatus = async (bookTaskDetailId: string, status: BookTaskStatus) => await this.bookTaskDetailServiceBasic.UpdateBookTaskStatus(bookTaskDetailId, status); + UpdateBookTaskStatus = async (bookTaskDetailId: string, status: BookTaskStatus,errorMsg? 
: string) => await this.bookTaskDetailServiceBasic.UpdateBookTaskStatus(bookTaskDetailId, status,errorMsg); DeleteBookTaskDetailReversePromptById = async (bookTaskDetailId: string, reversePromptId: string) => await this.bookTaskDetailServiceBasic.DeleteBookTaskDetailReversePromptById(bookTaskDetailId); DeleteBoookTaskDetailGenerateImage = async (bookTaskDetailId: string) => await this.bookTaskDetailServiceBasic.DeleteBoookTaskDetailGenerateImage(bookTaskDetailId); UpdateBookTaskDetailReversePrompt = async (bookTaskDetailId: string, reversePromptId: string, data: Book.ReversePrompt) => await this.bookTaskDetailServiceBasic.UpdateBookTaskDetailReversePrompt(bookTaskDetailId, reversePromptId, data); diff --git a/src/main/Service/Subtitle/subtitle.ts b/src/main/Service/Subtitle/subtitle.ts index 0f470a3..12f9f53 100644 --- a/src/main/Service/Subtitle/subtitle.ts +++ b/src/main/Service/Subtitle/subtitle.ts @@ -16,7 +16,7 @@ import fs from 'fs' import { GeneralResponse } from '../../../model/generalResponse' import { BookServiceBasic } from '../ServiceBasic/bookServiceBasic' import { LoggerStatus, OtherData, ResponseMessageType } from '../../../define/enum/softwareEnum' -import { TaskScheduler } from '../task/taskScheduler' +import { LogScheduler } from '../task/logScheduler' import { SubtitleModel } from '../../../model/subtitle' import { BookTaskStatus, OperateBookType } from '../../../define/enum/bookEnum' import axios from 'axios' @@ -24,8 +24,9 @@ import { GptService } from '../GPT/gpt' import FormData from 'form-data' import { RetryWithBackoff } from '../../../define/Tools/common' import { DEFINE_STRING } from '../../../define/define_string' +import { DraftTimeLineJson } from '../jianying/jianyingService' const util = require('util') -const { exec } = require('child_process') +const { spawn, exec } = require('child_process') const execAsync = util.promisify(exec) const fspromises = fs.promises @@ -36,12 +37,12 @@ const fspromises = fs.promises export class Subtitle { ffmpegOptions: FfmpegOptions bookServiceBasic: BookServiceBasic - taskScheduler: TaskScheduler + logScheduler: LogScheduler gptService: GptService constructor() { this.bookServiceBasic = new BookServiceBasic() - this.taskScheduler = new TaskScheduler() + this.logScheduler = new LogScheduler() this.ffmpegOptions = new FfmpegOptions() this.gptService = new GptService() } @@ -74,28 +75,6 @@ export class Subtitle { return frameTimes } - /** - * 加载指定的的小说相关的所有的数据 - * @param bookId 小说ID - * @param bookTaskId 小说任务ID - * @returns - */ - async GetBookAllData(bookId: string, bookTaskId: string = null): Promise<{ book: Book.SelectBook, bookTask: Book.SelectBookTask, bookTaskDetails: Book.SelectBookTaskDetail[] }> { - let { book, bookTask } = await this.bookServiceBasic.GetBookAndTask(bookId, bookTaskId ? 
bookTaskId : 'output_00001')
-        if (isEmpty(book.subtitlePosition)) {
-            throw new Error("请先设置小说的字幕位置")
-        }
-        // 获取所有的分镜数据
-        let bookTaskDetails = await this.bookServiceBasic.GetBookTaskDetailData({
-            bookId: bookId,
-            bookTaskId: bookTask.id
-        })
-        if (bookTaskDetails.length <= 0) {
-            throw new Error("没有找到对应的分镜数据,请先执行对应的操作")
-        }
-        return { book, bookTask, bookTaskDetails }
-    }
-
     /**
      * 通用的小说获取分案的返回方法
      * @param content 获取的文案内容
@@ -123,7 +102,7 @@
             }, DEFINE_STRING.BOOK.GET_COPYWRITING_RETURN)
 
             // 添加日志
-            await this.taskScheduler.AddLogToDB(
+            await this.logScheduler.AddLogToDB(
                 book.id,
                 book.type,
                 `${bookTaskDetail.name} 识别文案成功`,
@@ -562,7 +541,7 @@
             })
 
             // 推送成功消息
-            await this.taskScheduler.AddLogToDB(
+            await this.logScheduler.AddLogToDB(
                 book.id,
                 book.type,
                 `${bookTaskDetail.name}分离音频成功,输出地址:${audioPath}`,
@@ -667,4 +646,73 @@
         }
     }
     //#endregion
+
+
+    //#region 本地Whisper识别字幕相关操作
+
+    async GetTextByLocalWhisper(frameTimeList: DraftTimeLineJson[], outDir: string, mp3Dir: string, localWhisperPath?: string): Promise<void> {
+        try {
+            let localWhisperPathExePath = localWhisperPath
+            if (isEmpty(localWhisperPathExePath)) {
+                localWhisperPathExePath = path.join(define.scripts_path, 'localWhisper/local_whisper.exe')
+            }
+            return new Promise<void>((resolve, reject) => {
+                let child = spawn(
+                    localWhisperPathExePath,
+                    ['-ts', outDir, mp3Dir],
+                    { encoding: 'utf-8' }
+                );
+                child.on('error', (error) => {
+                    console.log('error=', error)
+                    this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败输出,失败信息如下:" + error.message))
+                    reject(new Error(error.message))
+                })
+                child.stdout.on('data', (data) => {
+                    console.log(data.toString())
+                    this.logScheduler.ReturnLogger(successMessage(data.toString(), "使用localWhisper识别字幕输出"))
+                })
+                child.stderr.on('data', (data) => {
+                    console.log('stderr=', data.toString())
+                    this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败输出,失败信息如下:stderr = " + data.toString()))
+                    reject(new Error(data.toString()))
+                })
+
+                child.on('close', async (data) => {
+                    console.log('data=', data)
+                    this.logScheduler.ReturnLogger(successMessage(String(data), "使用localWhisper识别字幕完成"))
+                    let textPath = path.join(outDir, '文案.txt')
+                    if (!await CheckFileOrDirExist(textPath)) {
+                        return reject(new Error('没有找到识别输出的文案文件'))
+                    }
+                    let text = await fspromises.readFile(textPath, 'utf-8')
+                    let textLines = text.split(/\r?\n/)
+                    let lastLine = textLines[textLines.length - 1]
+                    // 丢掉最后一行
+                    textLines = textLines.slice(0, -1)
+
+                    if (textLines.length != frameTimeList.length) {
+                        return reject(new Error('分镜和识别文案数量不一致'))
+                    }
+                    // 保存文案
+                    for (let i = 0; i < textLines.length; i++) {
+                        const element = textLines[i];
+                        frameTimeList[i].text = element
+                    }
+                    // 写出
+                    await fspromises.writeFile(path.join(global.config.project_path, '文案.txt'), textLines.join('\n'), 'utf-8')
+                    if (data == 0) {
+                        this.logScheduler.ReturnLogger(successMessage(null, "使用localWhisper识别字幕完成"))
+                    } else {
+                        this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败,失败信息请查看日志"))
+                    }
+                    resolve();
+                })
+            })
+
+        } catch (error) {
+            this.logScheduler.ReturnLogger(errorMessage("使用localWhisper识别字幕失败,失败信息如下:" + error.message))
+            throw error
+        }
+    }
+    //#endregion
 }
diff --git a/src/main/Service/Subtitle/subtitleService.ts b/src/main/Service/Subtitle/subtitleService.ts
index 6502a0f..3144c2d 100644
--- a/src/main/Service/Subtitle/subtitleService.ts
+++ b/src/main/Service/Subtitle/subtitleService.ts
@@ -11,7 +11,7 @@ import fs from 'fs'
 import { CheckFileOrDirExist } from
"../../../define/Tools/file"; import { BookServiceBasic } from "../ServiceBasic/bookServiceBasic"; import { Subtitle } from "./subtitle"; -import { TaskScheduler } from "../task/taskScheduler"; +import { LogScheduler } from "../task/logScheduler"; import { BookTaskStatus, BookType, OperateBookType } from "../../../define/enum/bookEnum"; import { Book } from "../../../model/book"; import { TimeStringToMilliseconds } from "../../../define/Tools/time"; @@ -20,7 +20,7 @@ export class SubtitleService { softWareServiceBasic: SoftWareServiceBasic bookServiceBasic: BookServiceBasic subtitle: Subtitle - taskScheduler: TaskScheduler + logScheduler: LogScheduler constructor() { this.softWareServiceBasic = new SoftWareServiceBasic(); this.bookServiceBasic = new BookServiceBasic(); diff --git a/src/main/Service/ffmpegOptions.ts b/src/main/Service/ffmpegOptions.ts index c7645f9..40da69c 100644 --- a/src/main/Service/ffmpegOptions.ts +++ b/src/main/Service/ffmpegOptions.ts @@ -98,8 +98,6 @@ export class FfmpegOptions { /** * FFmpeg裁剪视频,将一个视频将裁剪指定的时间内的片段 - * @param {*} book 小说对象类 - * @param {*} bookTask 小说批次任务对象类 * @param {*} startTime 开始时间 * @param {*} endTime 结束时间 * @param {*} videoPath 视频地址 @@ -225,7 +223,7 @@ export class FfmpegOptions { // 判断分镜是不是和数据库中的数据匹配的上 let res = await new Promise((resolve, reject) => { Ffmpeg(videoPath) - .inputOptions([`-ss ${MillisecondsToTimeString(frameTime)}`]) + .inputOptions([`-ss ${MillisecondsToTimeString(Math.ceil(frameTime))}`]) .output(outFramePath) .frames(1) .on('end', async function () { diff --git a/src/main/Service/jianying/jianyingService.ts b/src/main/Service/jianying/jianyingService.ts new file mode 100644 index 0000000..a0385a7 --- /dev/null +++ b/src/main/Service/jianying/jianyingService.ts @@ -0,0 +1,216 @@ +import path from 'path'; +import { CheckFileOrDirExist, DeleteFolderAllFile } from '../../../define/Tools/file'; +import fs from "fs"; +import { ValidateJson } from '../../../define/Tools/validate'; +import { FfmpegOptions } from '../ffmpegOptions'; + +/** + * 存放剪映草稿的时间轴数据 + */ +export type DraftTimeLineJson = { + name: string; + startTime: number; + endTime: number; + durationTime: number; + middleTime: number; + videoPath: string; + text: string; + framePath: string; + subVideoPath?: string; + audioPath?: string; +} + +/** + * 剪映的一些服务 + */ +class JianyingService { + draftTimeLine: DraftTimeLineJson[]; + draftJson: any; + ffmpegOptions: FfmpegOptions + constructor() { + this.draftTimeLine = []; + this.ffmpegOptions = new FfmpegOptions(); + } + + /** + * 获取剪映草稿的关键帧和文本 + * @param draftDir 草稿目录 + * @param projectDir 项目目录 + * @param packagePath 包路径 + */ + async GetDraftFrameAndText(draftDir: string, projectDir: string, packagePath: string) { + try { + // 获取草稿文件路径 + let draftJsonPath = path.resolve(draftDir, "draft_content.json"); + if (!await CheckFileOrDirExist(draftJsonPath)) { + throw new Error("剪映草稿文件不存在,请先检查"); + } + // 读取草稿文件内容 + let draftJsonString = await fs.promises.readFile(draftJsonPath, "utf-8"); + if (!ValidateJson(draftJsonString)) { + throw new Error("剪映草稿文件格式错误,请检查"); + } + this.draftJson = JSON.parse(draftJsonString); + + // 检查输出文件夹是否存在 + let projectTmp = path.resolve(projectDir, "tmp"); + if (await CheckFileOrDirExist(projectTmp)) { + // 删除文件夹 + await DeleteFolderAllFile(projectTmp); + } + // 创建输出文件夹 + let projectInput = path.resolve(projectTmp, "input_crop"); + console.log("projectInput", projectInput); + + // 获取剪映的轨道,并且判断是否包含一个video轨道和一个text轨道 + let draftTracks = this.draftJson.tracks; + if (!draftTracks) { + throw new 
Error("剪映草稿文件格式错误,没有轨道,请检查"); + } + let hasVideo = draftTracks.some((track: any) => track.type === "video"); + let hasText = draftTracks.some((track: any) => track.type === "text"); + + if (!(this.draftJson.tracks && hasVideo && hasText)) { + throw new Error("没有检测到剪映草稿文件中的video和text轨道,请检查"); + } + + // 获取视频节点 + let videoNodes = draftTracks.filter((track: any) => track.type === "video")[0].segments; + this.GetVideoTime(videoNodes); + + // 获取文本节点 + let textNodes = draftTracks.filter((track: any) => track.type === "text")[0].segments; + this.GetTextTime(textNodes); + + console.log("场景数:", this.draftTimeLine.length); + + // 将数据写入到文件中 + let txtData = this.draftTimeLine.map((item) => item.text); + let txtPath = path.resolve(projectDir, "文案.txt"); + await fs.promises.writeFile(txtPath, txtData.join("\n"), "utf-8"); + + // 开始抽取关键帧 + await this.GetDraftFrame(projectInput); + + // 将数据写入到json文件中 + let jsonPath = path.resolve(projectDir, "draftFrameData.json"); + await fs.promises.writeFile(jsonPath, JSON.stringify(this.draftTimeLine), "utf-8"); + console.log("GetDraftFrameAndText", jsonPath); + } catch (error) { + throw error; + } + } + + /** + * 在节点数组中查找指定类型和值的节点 + * @param nodes 节点数组 + * @param type 节点类型 + * @param value 节点值 + * @returns 找到的节点 + * @throws 如果没有找到对应的节点则抛出错误 + */ + private FindNode(nodes: any[], type: string, value: any) { + let res = nodes.filter((node: any) => node[type] === value); + if (res.length === 0) { + throw new Error("没有找到对应的节点"); + } + return res[0]; + } + + /** + * 判断文本是否在时间轴内 + * @param draftTimeObject 时间轴对象 + * @param textStartTime 文本开始时间 + * @param textEndTile 文本结束时间 + * @returns 如果文本在时间轴内则返回 true,否则返回 false + */ + private TextIsInTimeLine(draftTimeObject: DraftTimeLineJson, textStartTime: number, textEndTile: number) { + return textStartTime >= draftTimeObject.startTime && textEndTile <= draftTimeObject.endTime; + } + + /** + * 抽取剪映草稿的关键帧 + * @param projectInput 项目输入目录 + */ + private async GetDraftFrame(projectInput: string): Promise { + for (let i = 0; i < this.draftTimeLine.length; i++) { + const element = this.draftTimeLine[i]; + let outImagePath = path.resolve(projectInput, (i + 1).toString().padStart(5, "0") + ".png"); + + // 使用 ffmpeg 抽取关键帧 + let frameRes = await this.ffmpegOptions.FfmpegGetFrame(element.middleTime / 1000, element.videoPath, outImagePath); + if (frameRes.code == 0) { + throw new Error(frameRes.message); + } + this.draftTimeLine[i].framePath = outImagePath; + console.log("已经抽取第", i + 1, "帧"); + } + } + + /** + * 获取文本时间 + * @param textNodes 文本节点数组 + */ + private GetTextTime(textNodes: any[]): void { + let tempText = ""; + let count = 0; + for (let i = 0; i < textNodes.length; i++) { + const element = textNodes[i]; + let textStartTime = element.target_timerange.start; + let textEndTime = textStartTime + element.target_timerange.duration; + let textMaterialId = element.material_id; + let textMaterialNode = this.FindNode(this.draftJson.materials.texts, "id", textMaterialId); + let textContent = textMaterialNode.content; + let textContentJson = JSON.parse(textContent); + + let text = textContentJson.text + "。"; + + // 不在视频的时间轴内,丢弃 + if (count > this.draftTimeLine.length - 1) { + break; + } + if (this.TextIsInTimeLine(this.draftTimeLine[count], textStartTime, textEndTime)) { + tempText += text; + if (i == textNodes.length - 1) { + this.draftTimeLine[count].text = tempText; + } + } else { + this.draftTimeLine[count].text = tempText; + tempText = text; + count += 1; + } + } + } + + /** + * 获取视频时间 + * @param videoNodes 视频节点数组 + */ + private 
GetVideoTime(videoNodes: any[]): void { + for (let i = 0; i < videoNodes.length; i++) { + const element = videoNodes[i]; + let startTime = element.target_timerange.start; + let endTime = startTime + element.target_timerange.duration; + let durationTime = element.target_timerange.duration; + let middleTime = startTime + ((endTime - startTime) / 2); + + let videoId = element.material_id; + let materialNode = this.FindNode(this.draftJson.materials.videos, "id", videoId); + let videoPath = materialNode.path; + this.draftTimeLine.push({ + name: (i + 1).toString().padStart(5, "0"), + startTime, + endTime, + durationTime, + middleTime, + videoPath, + text: "", + framePath: undefined + }) + } + } + +} + + +export default JianyingService; \ No newline at end of file diff --git a/src/main/Service/task/taskScheduler.ts b/src/main/Service/task/logScheduler.ts similarity index 78% rename from src/main/Service/task/taskScheduler.ts rename to src/main/Service/task/logScheduler.ts index 05bfe64..4f9eb4e 100644 --- a/src/main/Service/task/taskScheduler.ts +++ b/src/main/Service/task/logScheduler.ts @@ -4,7 +4,7 @@ import { LoggerStatus, OtherData } from '../../../define/enum/softwareEnum' import { successMessage, errorMessage } from '../../Public/generalTools' import { GeneralResponse } from '../../../model/generalResponse' -export class TaskScheduler { +export class LogScheduler { constructor() { } /** * 添加日志到数据库,然后返回日志信息到前端,日志记录失败不会报错 @@ -20,7 +20,7 @@ export class TaskScheduler { type: string, content: string, bookTaskId: string, - status = LoggerStatus.DOING + status: LoggerStatus = LoggerStatus.DOING ): Promise { try { let log = { @@ -38,7 +38,16 @@ export class TaskScheduler { return res } catch (error) { - return errorMessage(error.message, 'TaskScheduler_AddLogToDB') + return errorMessage(error.message, 'LogScheduler_AddLogToDB') } } + + /** + * 将日志返回到前端 + * @param {*} log 日志信息 + * @returns + */ + ReturnLogger(log: GeneralResponse.ErrorItem | GeneralResponse.SuccessItem) { + global.newWindow[0].win.webContents.send(DEFINE_STRING.SYSTEM.RETURN_LOGGER, log) + } } diff --git a/src/main/Service/videoService/videoHandle.ts b/src/main/Service/videoService/videoHandle.ts new file mode 100644 index 0000000..8ae2426 --- /dev/null +++ b/src/main/Service/videoService/videoHandle.ts @@ -0,0 +1,333 @@ + +import path from 'path'; +import { CheckFileOrDirExist, DeleteFolderAllFile } from '../../../define/Tools/file'; +import fs from 'fs'; +import { LogScheduler } from '../task/logScheduler'; +import { successMessage } from '../../Public/generalTools'; +import { define } from '../../../define/define'; +import util from 'util'; +import { exec } from 'child_process'; +import { ValidateJson } from '../../../define/Tools/validate'; +import { DraftTimeLineJson } from '../jianying/jianyingService'; +const execAsync = util.promisify(exec) +import { FfmpegOptions } from '../ffmpegOptions'; +import { TimeStringToMilliseconds } from '../../../define/Tools/time'; +import { isEmpty } from 'lodash'; +import { Subtitle } from '../Subtitle/subtitle'; + + +type VideoHandleShortVideoTimeLine = { + name: string, + startTime: number; + endTime: number; + videoPath: string, + duration: number +} + +class VideoHandle { + logScheduler: LogScheduler; + ffmpegOptions: FfmpegOptions; + subtitle: Subtitle; + constructor() { + this.logScheduler = new LogScheduler() + this.ffmpegOptions = new FfmpegOptions(); + this.subtitle = new Subtitle(); + } + + + public async StartStoryboarding(videoPath: string, sensitivity: number) { + // 检查抽帧文件是不是存在 
+ let framePath = path.resolve(global.config.project_path, "data/frame"); + if (await CheckFileOrDirExist(framePath)) { + await DeleteFolderAllFile(framePath); + } else { + await fs.promises.mkdir(framePath, { recursive: true }) + } + // 检查输入文件是不是存在 + let inputPath = path.resolve(global.config.project_path, "tmp/input_crop"); + if (await CheckFileOrDirExist(inputPath)) { + await DeleteFolderAllFile(inputPath); + } else { + await fs.promises.mkdir(inputPath, { recursive: true }) + } + + // 检查本事localwhisper是不是存在 + let localwhisperPath = path.resolve(define.scripts_path, "localWhisper/local_whisper.exe"); + if (!await CheckFileOrDirExist(localwhisperPath)) { + throw new Error('localwhisper 不存在,请查看文档安装localwhisper插件环境'); + } + + // 判断输出文件是不是存在,存在删除 + let frameJson = path.resolve(global.config.project_path, "data/frameTimeLine.json"); + if (await CheckFileOrDirExist(frameJson)) { + await fs.promises.unlink(frameJson); + } + + // 开始计算分镜 + let frameTimeList = await this.ComputedFrameTime(videoPath, frameJson, sensitivity); + + // 开始对视频进行切割 + // 先计算时间点 + let shortVideo = [] as VideoHandleShortVideoTimeLine[]; + shortVideo = await this.VideoShortClip(frameTimeList, videoPath, 0.5 * 60 * 1000); + + // 检查长度 + if (shortVideo.length != frameTimeList.length) { + throw new Error('分镜数据和切割视频数据不一致,请检查'); + } + + // 开始切割视频 + console.log(shortVideo); + let subVideoPath = await this.CutViodeToShortClip(shortVideo, frameTimeList) as string[]; + + // 开始抽帧 + await this.GetFrameFromCutVideo(shortVideo, inputPath, subVideoPath, frameTimeList); + + // 开始分离音频 + await this.SplitAudio(frameTimeList, framePath); + + // 开始提取字幕 + await this.subtitle.GetTextByLocalWhisper(frameTimeList, framePath, framePath, localwhisperPath); + + console.log(frameTimeList); + await fs.promises.writeFile(frameJson, JSON.stringify(frameTimeList), 'utf-8'); + } + + /** + * 分离音频 + * @param frameTimeList + * @param framePath + */ + async SplitAudio(frameTimeList: DraftTimeLineJson[], framePath: string) { + for (let i = 0; i < frameTimeList.length; i++) { + const element = frameTimeList[i]; + if (isEmpty(element.subVideoPath)) { + throw new Error('没有找到待分离的视频数据,请检查'); + } + if (!await CheckFileOrDirExist(element.subVideoPath)) { + throw new Error(`视频片段 ${element.subVideoPath} 不存在,请检查`); + } + let audioPath = path.resolve(framePath, `${element.name}.mp3`); + if (await CheckFileOrDirExist(audioPath)) { + await fs.promises.unlink(audioPath); + } + let res = await this.ffmpegOptions.FfmpegExtractAudio(element.subVideoPath, audioPath) + if (res.code == 0) { + throw new Error(res.message); + } + if (!await CheckFileOrDirExist(audioPath)) { + throw new Error(`分离音频 ${audioPath} 失败,没有找到分离后的音频文件,请检查`); + } + element.audioPath = audioPath; + } + } + + + async GetFrameFromCutVideo(shortVideo: VideoHandleShortVideoTimeLine[], inputPath: string, subVideoPath: string[], frameTimeList: DraftTimeLineJson[]) { + if (shortVideo.length != subVideoPath.length) { + throw new Error('视频片段和分镜数据不一致'); + } + if (shortVideo.length != frameTimeList.length) { + throw new Error('视频片段和分镜数据不一致'); + } + + let imagePath = [] as string[]; + for (let i = 0; i < shortVideo.length; i++) { + const element = shortVideo[i]; + if (!frameTimeList[i]) { + throw new Error('分镜数据和切割视频数据不一致,请检查'); + } + + let middleTime = element.startTime + ((element.endTime - element.startTime) / 2); + let outImagePath = path.resolve(inputPath, `${element.name}.png`); + let res = await this.ffmpegOptions.FfmpegGetFrame(middleTime, element.videoPath, outImagePath); + if (res.code == 0) { + throw new 
Error(res.message);
+            }
+            imagePath.push(outImagePath);
+            // 检查图片是否存在
+            if (!await CheckFileOrDirExist(outImagePath)) {
+                throw new Error(`抽取的图片 ${outImagePath} 不存在,请检查`);
+            }
+            frameTimeList[i].framePath = outImagePath;
+        }
+        return imagePath;
+    }
+
+    /**
+     * 将长视频按照指定的视频长度,给分割为一个个的小视频片段
+     * @param shortVideo
+     */
+    async CutViodeToShortClip(shortVideo: VideoHandleShortVideoTimeLine[], frameTimeList: DraftTimeLineJson[]): Promise<string[]> {
+        let subVideoPaths = [] as string[];
+        for (let i = 0; i < shortVideo.length; i++) {
+            const element = shortVideo[i];
+            if (!frameTimeList[i]) {
+                throw new Error('分镜数据和切割视频数据不一致,请检查');
+            }
+            let subVideoPath = path.resolve(global.config.project_path, `data/frame/${element.name}.mp4`);
+            // 开始截取视频
+            let res = await this.ffmpegOptions.FfmpegCutVideo(
+                element.startTime,
+                element.endTime,
+                element.videoPath,
+                subVideoPath
+            )
+            subVideoPaths.push(subVideoPath);
+
+            if (res.code == 0) {
+                throw new Error(res.message);
+            }
+            if (!await CheckFileOrDirExist(subVideoPath)) {
+                throw new Error(`截取视频片段 ${subVideoPath} 不存在,请检查`);
+            }
+            frameTimeList[i].subVideoPath = subVideoPath;
+        }
+        return subVideoPaths;
+    }
+
+    /**
+     * 预处理视频,将视频切割成小段,减少计算时间
+     * @param frameTimeList 要处理的时间线,是个json数组
+     * @param videoPath 要处理的视频地址
+     * @param duration 视频的持续时间
+     * @returns
+     */
+    public async VideoShortClip(frameTimeList: any[], videoPath: string, duration: number): Promise<VideoHandleShortVideoTimeLine[]> {
+        let shortVideo = [] as VideoHandleShortVideoTimeLine[];
+        let durationTime = 0; // 小视频片段的持续时间
+        // let duration = 5 * 60 * 1000; // 5分钟
+        let tempCount = 0;
+        let shotVideoPath = path.resolve(global.config.project_path, `data/temp_frame_${tempCount}.mp4`); // 新的视频路径
+        let startTime = 0; // 开始时间
+        let endTime = 0; // 结束时间
+        let lastEndTime = 0; // 上一个结束时间
+        for (let i = 0; i < frameTimeList.length; i++) {
+            const item = frameTimeList[i];
+            let temRes = {
+                name: (i + 1).toString().padStart(5, "0"),
+                startTime: item.startTime - lastEndTime,
+                endTime: item.endTime - lastEndTime,
+                videoPath: shotVideoPath,
+                duration: item.endTime - item.startTime
+            }
+            endTime = item.endTime;
+            durationTime += item.endTime - item.startTime;
+            if (durationTime > duration) { // 判断条件切割视频
+                // 开始切割视频
+                let res = await this.ffmpegOptions.FfmpegCutVideo(
+                    startTime,
+                    endTime,
+                    videoPath,
+                    shotVideoPath
+                )
+                if (res.code == 0) {
+                    throw new Error(res.message);
+                }
+                lastEndTime = item.endTime;
+                tempCount++;
+                durationTime = 0;
+                startTime = endTime;
+                endTime = 0;
+                shotVideoPath = path.resolve(global.config.project_path, `data/temp_frame_${tempCount}.mp4`);
+            }
+            shortVideo.push(temRes)
+        }
+        // 最后一个也要切割
+        if (durationTime > 0) {
+            let res = await this.ffmpegOptions.FfmpegCutVideo(
+                startTime,
+                endTime,
+                videoPath,
+                shotVideoPath
+            )
+            if (res.code == 0) {
+                throw new Error(res.message);
+            }
+        }
+        // 将数据写出
+        let shortVideoJson = path.resolve(global.config.project_path, "data/shortVideo.json");
+        await fspromises.writeFile(shortVideoJson, JSON.stringify(shortVideo), 'utf-8');
+        return shortVideo;
+    }
+
+
+    /**
+     * 计算视频的帧时间。
+     *
+     * @param {string} videoPath - 视频文件的路径。
+     * @param {number} sensitivity - 分镜的敏感度。
+     * @throws {Error} 如果视频文件不存在或分镜失败,将抛出错误。
+     *
+     * @remarks
+     * 该方法首先检查视频文件是否存在,如果不存在则抛出错误。
+     * 然后检查输出文件是否存在,如果存在则删除。
+     * 接着调用外部程序进行分镜处理,并记录日志。
+     * 如果分镜成功但有警告信息,将记录警告日志。
+     * 最后检查分镜输出文件是否存在并读取数据,如果没有找到输出文件或数据为空,将抛出错误。
+     */
+    public async ComputedFrameTime(videoPath: string, frameJson: string, sensitivity: number): Promise<DraftTimeLineJson[]> {
+
+        if (!await CheckFileOrDirExist(videoPath)) {
+            throw new Error('视频文件不存在,请检查');
+        }
+
+        
this.logScheduler.ReturnLogger(successMessage(null, "前置检查结束,开始进行分镜", "VideoHandle_StartStoryboarding")); + + // 开始调用分镜 + let command = `"${path.join( + define.scripts_path, + 'Lai.exe' + )}" "-ka" "${videoPath}" "${frameJson}" "${sensitivity}"` + const output = await execAsync(command, { + maxBuffer: 1024 * 1024 * 10, + encoding: 'utf-8' + }) + // 有错误输出 + if (output.stderr != '') { + let error_msg = `分镜成功,但有警告提示:${output.stderr}` + this.logScheduler.ReturnLogger(successMessage(null, error_msg, "VideoHandle_StartStoryboarding")); + } + + // 分镜成功,处理输出 + let josnIsExist = await CheckFileOrDirExist(frameJson); + if (!josnIsExist) { + let error_message = `分镜失败,没有找到对应的分镜输出文件:${frameJson}` + this.logScheduler.ReturnLogger(successMessage(null, error_message, "VideoHandle_StartStoryboarding")); + throw new Error(error_message); + } + let frameJsonDataString = await fs.promises.readFile(frameJson, 'utf-8'); + let res = ValidateJson(frameJsonDataString); + if (!res) { + throw new Error('分镜数据不是有效的JSON格式,请检查'); + } + let frameJsonData = JSON.parse(frameJsonDataString); + if (frameJsonData.length <= 0) { + let error_msg = `分镜失败,没有找到对应的分镜数据` + this.logScheduler.ReturnLogger(successMessage(null, error_msg, "VideoHandle_StartStoryboarding")); + throw new Error(error_msg); + } + let result = [] as DraftTimeLineJson[]; + // 这边将分镜的数据进行一个处理 + for (let i = 0; i < frameJsonData.length; i++) { + const element = frameJsonData[i]; + let st = TimeStringToMilliseconds(element[0]); + let et = TimeStringToMilliseconds(element[1]); + let tempObject = { + name: (i + 1).toString().padStart(5, "0"), + startTime: st, + endTime: et, + middleTime: st + ((et - st) / 2), + videoPath: videoPath, + framePath: '', + text: "", + durationTime: et - st + } as DraftTimeLineJson; + result.push(tempObject); + } + return result; + } + +} + +export default VideoHandle; \ No newline at end of file diff --git a/src/main/Service/watermark.ts b/src/main/Service/watermark.ts index f2fb5cb..99d64a3 100644 --- a/src/main/Service/watermark.ts +++ b/src/main/Service/watermark.ts @@ -13,7 +13,7 @@ import { define } from '../../define/define' import { LOGGER_DEFINE } from '../../define/logger_define' import axios from 'axios' import { Base64ToFile, GetImageBase64 } from '../../define/Tools/image' -import { TaskScheduler } from './task/taskScheduler'; +import { LogScheduler } from './task/logScheduler'; import { LoggerStatus, OtherData, ResponseMessageType } from '../../define/enum/softwareEnum'; import { basicApi } from '../../api/apiBasic'; import { FfmpegOptions } from './ffmpegOptions'; @@ -28,7 +28,7 @@ import { BookTaskService } from '../../define/db/service/Book/bookTaskService'; export class Watermark { softwareService: SoftwareService - taskScheduler: TaskScheduler; + logScheduler: LogScheduler; bookService: BookService bookTaskDetailService: BookTaskDetailService bookTaskService: BookTaskService @@ -39,8 +39,8 @@ export class Watermark { if (!this.softwareService) { this.softwareService = await SoftwareService.getInstance() } - if (!this.taskScheduler) { - this.taskScheduler = new TaskScheduler() + if (!this.logScheduler) { + this.logScheduler = new LogScheduler() } if (!this.bookService) { this.bookService = await BookService.getInstance() @@ -449,7 +449,7 @@ export class Watermark { }, DEFINE_STRING.BOOK.REMOVE_WATERMARK_RETURN) - this.taskScheduler.AddLogToDB(book.id, book.type, `${element.name} 去除水印完成`, element.bookTaskId, LoggerStatus.SUCCESS) + this.logScheduler.AddLogToDB(book.id, book.type, `${element.name} 去除水印完成`, 
element.bookTaskId, LoggerStatus.SUCCESS) } // 全部完毕 if (operateBookType == OperateBookType.BOOKTASKDETAIL) { diff --git a/src/main/func.js b/src/main/func.js index 85ed471..2f3dcbf 100644 --- a/src/main/func.js +++ b/src/main/func.js @@ -1,612 +1,636 @@ -const fspromises = require("fs").promises; -const { clipboard, shell, ipcRenderer } = require('electron'); -const sharp = require('sharp'); -const path = require("path"); -const util = require('util'); -const { spawn, exec } = require('child_process'); -const execAsync = util.promisify(exec); -const { v4: uuidv4 } = require('uuid'); // 引入UUID库来生成唯一标识符 -const EventEmitter = require('events'); -import { define } from "../define/define"; -import axios from "axios"; -import { DEFINE_STRING } from "../define/define_string"; -import { ClipDraft } from "./Public/clipDraft"; -import { Tools } from "./tools"; -import { PublicMethod } from "./Public/publicMethod" -import { ImageStyleDefine } from "../define/iamgeStyleDefine"; -let tools = new Tools(); -let pm = new PublicMethod(global); +const fspromises = require('fs').promises +const { clipboard, shell, ipcRenderer } = require('electron') +const sharp = require('sharp') +const path = require('path') +const util = require('util') +const { spawn, exec } = require('child_process') +const execAsync = util.promisify(exec) +const { v4: uuidv4 } = require('uuid') // 引入UUID库来生成唯一标识符 +const EventEmitter = require('events') +import { define } from '../define/define' +import axios from 'axios' +import { DEFINE_STRING } from '../define/define_string' +import { ClipDraft } from './Public/clipDraft' +import { Tools } from './tools' +import { PublicMethod } from './Public/publicMethod' +import { ImageStyleDefine } from '../define/iamgeStyleDefine' +let tools = new Tools() +let pm = new PublicMethod(global) import { FLxuAPIImageType } from '../define/enum/image' +import JianyingService from './Service/jianying/jianyingService' +import VideoHandle from './Service/videoService/videoHandle' +import { successMessage } from './Public/generalTools' /** * 获取对应的轨道 */ function find_draft_node(nodes, type, value) { - for (let index = 0; index < nodes.length; index++) { - let node = nodes[index]; - if (node[type] == value) { - return node - } + for (let index = 0; index < nodes.length; index++) { + let node = nodes[index] + if (node[type] == value) { + return node } + } } /** * 判断文件夹是不是存在 - * @param {文件夹地址} folderPath + * @param {文件夹地址} folderPath * @returns true/false */ async function isDirectory(folderPath) { - try { - const stats = await fspromises.stat(folderPath); - return stats.isDirectory(); - } catch (error) { - if (error.code === 'ENOENT') { - return false; - } - throw error; + try { + const stats = await fspromises.stat(folderPath) + return stats.isDirectory() + } catch (error) { + if (error.code === 'ENOENT') { + return false } + throw error + } } /** * 保存新的字幕 - * @param {洗稿后的值} value + * @param {洗稿后的值} value */ async function SaveNewWord(value) { - let new_txt = path.join(global.config.project_path, "new_word.txt") - // 写到一个新的txt文件里面 - let dataString = await tools.writeArrayToFile(value, new_txt); - clipboard.writeText(dataString); - return { - code: 1 - } + let new_txt = path.join(global.config.project_path, 'new_word.txt') + // 写到一个新的txt文件里面 + let dataString = await tools.writeArrayToFile(value, new_txt) + clipboard.writeText(dataString) + return { + code: 1 + } } /** * 提取草稿中的温馨提示,全部提取直接用 - * @param {} value + * @param {} value */ async function GetDraftFriendlyReminder(value) { - try { - // 
console.log(value); - let draft_content_json_path = path.join(global.config.draft_path, `${value[0]}/draft_content.json`); - let old_friendly_reminder_json = await getClipSetting("friendly_reminder_setting"); - // 判断当前的名称是不是存在 - if (old_friendly_reminder_json.code == 0) { - throw new Error(old_friendly_reminder_json.message) - } - let filter_value = old_friendly_reminder_json.value.filter(item => item.name == value[1]); - if (filter_value.length > 0) { - return { - code: 0, - message: "名字重复" - } - } - // 开始提取 - let draft_json = JSON.parse(await fspromises.readFile(draft_content_json_path)); - // console.log(draft_json) - let material_animations = draft_json.materials.material_animations[0]; - let texts = draft_json.materials.texts[0]; - let tracks = draft_json.tracks[1]; - let text_value = JSON.parse(texts.content).text; - let obj = { - id: uuidv4(), - name: value[1], - material_animations, - texts, - tracks, - text_value: text_value - } - // console.log(obj) - // 开始写入 - let clip_setting_json = JSON.parse(await fspromises.readFile(define.clip_setting)); - clip_setting_json.friendly_reminder_setting.push(obj); - await fspromises.writeFile(define.clip_setting, JSON.stringify(clip_setting_json)); - return { - code: 1 - } - } catch (error) { - throw new Error(error); + try { + // console.log(value); + let draft_content_json_path = path.join( + global.config.draft_path, + `${value[0]}/draft_content.json` + ) + let old_friendly_reminder_json = await getClipSetting('friendly_reminder_setting') + // 判断当前的名称是不是存在 + if (old_friendly_reminder_json.code == 0) { + throw new Error(old_friendly_reminder_json.message) } - + let filter_value = old_friendly_reminder_json.value.filter((item) => item.name == value[1]) + if (filter_value.length > 0) { + return { + code: 0, + message: '名字重复' + } + } + // 开始提取 + let draft_json = JSON.parse(await fspromises.readFile(draft_content_json_path)) + // console.log(draft_json) + let material_animations = draft_json.materials.material_animations[0] + let texts = draft_json.materials.texts[0] + let tracks = draft_json.tracks[1] + let text_value = JSON.parse(texts.content).text + let obj = { + id: uuidv4(), + name: value[1], + material_animations, + texts, + tracks, + text_value: text_value + } + // console.log(obj) + // 开始写入 + let clip_setting_json = JSON.parse(await fspromises.readFile(define.clip_setting)) + clip_setting_json.friendly_reminder_setting.push(obj) + await fspromises.writeFile(define.clip_setting, JSON.stringify(clip_setting_json)) + return { + code: 1 + } + } catch (error) { + throw new Error(error) + } } /** * 执行单张重绘的任务 - * @param {执行操作的window} window - * @param {传入的值} value - * @returns + * @param {执行操作的window} window + * @param {传入的值} value + * @returns */ async function ReGenerateImageOne(window, value) { - // console.log(value) - // 将任务加入队列 + // console.log(value) + // 将任务加入队列 - let sd_setting = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - global.requestQuene.enqueue(async () => { - let id = value[1].id; - try { - // 请求一次 - let headers = { "Accept": "application/json", "Content-Type": "application/json" }; - let image_path = value[1].image; - let json_path = path.join(path.dirname(path.dirname(image_path)), "input_crop/" + value[1].name + ".json"); - let json = JSON.parse(await fspromises.readFile(json_path, 'utf-8')); + let sd_setting = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')) + global.requestQuene.enqueue( + async () => { + let id = value[1].id + try { + // 请求一次 + let headers = { Accept: 
'application/json', 'Content-Type': 'application/json' } + let image_path = value[1].image + let json_path = path.join( + path.dirname(path.dirname(image_path)), + 'input_crop/' + value[1].name + '.json' + ) + let json = JSON.parse(await fspromises.readFile(json_path, 'utf-8')) - let tmp_image_path = image_path.split(".png")[0] + "_tmp.png"; + let tmp_image_path = image_path.split('.png')[0] + '_tmp.png' - let image_styles = await ImageStyleDefine.getImageStyleStringByIds(value[1].image_style_list ? value[1].image_style_list : []); + let image_styles = await ImageStyleDefine.getImageStyleStringByIds( + value[1].image_style_list ? value[1].image_style_list : [] + ) - let prompt = sd_setting.webui.prompt + image_styles; - // 拼接提示词 - if (value[1].image_style != null) { - prompt += `((${value[1].image_style})),`; - } - if (value[1].lora != null) { - prompt += `${value[1].lora},`; - } - prompt += value[1].prompt; - - - let model = value[1].model; - - // 判断当前是不是有开修脸修手 - let ADetailer = { - args: sd_setting.adetailer - }; - // 判断请求的模式 - if (model == "img2img") { - let web_api = global.config.webui_api_url + 'sdapi/v1/img2img' - let sd_config = json["webui_config"]; - sd_config["seed"] = -1 - // 拼接后的提示词 - sd_config.prompt = prompt; - let init_image = sd_config.init_images; - let im = await fspromises.readFile(init_image, 'binary'); - - sd_config.init_images = [new Buffer.from(im, 'binary').toString('base64')]; - - sd_config.denoising_strength = value[1].denoising_strength; - - if (value[1].adetailer) { - let ta = { - ADetailer: ADetailer - } - sd_config.alwayson_scripts = ta; - } - sd_config.height = sd_setting.webui.height; - sd_config.width = sd_setting.webui.width; - - const response = await axios.post(web_api, sd_config); - // console.log(response); - - // 目前是单图出图 - let images = response.data.images; - let imageData = Buffer.from(images[0].split(",", 1)[0], 'base64'); - sharp(imageData) - .toFile(tmp_image_path) - .then(async () => { - // console.log("图生图成功" + image_path); - await tools.deletePngAndDeleteExifData(tmp_image_path, image_path); - window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { id, code: 1, type: value[2] }); - }) - .catch(err => { - throw new Error(err); - }); - - } else if (model == "txt2img") { - let body = { - "prompt": prompt, - "negative_prompt": value[1].negative_prompt, - "seed": -1, - "sampler_name": value[1].sampler_name, - // 提示词相关性 - "cfg_scale": json.webui_config.cfg_scale, - "width": sd_setting.webui.width, - "height": sd_setting.webui.height, - "batch_size": 1, - "n_iter": 1, - "steps": json.webui_config.steps, - "save_images": false, - } - let web_api = global.config.webui_api_url + 'sdapi/v1/txt2img' - if (value[1].adetailer) { - let ta = { - ADetailer: ADetailer - } - body.alwayson_scripts = ta; - } - const response = await axios.post(web_api, body); - // console.log(response); - - // 目前是单图出图 - let images = response.data.images; - let imageData = Buffer.from(images[0].split(",", 1)[0], 'base64'); - sharp(imageData) - .toFile(tmp_image_path) - .then(async () => { - // // console.log("生图成功" + image_path); - await tools.deletePngAndDeleteExifData(tmp_image_path, image_path); - window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { id, code: 1, type: value[2] }); - }) - .catch(err => { - throw new Error(err); - }); - - } else { - throw new Error("SD 模式错误"); - } - } catch (error) { - window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { - id, - code: 0, - type: value[2], - message: `Error Message ${error}` - }); 
- return; + let prompt = sd_setting.webui.prompt + image_styles + // 拼接提示词 + if (value[1].image_style != null) { + prompt += `((${value[1].image_style})),` } - }, value[1].id, DEFINE_STRING.QUEUE_BATCH.SD_BACKSTEP_GENERATE_IMAGE) - return { - code: 1, - message: "加入队列成功" - } + if (value[1].lora != null) { + prompt += `${value[1].lora},` + } + prompt += value[1].prompt + + let model = value[1].model + + // 判断当前是不是有开修脸修手 + let ADetailer = { + args: sd_setting.adetailer + } + // 判断请求的模式 + if (model == 'img2img') { + let web_api = global.config.webui_api_url + 'sdapi/v1/img2img' + let sd_config = json['webui_config'] + sd_config['seed'] = -1 + // 拼接后的提示词 + sd_config.prompt = prompt + let init_image = sd_config.init_images + let im = await fspromises.readFile(init_image, 'binary') + + sd_config.init_images = [new Buffer.from(im, 'binary').toString('base64')] + + sd_config.denoising_strength = value[1].denoising_strength + + if (value[1].adetailer) { + let ta = { + ADetailer: ADetailer + } + sd_config.alwayson_scripts = ta + } + sd_config.height = sd_setting.webui.height + sd_config.width = sd_setting.webui.width + + const response = await axios.post(web_api, sd_config) + // console.log(response); + + // 目前是单图出图 + let images = response.data.images + let imageData = Buffer.from(images[0].split(',', 1)[0], 'base64') + sharp(imageData) + .toFile(tmp_image_path) + .then(async () => { + // console.log("图生图成功" + image_path); + await tools.deletePngAndDeleteExifData(tmp_image_path, image_path) + window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { + id, + code: 1, + type: value[2] + }) + }) + .catch((err) => { + throw new Error(err) + }) + } else if (model == 'txt2img') { + let body = { + prompt: prompt, + negative_prompt: value[1].negative_prompt, + seed: -1, + sampler_name: value[1].sampler_name, + // 提示词相关性 + cfg_scale: json.webui_config.cfg_scale, + width: sd_setting.webui.width, + height: sd_setting.webui.height, + batch_size: 1, + n_iter: 1, + steps: json.webui_config.steps, + save_images: false + } + let web_api = global.config.webui_api_url + 'sdapi/v1/txt2img' + if (value[1].adetailer) { + let ta = { + ADetailer: ADetailer + } + body.alwayson_scripts = ta + } + const response = await axios.post(web_api, body) + // console.log(response); + + // 目前是单图出图 + let images = response.data.images + let imageData = Buffer.from(images[0].split(',', 1)[0], 'base64') + sharp(imageData) + .toFile(tmp_image_path) + .then(async () => { + // // console.log("生图成功" + image_path); + await tools.deletePngAndDeleteExifData(tmp_image_path, image_path) + window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { + id, + code: 1, + type: value[2] + }) + }) + .catch((err) => { + throw new Error(err) + }) + } else { + throw new Error('SD 模式错误') + } + } catch (error) { + window[0].win.webContents.send(DEFINE_STRING.REGENERATE_IMAGE_RETUN, { + id, + code: 0, + type: value[2], + message: `Error Message ${error}` + }) + return + } + }, + value[1].id, + DEFINE_STRING.QUEUE_BATCH.SD_BACKSTEP_GENERATE_IMAGE + ) + return { + code: 1, + message: '加入队列成功' + } } /** * 添加草稿 - * @param {*} value + * @param {*} value */ async function addDraft(value) { - try { - let clip = new ClipDraft(global, value); - let res = await clip.addDraft(); + try { + let clip = new ClipDraft(global, value) + let res = await clip.addDraft() - return res; - } catch (error) { - return { - code: 0, - message: `An error occurred: ${error}` - } + return res + } catch (error) { + return { + code: 0, + message: `An error occurred: 
${error}` } + } } /** - * + * * @returns 返回获取的字体样式的列表 */ async function getClipSetting(style_name) { - try { - let clip_setting = JSON.parse(await fspromises.readFile(define.clip_setting)); - return { - code: 1, - value: clip_setting[style_name] - } - } catch (error) { - return { - code: 0, - message: `Error message ${error, toString()}` - } + try { + let clip_setting = JSON.parse(await fspromises.readFile(define.clip_setting)) + return { + code: 1, + value: clip_setting[style_name] } + } catch (error) { + return { + code: 0, + message: `Error message ${(error, toString())}` + } + } } /** * 获取剪映的草稿字体设置 - * @param {传入草稿文件夹} value + * @param {传入草稿文件夹} value * @returns 返回样式字符串 */ async function getDraftTextStyle(value) { - let draft_path = path.join(global.config.draft_path, value[1].toString()); + let draft_path = path.join(global.config.draft_path, value[1].toString()) - try { - // 判断当前的名字是不是存在 - let name = value[0]; - // 直接保存 - let clip_setting = await fspromises.readFile(define.clip_setting); - let clip_setting_json = JSON.parse(clip_setting); - let exist_style = clip_setting_json.text_style; - let isExist = false; - exist_style.forEach(item => { - if (item.name == name) { - isExist = true; - } - }); - if (isExist) { - return { - code: 0, - message: "预设样式名称已存在" - } - } - - - let draft_config = JSON.parse(await fspromises.readFile(path.join(draft_path, 'draft_content.json'))); - let text = draft_config.materials.texts[0] - let content = text.content; - let text_json = JSON.parse(content); - let style = text_json.styles; - let srt_node = find_draft_node(draft_config.tracks, "type", "text").segments[0]; - let clip = srt_node.clip; - - let obj = { - name: value[0], - id: uuidv4(), - style, - font_size: text.font_size, - fonts: text.fonts.length > 0 ? text.fonts[0].title : "", - style_name: text.style_name, - clip, - ratio: draft_config.canvas_config.ratio - } - let text_style = clip_setting_json.text_style; - text_style.push(obj); - await fspromises.writeFile(define.clip_setting, JSON.stringify(clip_setting_json)); - - return { - code: 1, - } - } catch (error) { - return { - code: 0, - message: `Error message ${error.toString()}` - - } + try { + // 判断当前的名字是不是存在 + let name = value[0] + // 直接保存 + let clip_setting = await fspromises.readFile(define.clip_setting) + let clip_setting_json = JSON.parse(clip_setting) + let exist_style = clip_setting_json.text_style + let isExist = false + exist_style.forEach((item) => { + if (item.name == name) { + isExist = true + } + }) + if (isExist) { + return { + code: 0, + message: '预设样式名称已存在' + } } - return; -} + let draft_config = JSON.parse( + await fspromises.readFile(path.join(draft_path, 'draft_content.json')) + ) + let text = draft_config.materials.texts[0] + let content = text.content + let text_json = JSON.parse(content) + let style = text_json.styles + let srt_node = find_draft_node(draft_config.tracks, 'type', 'text').segments[0] + let clip = srt_node.clip + + let obj = { + name: value[0], + id: uuidv4(), + style, + font_size: text.font_size, + fonts: text.fonts.length > 0 ? 
text.fonts[0].title : '', + style_name: text.style_name, + clip, + ratio: draft_config.canvas_config.ratio + } + let text_style = clip_setting_json.text_style + text_style.push(obj) + await fspromises.writeFile(define.clip_setting, JSON.stringify(clip_setting_json)) + + return { + code: 1 + } + } catch (error) { + return { + code: 0, + message: `Error message ${error.toString()}` + } + } + return +} /** * 文案对齐 - * @param {分镜的数据} value + * @param {分镜的数据} value */ async function alginDraftImgToText(value) { - let draft_path = path.join(global.config.draft_path, value[0]); - let text_value = value[1]; + let draft_path = path.join(global.config.draft_path, value[0]) + let text_value = value[1] - try { - // 读取草稿 - let draft_config = await fspromises.readFile(path.join(draft_path, 'draft_content.json')); - // 获取字幕轨道 - let draft_config_json = JSON.parse(draft_config); + try { + // 读取草稿 + let draft_config = await fspromises.readFile(path.join(draft_path, 'draft_content.json')) + // 获取字幕轨道 + let draft_config_json = JSON.parse(draft_config) - // 所有的字幕轨道里面的数据,读取出来 - // 循环的时候,判断当前字幕是不是在第一行 - let srt_nodes = find_draft_node(draft_config_json.tracks, "type", "text").segments; - let img_nodes = find_draft_node(draft_config_json.tracks, "type", "video").segments; + // 所有的字幕轨道里面的数据,读取出来 + // 循环的时候,判断当前字幕是不是在第一行 + let srt_nodes = find_draft_node(draft_config_json.tracks, 'type', 'text').segments + let img_nodes = find_draft_node(draft_config_json.tracks, 'type', 'video').segments - let srt_list = []; - let srt_obj = null - let new_srt_list = [] - let text_count = 0; - for (let i = 0; i < srt_nodes.length;) { - const element = srt_nodes[i]; - let material_id = element.material_id; - // 获取字幕内容,里面包含样式和内容 - let srt_content = JSON.parse(find_draft_node(draft_config_json.materials.texts, "id", material_id).content); - let srt_value = srt_content.text; - // console.log(srt_value) - let start_time = element.target_timerange.start; - let end_time = element.target_timerange.start + element.target_timerange.duration; - let obj = { - start_time, - end_time, - srt_value - }; + let srt_list = [] + let srt_obj = null + let new_srt_list = [] + let text_count = 0 + for (let i = 0; i < srt_nodes.length; ) { + const element = srt_nodes[i] + let material_id = element.material_id + // 获取字幕内容,里面包含样式和内容 + let srt_content = JSON.parse( + find_draft_node(draft_config_json.materials.texts, 'id', material_id).content + ) + let srt_value = srt_content.text + // console.log(srt_value) + let start_time = element.target_timerange.start + let end_time = element.target_timerange.start + element.target_timerange.duration + let obj = { + start_time, + end_time, + srt_value + } - // 判断当前字幕是不是在当前句 - if (tools.removePunctuationIncludingEllipsis(value[1][text_count]).includes(tools.removePunctuationIncludingEllipsis(srt_value))) { - if (srt_obj == null) { - srt_obj = {} - srt_obj.start_time = start_time; - srt_obj.value = srt_value; - } - else { - srt_obj.value = srt_obj.value + srt_value; - } - srt_list.push(obj); - i++; - } else { - srt_obj.end_time = srt_list[srt_list.length - 1].end_time; - text_count++; - new_srt_list.push(srt_obj) - srt_obj = null; - } + // 判断当前字幕是不是在当前句 + if ( + tools + .removePunctuationIncludingEllipsis(value[1][text_count]) + .includes(tools.removePunctuationIncludingEllipsis(srt_value)) + ) { + if (srt_obj == null) { + srt_obj = {} + srt_obj.start_time = start_time + srt_obj.value = srt_value + } else { + srt_obj.value = srt_obj.value + srt_value } - // 最后要和音频对齐 + srt_list.push(obj) + i++ + } else { srt_obj.end_time = 
srt_list[srt_list.length - 1].end_time - let audio_nodes = find_draft_node(draft_config_json.tracks, "type", "audio"); - if (audio_nodes != null) { - let endTime = audio_nodes.segments[0].target_timerange.duration; - srt_obj.end_time = endTime; - } + text_count++ new_srt_list.push(srt_obj) + srt_obj = null + } + } + // 最后要和音频对齐 + srt_obj.end_time = srt_list[srt_list.length - 1].end_time + let audio_nodes = find_draft_node(draft_config_json.tracks, 'type', 'audio') + if (audio_nodes != null) { + let endTime = audio_nodes.segments[0].target_timerange.duration + srt_obj.end_time = endTime + } + new_srt_list.push(srt_obj) - // 开始对齐 - for (let i = 0; i < new_srt_list.length; i++) { - if (img_nodes.length < i) { - break; - } - if (i == 96) { - - } - const element = new_srt_list[i]; - let duration = 0; - if (i + 1 < new_srt_list.length) { - duration = new_srt_list[i + 1].start_time - element.start_time - 1; - } else { - duration = element.end_time - element.start_time; - } - img_nodes[i].source_timerange.duration = duration; - img_nodes[i].target_timerange.duration = duration; - img_nodes[i].target_timerange.start = element.start_time; - } - - - let draft_config_string = JSON.stringify(draft_config_json); - await fspromises.writeFile(path.join(draft_path, 'draft_content.json'), draft_config_string); - return { - code: 1 - } - } catch (error) { - // console.log(error) - return define.error = { - code: 0, - message: `error message ${error}` - } + // 开始对齐 + for (let i = 0; i < new_srt_list.length; i++) { + if (img_nodes.length < i) { + break + } + if (i == 96) { + } + const element = new_srt_list[i] + let duration = 0 + if (i + 1 < new_srt_list.length) { + duration = new_srt_list[i + 1].start_time - element.start_time - 1 + } else { + duration = element.end_time - element.start_time + } + img_nodes[i].source_timerange.duration = duration + img_nodes[i].target_timerange.duration = duration + img_nodes[i].target_timerange.start = element.start_time } + let draft_config_string = JSON.stringify(draft_config_json) + await fspromises.writeFile(path.join(draft_path, 'draft_content.json'), draft_config_string) + return { + code: 1 + } + } catch (error) { + // console.log(error) + return (define.error = { + code: 0, + message: `error message ${error}` + }) + } } -let pyrunner; +let pyrunner function createPythonRunner(mainWindow, define) { - if (!pyrunner) { - pyrunner = new func.PythonRunner(mainWindow, define); - } - return pyrunner; + if (!pyrunner) { + pyrunner = new func.PythonRunner(mainWindow, define) + } + return pyrunner } /** * 执行剪映图片对齐脚本 */ function alignDraftImage(mainWindow, value) { - pyrunner = createPythonRunner(mainWindow, define); - let draft_path = path.join(global.config.draft_path, value) - pyrunner.runScript(path.join(define.scripts_path, "03_align_draft_image.py"), [draft_path]) + pyrunner = createPythonRunner(mainWindow, define) + let draft_path = path.join(global.config.draft_path, value) + pyrunner.runScript(path.join(define.scripts_path, '03_align_draft_image.py'), [draft_path]) } /** * 抽取关键帧 - * @param {窗口} mainWindow - * @param {数组,第一个值为剪映草稿位置,第二个值为,输出目录} value + * @param {窗口} mainWindow + * @param {数组,第一个值为剪映草稿位置,第二个值为,输出目录} value */ async function getFrame(value) { - try { - // let scriptPath = path.join(define.scripts_path, '00_clip.py'); - // // 执行生成图片的脚本 - // let script = `cd ${define.scripts_path} && python ${scriptPath} "${project_config_path}"`; - let draft_path = path.join(global.config.draft_path, value[0]).replaceAll("\\", "/"); - let out_dir = 
path.join(value[1]).replaceAll("\\", "/") - let package_path = define.package_path.replaceAll("\\", "/"); - // let command = `${path.join(define.scripts_path, "05_getgrame.exe")} "${draft_path}" "${out_dir}" "${package_path}"` - let command = `"${path.join(define.scripts_path, "Lai.exe")}" -k "${draft_path}" "${out_dir}" "${package_path}"` - const output = await execAsync(command, { maxBuffer: 1024 * 1024 * 10, encoding: 'utf-8' }); - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { - code: 1, - message: "剪映关键帧抽取成功" - }) - return { - code: 1, - message: output.stdout - } - } catch (error) { - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { - code: 0, - message: error.toString() - }) - return { - code: 0, - message: error.toString() - } + try { + // let scriptPath = path.join(define.scripts_path, '00_clip.py'); + // // 执行生成图片的脚本 + // let script = `cd ${define.scripts_path} && python ${scriptPath} "${project_config_path}"`; + let draft_path = path.join(global.config.draft_path, value[0]).replaceAll('\\', '/') + let out_dir = path.join(value[1]).replaceAll('\\', '/') + let package_path = define.package_path.replaceAll('\\', '/') + + let jianying = new JianyingService() + await jianying.GetDraftFrameAndText(draft_path, out_dir, package_path) + + global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { + code: 1, + message: '剪映关键帧抽取成功' + }) + return { + code: 1, + message: '剪映关键帧抽取成功' } + } catch (error) { + global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { + code: 0, + message: error.toString() + }) + return { + code: 0, + message: error.toString() + } + } } /** * 反推调用脚本的方法 - * @param {调用的窗口} win + * @param {调用的窗口} win */ async function PushBackPrompt() { - try { - let py_path = path.join(define.scripts_path, "Lai.exe"); - let sd_config_path = define.sd_setting; - let script = `cd "${define.scripts_path}" && "${py_path}" -p "${sd_config_path.replaceAll('\\', '/')}" "input" "${global.config.project_path}"`; - const output = await execAsync(script, { maxBuffer: 1024 * 1024 * 10, encoding: 'utf-8' }); - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { - code: 1, - message: "反推成功" - }) - return { - code: 1, - data: output.stdout - } - } catch (error) { - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { - code: 0, - message: "反推错误。详情请见错误信息!" - }) - return { - code: 0, - message: error.toString() - } + try { + let py_path = path.join(define.scripts_path, 'Lai.exe') + let sd_config_path = define.sd_setting + let script = `cd "${define.scripts_path}" && "${py_path}" -p "${sd_config_path.replaceAll( + '\\', + '/' + )}" "input" "${global.config.project_path}"` + const output = await execAsync(script, { maxBuffer: 1024 * 1024 * 10, encoding: 'utf-8' }) + global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { + code: 1, + message: '反推成功' + }) + return { + code: 1, + data: output.stdout } + } catch (error) { + global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { + code: 0, + message: '反推错误。详情请见错误信息!' 
+ }) + return { + code: 0, + message: error.toString() + } + } } /** * 执行Python脚本的类,包括了对python脚本的监听 */ class PythonRunner extends EventEmitter { - constructor(mainWindow, define) { - super(); - this.mainWindow = mainWindow; - this.define = define; - } + constructor(mainWindow, define) { + super() + this.mainWindow = mainWindow + this.define = define + } - runScript(scriptPath, args = []) { - const pythonProcess = spawn('python', [scriptPath, ...args]); + runScript(scriptPath, args = []) { + const pythonProcess = spawn('python', [scriptPath, ...args]) - // 监听Python脚本的标准输出 - pythonProcess.stdout.on('data', (data) => { - // console.log(data.toString("utf-8")) - this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, data.toString("utf-8")); - }); + // 监听Python脚本的标准输出 + pythonProcess.stdout.on('data', (data) => { + // console.log(data.toString("utf-8")) + this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, data.toString('utf-8')) + }) - // 监听Python脚本的标准错误输出 - pythonProcess.stderr.on('data', (data) => { - this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, data.toString("utf-8")); - }); + // 监听Python脚本的标准错误输出 + pythonProcess.stderr.on('data', (data) => { + this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, data.toString('utf-8')) + }) - // 监听子进程关闭事件 - pythonProcess.on('close', (code) => { - let closeMessage = `Python script exited with code ${code}`; - // console.log(closeMessage); - this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_CLOSE, closeMessage); - }); + // 监听子进程关闭事件 + pythonProcess.on('close', (code) => { + let closeMessage = `Python script exited with code ${code}` + // console.log(closeMessage); + this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_CLOSE, closeMessage) + }) - // 监听子进程错误事件 - pythonProcess.on('error', (err) => { - let errorMessage = `Python script error: ${err}`; - this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, errorMessage); - }); - } + // 监听子进程错误事件 + pythonProcess.on('error', (err) => { + let errorMessage = `Python script error: ${err}` + this.mainWindow.webContents.send(DEFINE_STRING.PYTHON_OUTPUT, errorMessage) + }) + } } /** * 判断当前文件夹下面的所有文件夹 - * @param {需要获取的文件地址} srcPath - * @returns + * @param {需要获取的文件地址} srcPath + * @returns */ async function getDirectories(srcPath) { - try { - const filesAndDirectories = await fspromises.readdir(srcPath, { withFileTypes: true }); - const directories = filesAndDirectories - .filter(dirent => dirent.isDirectory()) - .map(dirent => fspromises.stat(path.join(srcPath, dirent.name))); + try { + const filesAndDirectories = await fspromises.readdir(srcPath, { withFileTypes: true }) + const directories = filesAndDirectories + .filter((dirent) => dirent.isDirectory()) + .map((dirent) => fspromises.stat(path.join(srcPath, dirent.name))) - const directoryStats = await Promise.all(directories); - // 将目录和它们的状态对象组合成一个数组 - const directoriesWithStats = filesAndDirectories - .filter(dirent => dirent.isDirectory()) - .map((dirent, index) => ({ - name: dirent.name, - ctime: directoryStats[index].ctime - })); + const directoryStats = await Promise.all(directories) + // 将目录和它们的状态对象组合成一个数组 + const directoriesWithStats = filesAndDirectories + .filter((dirent) => dirent.isDirectory()) + .map((dirent, index) => ({ + name: dirent.name, + ctime: directoryStats[index].ctime + })) - // 按创建时间排序,最新的在前 - directoriesWithStats.sort((a, b) => b.ctime - a.ctime); + // 按创建时间排序,最新的在前 + directoriesWithStats.sort((a, b) => b.ctime - a.ctime) - // 提取排序后的目录名称 - const sortedDirectories = 
directoriesWithStats.map(dirent => dirent.name); - return sortedDirectories; - } catch (error) { - console.error('Error reading directories:', error); - throw error; // 或者根据需要处理错误 - } + // 提取排序后的目录名称 + const sortedDirectories = directoriesWithStats.map((dirent) => dirent.name) + return sortedDirectories + } catch (error) { + console.error('Error reading directories:', error) + throw error // 或者根据需要处理错误 + } } /** @@ -614,91 +638,114 @@ async function getDirectories(srcPath) { * @returns 返回剪映草稿列表 */ async function getDraftFileList() { - let draft_path = global.config.draft_path - let res = await getDirectories(draft_path) - return res; + let draft_path = global.config.draft_path + let res = await getDirectories(draft_path) + return res } /** * 修改SD配置 */ async function SaveSDConfig(value) { - try { - let sd_config = JSON.parse((await fspromises.readFile(define.sd_setting, "utf-8")).toString()); - global.config.webui_api_url = value.webui_api_url || value.webui_api_url == '' ? value.webui_api_url : global.config.webui_api_url; + try { + let sd_config = JSON.parse((await fspromises.readFile(define.sd_setting, 'utf-8')).toString()) + global.config.webui_api_url = + value.webui_api_url || value.webui_api_url == '' + ? value.webui_api_url + : global.config.webui_api_url - sd_config.setting.webui_api_url = value.webui_api_url || value.webui_api_url == "" ? value.webui_api_url : sd_config.setting.webui_api_url; - sd_config.setting.type = value.type ? value.type : sd_config.setting.type; - sd_config.setting.batch_size = value.batch_size ? value.batch_size : sd_config.setting.batch_size; - sd_config.setting.style_weight = value.style_weight ? value.style_weight : sd_config.setting.style_weight; + sd_config.setting.webui_api_url = + value.webui_api_url || value.webui_api_url == '' + ? value.webui_api_url + : sd_config.setting.webui_api_url + sd_config.setting.type = value.type ? value.type : sd_config.setting.type + sd_config.setting.batch_size = value.batch_size + ? value.batch_size + : sd_config.setting.batch_size + sd_config.setting.style_weight = value.style_weight + ? value.style_weight + : sd_config.setting.style_weight - sd_config.webui.prompt = value.prompt || value.prompt == "" ? value.prompt : sd_config.webui.prompt; - sd_config.webui.negative_prompt = value.negative_prompt || value.negative_prompt == "" ? value.negative_prompt : sd_config.webui.negative_prompt; - sd_config.webui.denoising_strength = value.denoising_strength || value.denoising_strength == "" ? value.denoising_strength : sd_config.webui.denoising_strength; - sd_config.webui.sampler_name = value.sampler_name ? value.sampler_name : sd_config.webui.sampler_name; - sd_config.webui.steps = value.steps ? value.steps : sd_config.webui.steps; - sd_config.webui.width = value.width ? value.width : sd_config.webui.width; - sd_config.webui.height = value.height ? value.height : sd_config.webui.height; - sd_config.webui.cfg_scale = value.cfg_scale ? value.cfg_scale : sd_config.webui.cfg_scale; - sd_config.webui.adetailer = value.hasOwnProperty("adetailer") ? value.adetailer : sd_config.webui.adetailer; - - if(!sd_config.flux){ - let model = { - model : value.flux_model ? value.flux_model : FLxuAPIImageType.FLUX - } - sd_config.flux = model - }else{ - sd_config.flux.model = value.flux_model ? 
value.flux_model : FLxuAPIImageType.FLUX; - } - await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)); - return { - code: 1, - message: "保存成功" - } - } catch (error) { - return { - code: 0, - message: error.toString() - } + sd_config.webui.prompt = + value.prompt || value.prompt == '' ? value.prompt : sd_config.webui.prompt + sd_config.webui.negative_prompt = + value.negative_prompt || value.negative_prompt == '' + ? value.negative_prompt + : sd_config.webui.negative_prompt + sd_config.webui.denoising_strength = + value.denoising_strength || value.denoising_strength == '' + ? value.denoising_strength + : sd_config.webui.denoising_strength + sd_config.webui.sampler_name = value.sampler_name + ? value.sampler_name + : sd_config.webui.sampler_name + sd_config.webui.steps = value.steps ? value.steps : sd_config.webui.steps + sd_config.webui.width = value.width ? value.width : sd_config.webui.width + sd_config.webui.height = value.height ? value.height : sd_config.webui.height + sd_config.webui.cfg_scale = value.cfg_scale ? value.cfg_scale : sd_config.webui.cfg_scale + sd_config.webui.adetailer = value.hasOwnProperty('adetailer') + ? value.adetailer + : sd_config.webui.adetailer + + if (!sd_config.flux) { + let model = { + model: value.flux_model ? value.flux_model : FLxuAPIImageType.FLUX + } + sd_config.flux = model + } else { + sd_config.flux.model = value.flux_model ? value.flux_model : FLxuAPIImageType.FLUX } + await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)) + return { + code: 1, + message: '保存成功' + } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 保存生成视频的简单配置 */ async function SaveGeneralSetting(value) { - try { - // 先读取 - let config_data = JSON.parse((await fspromises.readFile(define.video_config, 'utf-8')).toString()); - await fspromises.writeFile(define.video_config, JSON.stringify(value)); - return { - code: 1, - message: "保存成功" - } - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + // 先读取 + let config_data = JSON.parse( + (await fspromises.readFile(define.video_config, 'utf-8')).toString() + ) + await fspromises.writeFile(define.video_config, JSON.stringify(value)) + return { + code: 1, + message: '保存成功' } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 获取合成视频的配置信息。包含基本设置。字幕设置。音频设置。水印设置 */ async function GetVideoConfigMessage() { - try { - let data = JSON.parse((await fspromises.readFile(define.video_config, 'utf-8')).toString()); - return { - code: 1, - data: data - } - } catch (error) { - return { - code: 0, - message: error.message - } + try { + let data = JSON.parse((await fspromises.readFile(define.video_config, 'utf-8')).toString()) + return { + code: 1, + data: data } + } catch (error) { + return { + code: 0, + message: error.message + } + } } /** @@ -706,413 +753,366 @@ async function GetVideoConfigMessage() { * 使用python脚本实现 */ async function GetSystemInstallFontName() { - try { - // 执行python - let scriptPath = path.join(define.scripts_path, 'Lai.exe'); - // let scriptPath = path.join(define.scripts_path, '00_clip.exe'); - // 执行生成图片的脚本 - let script = `cd "${define.scripts_path}" && "${scriptPath}" -f "${define.video_config.replaceAll("\\", "/")}"`; - const output = await execAsync(script, { maxBuffer: 1024 * 1024 * 10, encoding: 'utf-8' }); - return { - code: 1, - } - - } catch (error) { - return { - code: 0, - message: error - } + try { + // 执行python + let scriptPath = path.join(define.scripts_path, 'Lai.exe') + // let 
scriptPath = path.join(define.scripts_path, '00_clip.exe'); + // 执行生成图片的脚本 + let script = `cd "${ + define.scripts_path + }" && "${scriptPath}" -f "${define.video_config.replaceAll('\\', '/')}"` + const output = await execAsync(script, { maxBuffer: 1024 * 1024 * 10, encoding: 'utf-8' }) + return { + code: 1 } + } catch (error) { + return { + code: 0, + message: error + } + } } /** * 保存字幕的配置信息 */ async function SaveAssConfig(value) { - try { - - let video_config = JSON.parse(await fspromises.readFile(define.video_config, 'utf8')); - // 判断ID是不是存在。存在的话直接修改。不存在创建 - if (value[1].id == null) { - value[1].id = uuidv4(); - video_config[value[0]].push(value[1]); - } else { - let index = video_config[value[0]].findIndex(item => item.id == value[1].id); - if (index !== -1) { - let old = video_config[value[0]][index]; - old.fontName = value[1].fontName; - old.fontSize = value[1].fontSize; - old.fontColor = value[1].fontColor; - old.transparent = value[1].transparent; - old.positionX = value[1].positionX; - old.positionY = value[1].positionY; - if (value[0] == "watermarkConfig") { - old.showText = value[1].showText; - } - } - } - await fspromises.writeFile(define.video_config, JSON.stringify(video_config)); - return { - code: 1, - message: "添加成功" - } - } catch (error) { - return { - code: 0, - message: error.toString() + try { + let video_config = JSON.parse(await fspromises.readFile(define.video_config, 'utf8')) + // 判断ID是不是存在。存在的话直接修改。不存在创建 + if (value[1].id == null) { + value[1].id = uuidv4() + video_config[value[0]].push(value[1]) + } else { + let index = video_config[value[0]].findIndex((item) => item.id == value[1].id) + if (index !== -1) { + let old = video_config[value[0]][index] + old.fontName = value[1].fontName + old.fontSize = value[1].fontSize + old.fontColor = value[1].fontColor + old.transparent = value[1].transparent + old.positionX = value[1].positionX + old.positionY = value[1].positionY + if (value[0] == 'watermarkConfig') { + old.showText = value[1].showText } + } } + await fspromises.writeFile(define.video_config, JSON.stringify(video_config)) + return { + code: 1, + message: '添加成功' + } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 删除视频配置的指定ID */ async function DeleteVideoConfig(value) { - // console.log(value) - try { - let video_config = JSON.parse(await fspromises.readFile(define.video_config, 'utf-8')); - video_config[value[0]] = video_config[value[0]].filter(item => item.id != value[1]); - await fspromises.writeFile(define.video_config, JSON.stringify(video_config)); - return { - code: 1 - } - } catch (error) { - return { - code: 0, - message: error - } + // console.log(value) + try { + let video_config = JSON.parse(await fspromises.readFile(define.video_config, 'utf-8')) + video_config[value[0]] = video_config[value[0]].filter((item) => item.id != value[1]) + await fspromises.writeFile(define.video_config, JSON.stringify(video_config)) + return { + code: 1 } + } catch (error) { + return { + code: 0, + message: error + } + } } /** * 添加生图任务队列 - * @param {传入的值} value + * @param {传入的值} value */ async function AddImageTask(value) { - try { - - // 判断文件目录是不是存在 - let json_path = path.join(global.config.project_path, "scripts/task_list.json"); - // 判断文件是不是存在 - let isExit = await tools.checkExists(json_path); - let json_data = {}; - if (!isExit) { - const dirPath = path.dirname(json_path); - let dirIsExit = await tools.checkExists(dirPath); - if (!dirIsExit) { - await fspromises.mkdir(dirPath, { recursive: true }); - } - await 
fspromises.writeFile(json_path, '{}'); - } - let task_list = []; - let task_list_data = await pm.GetImageTask(); - if (task_list_data.code == 0) { - return task_list_data; - } - // 获取当前的最大的任务编号 - let current_no = 0; - if (task_list_data.data != null && task_list_data.data.task_list != null) { - - if (task_list_data.data.task_list.length > 0) { - const maxNoObject = task_list_data.data.task_list.reduce((max, obj) => obj.no > max.no ? obj : max, task_list_data.data.task_list[0]); - current_no = maxNoObject.no; - task_list = task_list_data.data.task_list; - } - } - // 循环输出轮次,往里面添加数据 - for (let i = 0; i < value.output_rounds; i++) { - task_list.push({ - id: uuidv4(), - no: current_no + i + 1, - lora: value.lora, - out_folder: 'output_crop_' + String(current_no + i + 1).padStart(5, '0'), - image_style: value.image_style, - image_style_list: value.image_style_list, - status: "wait" - }) - } - // 写入 - task_list_data.data.task_list = task_list; - let write_data = task_list_data.data; - await fspromises.writeFile(json_path, JSON.stringify(write_data)); - return { - code: 1, - data: write_data - } - } catch (error) { - return { - code: 0, - message: error - } + try { + // 判断文件目录是不是存在 + let json_path = path.join(global.config.project_path, 'scripts/task_list.json') + // 判断文件是不是存在 + let isExit = await tools.checkExists(json_path) + let json_data = {} + if (!isExit) { + const dirPath = path.dirname(json_path) + let dirIsExit = await tools.checkExists(dirPath) + if (!dirIsExit) { + await fspromises.mkdir(dirPath, { recursive: true }) + } + await fspromises.writeFile(json_path, '{}') } - + let task_list = [] + let task_list_data = await pm.GetImageTask() + if (task_list_data.code == 0) { + return task_list_data + } + // 获取当前的最大的任务编号 + let current_no = 0 + if (task_list_data.data != null && task_list_data.data.task_list != null) { + if (task_list_data.data.task_list.length > 0) { + const maxNoObject = task_list_data.data.task_list.reduce( + (max, obj) => (obj.no > max.no ? 
obj : max), + task_list_data.data.task_list[0] + ) + current_no = maxNoObject.no + task_list = task_list_data.data.task_list + } + } + // 循环输出轮次,往里面添加数据 + for (let i = 0; i < value.output_rounds; i++) { + task_list.push({ + id: uuidv4(), + no: current_no + i + 1, + lora: value.lora, + out_folder: 'output_crop_' + String(current_no + i + 1).padStart(5, '0'), + image_style: value.image_style, + image_style_list: value.image_style_list, + status: 'wait' + }) + } + // 写入 + task_list_data.data.task_list = task_list + let write_data = task_list_data.data + await fspromises.writeFile(json_path, JSON.stringify(write_data)) + return { + code: 1, + data: write_data + } + } catch (error) { + return { + code: 0, + message: error + } + } } /** * 删除指定ID的值 - * @param {ID} value + * @param {ID} value */ async function DeleteImageTaskList(value) { - try { - // 判断当前的状态。是不是可以删除。正在生成的文件不能删除 - // 目前先是直接删除 - let task_list = JSON.parse(await fspromises.readFile(path.join(global.config.project_path, 'scripts/task_list.json'), 'utf-8')); + try { + // 判断当前的状态。是不是可以删除。正在生成的文件不能删除 + // 目前先是直接删除 + let task_list = JSON.parse( + await fspromises.readFile( + path.join(global.config.project_path, 'scripts/task_list.json'), + 'utf-8' + ) + ) - // 判断状态,删除指定的输出文件夹 - let d_t = task_list.task_list.filter(item => item.id == value)[0]; - // ok 状态删除对应的输出文件夹 - if (d_t.status == "ok" || d_t.status == "error" || d_t.status.startsWith("video")) { - await fspromises.rm(path.join(global.config.project_path, 'tmp/' + d_t.out_folder), { recursive: true, force: true }); - } - - let new_data = task_list.task_list.filter(item => item.id != value); - task_list.task_list = new_data; - await fspromises.writeFile(path.join(global.config.project_path, 'scripts/task_list.json'), JSON.stringify(task_list)); - - return { - code: 1 - } - } catch (error) { - return { - code: 0, - message: error.toString() - } + // 判断状态,删除指定的输出文件夹 + let d_t = task_list.task_list.filter((item) => item.id == value)[0] + // ok 状态删除对应的输出文件夹 + if (d_t.status == 'ok' || d_t.status == 'error' || d_t.status.startsWith('video')) { + await fspromises.rm(path.join(global.config.project_path, 'tmp/' + d_t.out_folder), { + recursive: true, + force: true + }) } -} + let new_data = task_list.task_list.filter((item) => item.id != value) + task_list.task_list = new_data + await fspromises.writeFile( + path.join(global.config.project_path, 'scripts/task_list.json'), + JSON.stringify(task_list) + ) + return { + code: 1 + } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } +} /** * 获取不想要的提示词 */ async function GetBadPrompt() { - try { - let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - return { - code: 1, - value: sd_config.tag.badPrompt - } - - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')) + return { + code: 1, + value: sd_config.tag.badPrompt } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } -/** +/** * 保存不想要的提示词 */ async function SaveBadPrompt(value) { - try { - let tag = value.join(','); - // 写入 - let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - sd_config.tag.badPrompt = tag; - await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)); - return { - code: 1, - } - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + let tag = value.join(',') + // 写入 + let sd_config = JSON.parse(await 
fspromises.readFile(define.sd_setting, 'utf-8')) + sd_config.tag.badPrompt = tag + await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)) + return { + code: 1 } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 一键删除不想要的值 */ async function DeleteBadPrompt() { - try { - let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - let badPrompt = sd_config.tag.badPrompt; - let badPrompts = []; - if (badPrompt != null) { - badPrompts = badPrompt.split(','); - } - // 修改所有的提示词 - let promptPath = await tools.getFilesWithExtensions(path.join(global.config.project_path, 'tmp/input_crop'), '.txt'); - for (let i = 0; i < promptPath.length; i++) { - const item = promptPath[i]; - let txtStr = await fspromises.readFile(item, 'utf-8'); - let tags = txtStr.split(','); - tags = tags.filter(d => !badPrompts.includes(d)); - // 重新写入 - await fspromises.writeFile(item, tags.join(',')); - } - return { - code: 1, - } - - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')) + let badPrompt = sd_config.tag.badPrompt + let badPrompts = [] + if (badPrompt != null) { + badPrompts = badPrompt.split(',') } + // 修改所有的提示词 + let promptPath = await tools.getFilesWithExtensions( + path.join(global.config.project_path, 'tmp/input_crop'), + '.txt' + ) + for (let i = 0; i < promptPath.length; i++) { + const item = promptPath[i] + let txtStr = await fspromises.readFile(item, 'utf-8') + let tags = txtStr.split(',') + tags = tags.filter((d) => !badPrompts.includes(d)) + // 重新写入 + await fspromises.writeFile(item, tags.join(',')) + } + return { + code: 1 + } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } - /** * 打开购买 GPT 的网址 */ async function openGptBuyUrl(value) { - OpenUrl(value) + OpenUrl(value) } /** * 打开传入的网址 - * @param {} value + * @param {} value */ async function OpenUrl(value) { - shell.openExternal(value) + shell.openExternal(value) } /** * 获取ADetailer配置列表 */ async function GetADetailerList() { - try { - console.log(123); - let sd_setting = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - return { - code: 1, - data: sd_setting.adetailer - } - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + console.log(123) + let sd_setting = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')) + return { + code: 1, + data: sd_setting.adetailer } - + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 保存ADetailer数据信息 */ async function SaveADetailerConfig(value) { - try { - let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')); - sd_config.adetailer = value; - await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)); - return { - code: 1, - message: "保存成功" - } - - } catch (error) { - return { - code: 0, - message: error.toString() - } + try { + let sd_config = JSON.parse(await fspromises.readFile(define.sd_setting, 'utf-8')) + sd_config.adetailer = value + await fspromises.writeFile(define.sd_setting, JSON.stringify(sd_config)) + return { + code: 1, + message: '保存成功' } + } catch (error) { + return { + code: 0, + message: error.toString() + } + } } /** * 开始分镜 */ async function StartStoryboarding(value) { - try { - // 判断文件夹是不是存在,不存在创建 - let frame_path = path.join(global.config.project_path, "data/frame"); - let isExist = await tools.checkExists(frame_path); - if (isExist) { - await 
tools.deleteFileOrDirectory(frame_path); - await fspromises.mkdir(frame_path, { recursive: true }); - } else { - await fspromises.mkdir(frame_path, { recursive: true }); - } - let tmp_path = path.join(global.config.project_path, "tmp"); - isExist = await tools.checkExists(tmp_path); - if (!isExist) { - await fspromises.mkdir(tmp_path, { recursive: true }); - } - let input_path = path.join(global.config.project_path, "tmp/input_crop"); - isExist = await tools.checkExists(input_path); - if (isExist) { - await tools.deleteFileOrDirectory(input_path); - await fspromises.mkdir(input_path, { recursive: true }); - } else { - await fspromises.mkdir(input_path, { recursive: true }); - } - - global.newWindow[0].win.webContents.send(DEFINE_STRING.GET_FRAME_RETUN, { code: 1, data: "正在调用进程。请勿关闭程序" }) - let cc = `${path.join(define.scripts_path, 'Lai.exe')}`; - // 获取生成视频设置 - let video_config = JSON.parse(await fspromises.readFile(define.video_config, 'utf-8')); - let gpu = global.gpu.type; - if (video_config.libx264) { - gpu = "OTHER"; - } - let child = spawn(cc, ["-a", value.video_path, frame_path, input_path, value.sensitivity, gpu], { encoding: 'utf-8' }); - child.on('error', console.error) - child.stdout.on('data', (data) => { - console.log(data.toString()); - global.newWindow[0].win.webContents.send(DEFINE_STRING.GET_FRAME_RETUN, { code: 1, data: data.toString() }) - }) - child.stderr.on('data', (data) => { - console.log('stderr=', data.toString()) - global.newWindow[0].win.webContents.send(DEFINE_STRING.GET_FRAME_RETUN, { code: 1, data: data.toString() }) - }) - - child.on('close', async (data) => { - console.log('data=', data.toString()) - // 判断该当前文件夹下面是不是有文案文件。有的话判断最后一行是不是空 - let isE = await tools.checkExists(path.join(global.config.project_path, "data/文案.txt")); - if (isE) { - let lines = (await fspromises.readFile(path.join(global.config.project_path, "data/文案.txt"), 'utf-8')).split(/\r?\n/); - let lastLine = lines[lines.length - 1]; - if (lastLine == "") { - lines = lines.slice(0, -1); - } - await fspromises.writeFile(path.join(global.config.project_path, "文案.txt"), lines.join('\n'), 'utf-8'); - } - global.newWindow[0].win.webContents.send(DEFINE_STRING.GET_FRAME_RETUN, { code: 1, data: data.toString(), type: 0 }) - if (data == 0) { - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { code: 1, message: "分镜、抽帧、语音识别完成!" }) - } else { - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { code: 0, message: "分镜错误。请看详细信息!" 
}) - } - }) - } catch (error) { - global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { code: 0, message: error.toString() }) - return { - code: 0, message: error.toString() - } + try { + let videoHandle = new VideoHandle() + await videoHandle.StartStoryboarding(value.video_path, value.sensitivity) + return successMessage(null, '分镜、抽帧、语音识别完成!') + } catch (error) { + global.newWindow[0].win.webContents.send(DEFINE_STRING.SHOW_MESSAGE_DIALOG, { + code: 0, + message: error.toString() + }) + return { + code: 0, + message: error.toString() } + } } export const func = { - getDraftFileList, - PythonRunner, - getFrame, - alignDraftImage, - alginDraftImgToText, - getDraftTextStyle, - getClipSetting, - addDraft, - ReGenerateImageOne, - PushBackPrompt, - GetDraftFriendlyReminder, - SaveNewWord, - SaveSDConfig, - SaveGeneralSetting, - GetVideoConfigMessage, - GetSystemInstallFontName, - SaveAssConfig, - DeleteVideoConfig, - AddImageTask, - DeleteImageTaskList, - GetBadPrompt, - SaveBadPrompt, - DeleteBadPrompt, - openGptBuyUrl, - GetADetailerList, - SaveADetailerConfig, - OpenUrl, - StartStoryboarding -} \ No newline at end of file + getDraftFileList, + PythonRunner, + getFrame, + alignDraftImage, + alginDraftImgToText, + getDraftTextStyle, + getClipSetting, + addDraft, + ReGenerateImageOne, + PushBackPrompt, + GetDraftFriendlyReminder, + SaveNewWord, + SaveSDConfig, + SaveGeneralSetting, + GetVideoConfigMessage, + GetSystemInstallFontName, + SaveAssConfig, + DeleteVideoConfig, + AddImageTask, + DeleteImageTaskList, + GetBadPrompt, + SaveBadPrompt, + DeleteBadPrompt, + openGptBuyUrl, + GetADetailerList, + SaveADetailerConfig, + OpenUrl, + StartStoryboarding +} diff --git a/src/preload/index.js b/src/preload/index.js index ab8b66b..503e072 100644 --- a/src/preload/index.js +++ b/src/preload/index.js @@ -73,9 +73,8 @@ const api = { }, // 分镜语音识别消息 - StartStoryboarding: async (value) => { - let res = await ipcRenderer.invoke(DEFINE_STRING.START_STORY_BOARDING, value) - }, + StartStoryboarding: async (value) => + await ipcRenderer.invoke(DEFINE_STRING.START_STORY_BOARDING, value), // 获取设置的初始数据 getSettingDafultData: async (callback) => diff --git a/src/renderer/src/components/Backstep/GetFrame.vue b/src/renderer/src/components/Backstep/GetFrame.vue index 0c2fa6a..b157f1e 100644 --- a/src/renderer/src/components/Backstep/GetFrame.vue +++ b/src/renderer/src/components/Backstep/GetFrame.vue @@ -37,7 +37,6 @@ label-placement="left" inline :model="frameValue" - :rules="rules" size="medium" > @@ -68,7 +67,7 @@ -