V 3.3.9 (2025.05.29)

1. Adapted the image-generation package and fixed an issue where some tasks could not retrieve their images.
2. Added an original-content reset for aggregated tweets, which directly resets the GPT prompt, the merged prompt, image generation, and related steps.
3. Improved the loading screen for a better user experience.
4. Improved the home-page display and the UI for a better user experience.
This commit is contained in:
lq1405 2025-05-29 14:44:37 +08:00
parent 50900d3465
commit 7a774c48da
42 changed files with 3256 additions and 2359 deletions

View File

@@ -1,6 +1,6 @@
{
"name": "laitool",
- "version": "3.3.8",
+ "version": "3.3.9",
"description": "An AI tool for image processing, video processing, and other functions.",
"main": "./out/main/index.js",
"author": "laitool.cn",
@@ -102,4 +102,4 @@
"icon": "./resources/icon.ico"
}
}
}
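The only substantive change here is the version bump from 3.3.8 to 3.3.9. For orientation, this "version" field is what Electron itself reports at runtime; a minimal sketch of reading it from the main process (illustrative only, not laitool code):

// Electron resolves app.getVersion() from the "version" field in package.json
import { app } from 'electron'

app.whenReady().then(() => {
  // With this commit applied, this logs "3.3.9"
  console.log(`app version: ${app.getVersion()}`)
})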

Binary file not shown.

View File

@@ -1,29 +0,0 @@
Collecting accelerate
Downloading accelerate-1.0.1-py3-none-any.whl.metadata (19 kB)
Requirement already satisfied: numpy<3.0.0,>=1.17 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (2.1.2)
Requirement already satisfied: packaging>=20.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (24.1)
Collecting psutil (from accelerate)
Downloading psutil-6.1.0-cp37-abi3-win_amd64.whl.metadata (23 kB)
Requirement already satisfied: pyyaml in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (6.0.2)
Requirement already satisfied: torch>=1.10.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (2.5.0)
Requirement already satisfied: huggingface-hub>=0.21.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (0.26.1)
Requirement already satisfied: safetensors>=0.4.3 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from accelerate) (0.4.5)
Requirement already satisfied: filelock in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (3.16.1)
Requirement already satisfied: fsspec>=2023.5.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (2024.10.0)
Requirement already satisfied: requests in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (2.32.3)
Requirement already satisfied: tqdm>=4.42.1 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (4.66.5)
Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (4.12.2)
Requirement already satisfied: networkx in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from torch>=1.10.0->accelerate) (3.4.2)
Requirement already satisfied: jinja2 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from torch>=1.10.0->accelerate) (3.1.4)
Requirement already satisfied: sympy==1.13.1 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from torch>=1.10.0->accelerate) (1.13.1)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from sympy==1.13.1->torch>=1.10.0->accelerate) (1.3.0)
Requirement already satisfied: colorama in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from tqdm>=4.42.1->huggingface-hub>=0.21.0->accelerate) (0.4.6)
Requirement already satisfied: MarkupSafe>=2.0 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from jinja2->torch>=1.10.0->accelerate) (3.0.2)
Requirement already satisfied: charset-normalizer<4,>=2 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\27698\desktop\laitool\resources\scripts\joycaption\.venv\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2024.8.30)
Downloading accelerate-1.0.1-py3-none-any.whl (330 kB)
Downloading psutil-6.1.0-cp37-abi3-win_amd64.whl (254 kB)
Installing collected packages: psutil, accelerate
Successfully installed accelerate-1.0.1 psutil-6.1.0

View File

@@ -1,336 +0,0 @@
import spaces
import gradio as gr
from huggingface_hub import InferenceClient
from torch import nn
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from pathlib import Path
import torch
import torch.amp.autocast_mode
from PIL import Image
import os
import torchvision.transforms.functional as TVF
CLIP_PATH = "google/siglip-so400m-patch14-384"
CHECKPOINT_PATH = Path("cgrkzexw-599808")
TITLE = "<h1><center>JoyCaption Alpha Two (2024-09-26a)</center></h1>"
CAPTION_TYPE_MAP = {
"Descriptive": [
"Write a descriptive caption for this image in a formal tone.",
"Write a descriptive caption for this image in a formal tone within {word_count} words.",
"Write a {length} descriptive caption for this image in a formal tone.",
],
"Descriptive (Informal)": [
"Write a descriptive caption for this image in a casual tone.",
"Write a descriptive caption for this image in a casual tone within {word_count} words.",
"Write a {length} descriptive caption for this image in a casual tone.",
],
"Training Prompt": [
"Write a stable diffusion prompt for this image.",
"Write a stable diffusion prompt for this image within {word_count} words.",
"Write a {length} stable diffusion prompt for this image.",
],
"MidJourney": [
"Write a MidJourney prompt for this image.",
"Write a MidJourney prompt for this image within {word_count} words.",
"Write a {length} MidJourney prompt for this image.",
],
"Booru tag list": [
"Write a list of Booru tags for this image.",
"Write a list of Booru tags for this image within {word_count} words.",
"Write a {length} list of Booru tags for this image.",
],
"Booru-like tag list": [
"Write a list of Booru-like tags for this image.",
"Write a list of Booru-like tags for this image within {word_count} words.",
"Write a {length} list of Booru-like tags for this image.",
],
"Art Critic": [
"Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc.",
"Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it within {word_count} words.",
"Analyze this image like an art critic would with information about its composition, style, symbolism, the use of color, light, any artistic movement it might belong to, etc. Keep it {length}.",
],
"Product Listing": [
"Write a caption for this image as though it were a product listing.",
"Write a caption for this image as though it were a product listing. Keep it under {word_count} words.",
"Write a {length} caption for this image as though it were a product listing.",
],
"Social Media Post": [
"Write a caption for this image as if it were being used for a social media post.",
"Write a caption for this image as if it were being used for a social media post. Limit the caption to {word_count} words.",
"Write a {length} caption for this image as if it were being used for a social media post.",
],
}
HF_TOKEN = os.environ.get("HF_TOKEN", None)
class ImageAdapter(nn.Module):
def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
super().__init__()
self.deep_extract = deep_extract
if self.deep_extract:
input_features = input_features * 5
self.linear1 = nn.Linear(input_features, output_features)
self.activation = nn.GELU()
self.linear2 = nn.Linear(output_features, output_features)
self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))
# Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
self.other_tokens = nn.Embedding(3, output_features)
self.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3
def forward(self, vision_outputs: torch.Tensor):
if self.deep_extract:
x = torch.concat((
vision_outputs[-2],
vision_outputs[3],
vision_outputs[7],
vision_outputs[13],
vision_outputs[20],
), dim=-1)
assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}" # batch, tokens, features
assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
else:
x = vision_outputs[-2]
x = self.ln1(x)
if self.pos_emb is not None:
assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
x = x + self.pos_emb
x = self.linear1(x)
x = self.activation(x)
x = self.linear2(x)
# <|image_start|>, IMAGE, <|image_end|>
other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)
return x
def get_eot_embedding(self):
return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
# Load CLIP
print("Loading CLIP")
clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
clip_model = AutoModel.from_pretrained(CLIP_PATH)
clip_model = clip_model.vision_model
assert (CHECKPOINT_PATH / "clip_model.pt").exists()
print("Loading VLM's custom vision model")
checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu')
checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
clip_model.load_state_dict(checkpoint)
del checkpoint
clip_model.eval()
clip_model.requires_grad_(False)
clip_model.to("cuda")
# Tokenizer
print("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH / "text_model", use_fast=True)
assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f"Tokenizer is of type {type(tokenizer)}"
# LLM
print("Loading LLM")
print("Loading VLM's custom text model")
text_model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH / "text_model", device_map=0, torch_dtype=torch.bfloat16)
text_model.eval()
# Image Adapter
print("Loading image adapter")
image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)
image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu"))
image_adapter.eval()
image_adapter.to("cuda")
@spaces.GPU()
@torch.no_grad()
def stream_chat(input_image: Image.Image, caption_type: str, caption_length: str | int, extra_options: list[str], name_input: str, custom_prompt: str) -> tuple[str, str]:
torch.cuda.empty_cache()
# 'any' means no length specified
length = None if caption_length == "any" else caption_length
if isinstance(length, str):
try:
length = int(length)
except ValueError:
pass
# Build prompt
if length is None:
map_idx = 0
elif isinstance(length, int):
map_idx = 1
elif isinstance(length, str):
map_idx = 2
else:
raise ValueError(f"Invalid caption length: {length}")
prompt_str = CAPTION_TYPE_MAP[caption_type][map_idx]
# Add extra options
if len(extra_options) > 0:
prompt_str += " " + " ".join(extra_options)
# Add name, length, word_count
prompt_str = prompt_str.format(name=name_input, length=caption_length, word_count=caption_length)
if custom_prompt.strip() != "":
prompt_str = custom_prompt.strip()
# For debugging
print(f"Prompt: {prompt_str}")
# Preprocess image
# NOTE: I found the default processor for so400M to have worse results than just using PIL directly
#image = clip_processor(images=input_image, return_tensors='pt').pixel_values
image = input_image.resize((384, 384), Image.LANCZOS)
pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
pixel_values = pixel_values.to('cuda')
# Embed image
# This results in Batch x Image Tokens x Features
with torch.amp.autocast_mode.autocast('cuda', enabled=True):
vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
embedded_images = image_adapter(vision_outputs.hidden_states)
embedded_images = embedded_images.to('cuda')
# Build the conversation
convo = [
{
"role": "system",
"content": "You are a helpful image captioner.",
},
{
"role": "user",
"content": prompt_str,
},
]
# Format the conversation
convo_string = tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = True)
assert isinstance(convo_string, str)
# Tokenize the conversation
# prompt_str is tokenized separately so we can do the calculations below
convo_tokens = tokenizer.encode(convo_string, return_tensors="pt", add_special_tokens=False, truncation=False)
prompt_tokens = tokenizer.encode(prompt_str, return_tensors="pt", add_special_tokens=False, truncation=False)
assert isinstance(convo_tokens, torch.Tensor) and isinstance(prompt_tokens, torch.Tensor)
convo_tokens = convo_tokens.squeeze(0) # Squeeze just to make the following easier
prompt_tokens = prompt_tokens.squeeze(0)
# Calculate where to inject the image
eot_id_indices = (convo_tokens == tokenizer.convert_tokens_to_ids("<|eot_id|>")).nonzero(as_tuple=True)[0].tolist()
assert len(eot_id_indices) == 2, f"Expected 2 <|eot_id|> tokens, got {len(eot_id_indices)}"
preamble_len = eot_id_indices[1] - prompt_tokens.shape[0] # Number of tokens before the prompt
# Embed the tokens
convo_embeds = text_model.model.embed_tokens(convo_tokens.unsqueeze(0).to('cuda'))
# Construct the input
input_embeds = torch.cat([
convo_embeds[:, :preamble_len], # Part before the prompt
embedded_images.to(dtype=convo_embeds.dtype), # Image
convo_embeds[:, preamble_len:], # The prompt and anything after it
], dim=1).to('cuda')
input_ids = torch.cat([
convo_tokens[:preamble_len].unsqueeze(0),
torch.zeros((1, embedded_images.shape[1]), dtype=torch.long), # Dummy tokens for the image (TODO: Should probably use a special token here so as not to confuse any generation algorithms that might be inspecting the input)
convo_tokens[preamble_len:].unsqueeze(0),
], dim=1).to('cuda')
attention_mask = torch.ones_like(input_ids)
# Debugging
print(f"Input to model: {repr(tokenizer.decode(input_ids[0]))}")
#generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=False, suppress_tokens=None)
#generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, top_k=10, temperature=0.5, suppress_tokens=None)
generate_ids = text_model.generate(input_ids, inputs_embeds=input_embeds, attention_mask=attention_mask, max_new_tokens=300, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9
# Trim off the prompt
generate_ids = generate_ids[:, input_ids.shape[1]:]
if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
generate_ids = generate_ids[:, :-1]
caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
return prompt_str, caption.strip()
with gr.Blocks() as demo:
gr.HTML(TITLE)
with gr.Row():
with gr.Column():
input_image = gr.Image(type="pil", label="Input Image")
caption_type = gr.Dropdown(
choices=["Descriptive", "Descriptive (Informal)", "Training Prompt", "MidJourney", "Booru tag list", "Booru-like tag list", "Art Critic", "Product Listing", "Social Media Post"],
label="Caption Type",
value="Descriptive",
)
caption_length = gr.Dropdown(
choices=["any", "very short", "short", "medium-length", "long", "very long"] +
[str(i) for i in range(20, 261, 10)],
label="Caption Length",
value="long",
)
extra_options = gr.CheckboxGroup(
choices=[
"If there is a person/character in the image you must refer to them as {name}.",
"Do NOT include information about people/characters that cannot be changed (like ethnicity, gender, etc), but do still include changeable attributes (like hair style).",
"Include information about lighting.",
"Include information about camera angle.",
"Include information about whether there is a watermark or not.",
"Include information about whether there are JPEG artifacts or not.",
"If it is a photo you MUST include information about what camera was likely used and details such as aperture, shutter speed, ISO, etc.",
"Do NOT include anything sexual; keep it PG.",
"Do NOT mention the image's resolution.",
"You MUST include information about the subjective aesthetic quality of the image from low to very high.",
"Include information on the image's composition style, such as leading lines, rule of thirds, or symmetry.",
"Do NOT mention any text that is in the image.",
"Specify the depth of field and whether the background is in focus or blurred.",
"If applicable, mention the likely use of artificial or natural lighting sources.",
"Do NOT use any ambiguous language.",
"Include whether the image is sfw, suggestive, or nsfw.",
"ONLY describe the most important elements of the image."
],
label="Extra Options"
)
name_input = gr.Textbox(label="Person/Character Name (if applicable)")
gr.Markdown("**Note:** Name input is only used if an Extra Option is selected that requires it.")
custom_prompt = gr.Textbox(label="Custom Prompt (optional, will override all other settings)")
gr.Markdown("**Note:** Alpha Two is not a general instruction follower and will not follow prompts outside its training data well. Use this feature with caution.")
run_button = gr.Button("Caption")
with gr.Column():
output_prompt = gr.Textbox(label="Prompt that was used")
output_caption = gr.Textbox(label="Caption")
run_button.click(fn=stream_chat, inputs=[input_image, caption_type, caption_length, extra_options, name_input, custom_prompt], outputs=[output_prompt, output_caption])
if __name__ == "__main__":
demo.launch()

View File

@@ -1,423 +0,0 @@
#!/usr/bin/env python3
"""
Use JoyCaption to caption images.
"""
import argparse
import dataclasses
import json
import logging
import os
import random
from pathlib import Path
import PIL.Image
import torch
import torch.amp
import torchvision.transforms.functional as TVF
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import (
AutoTokenizer,
LlavaForConditionalGeneration,
PreTrainedTokenizer,
PreTrainedTokenizerFast,
)
def none_or_type(value, desired_type):
if value == "None":
return None
return desired_type(value)
parser = argparse.ArgumentParser()
parser.add_argument("--glob", type=str, help="Glob pattern to find images")
parser.add_argument("--filelist", type=str, help="File containing list of images")
parser.add_argument("--prompt", type=str, help="Prompt to use")
parser.add_argument(
"--prompt-file", type=str, help="JSON file containing prompts to use"
)
parser.add_argument("--batch-size", type=int, default=1, help="Batch size")
parser.add_argument(
"--greedy", action="store_true", help="Use greedy decoding instead of sampling"
)
parser.add_argument(
"--temperature", type=float, default=0.6, help="Sampling temperature"
)
parser.add_argument(
"--top-p", type=lambda x: none_or_type(x, float), default=0.9, help="Top-p sampling"
)
parser.add_argument(
"--top-k", type=lambda x: none_or_type(x, int), default=None, help="Top-k sampling"
)
parser.add_argument(
"--max-new-tokens",
type=int,
default=256,
help="Maximum length of the generated caption (in tokens)",
)
parser.add_argument(
"--num-workers",
type=int,
default=4,
help="Number of workers loading images in parallel",
)
parser.add_argument(
"--model",
type=str,
default="fancyfeast/llama-joycaption-alpha-two-hf-llava",
help="Model to use",
)
PIL.Image.MAX_IMAGE_PIXELS = 933120000 # Quiets Pillow from giving warnings on really large images (WARNING: Exposes a risk of DoS from malicious images)
@dataclasses.dataclass
class Prompt:
prompt: str
weight: float
@torch.no_grad()
def main():
# Logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s"
)
# Parse arguments
args = parser.parse_args()
logging.info(f"Arguments: {args}")
args.prompt = "Please describe the image."
# Make sure we have a prompt or a prompt file
prompts = parse_prompts(args.prompt, args.prompt_file)
args.filelist = (
"C:\\Users\\27698\\Desktop\\node\\12\\12.txt"
)
# Find the images
image_paths = find_images(args.glob, args.filelist)
if len(image_paths) == 0:
logging.warning("No images found")
return
logging.info(f"Found {len(image_paths)} images")
# Ignore all images that already have captions
image_paths = [
path for path in image_paths if not Path(path).with_suffix(".txt").exists()
]
# Load JoyCaption
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(
tokenizer, PreTrainedTokenizerFast
), f"Tokenizer is of type {type(tokenizer)}"
llava_model = LlavaForConditionalGeneration.from_pretrained(
args.model, torch_dtype="bfloat16"
)
assert isinstance(llava_model, LlavaForConditionalGeneration)
dataset = ImageDataset(
prompts,
image_paths,
tokenizer,
llava_model.config.image_token_index,
llava_model.config.image_seq_length,
)
dataloader = DataLoader(
dataset,
collate_fn=dataset.collate_fn,
num_workers=args.num_workers,
shuffle=False,
drop_last=False,
batch_size=args.batch_size,
)
end_of_header_id = tokenizer.convert_tokens_to_ids("<|end_header_id|>")
end_of_turn_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
assert isinstance(end_of_header_id, int) and isinstance(end_of_turn_id, int)
pbar = tqdm(total=len(image_paths), desc="Captioning images...", dynamic_ncols=True)
for batch in dataloader:
vision_dtype = (
llava_model.vision_tower.vision_model.embeddings.patch_embedding.weight.dtype
)
vision_device = (
llava_model.vision_tower.vision_model.embeddings.patch_embedding.weight.device
)
language_device = (
llava_model.language_model.get_input_embeddings().weight.device
)
# Move to GPU
pixel_values = batch["pixel_values"].to(vision_device, non_blocking=True)
input_ids = batch["input_ids"].to(language_device, non_blocking=True)
attention_mask = batch["attention_mask"].to(language_device, non_blocking=True)
# Normalize the image
pixel_values = pixel_values / 255.0
pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
pixel_values = pixel_values.to(vision_dtype)
# Generate the captions
generate_ids = llava_model.generate(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
max_new_tokens=args.max_new_tokens,
do_sample=not args.greedy,
suppress_tokens=None,
use_cache=True,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
)
# Trim off the prompts
assert isinstance(generate_ids, torch.Tensor)
generate_ids = generate_ids.tolist()
generate_ids = [
trim_off_prompt(ids, end_of_header_id, end_of_turn_id)
for ids in generate_ids
]
# Decode the captions
captions = tokenizer.batch_decode(
generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
captions = [c.strip() for c in captions]
for path, caption in zip(batch["paths"], captions):
write_caption(Path(path), caption)
pbar.update(len(captions))
def trim_off_prompt(input_ids: list[int], eoh_id: int, eot_id: int) -> list[int]:
# Trim off the prompt
while True:
try:
i = input_ids.index(eoh_id)
except ValueError:
break
input_ids = input_ids[i + 1 :]
# Trim off the end
try:
i = input_ids.index(eot_id)
except ValueError:
return input_ids
return input_ids[:i]
def write_caption(image_path: Path, caption: str):
caption_path = image_path.with_suffix(".txt")
try:
f = os.open(
caption_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL
) # Write-only, create if not exist, fail if exists
except FileExistsError:
logging.warning(f"Caption file '{caption_path}' already exists")
return
except Exception as e:
logging.error(f"Failed to open caption file '{caption_path}': {e}")
return
try:
os.write(f, caption.encode("utf-8"))
os.close(f)
except Exception as e:
logging.error(f"Failed to write caption to '{caption_path}': {e}")
return
class ImageDataset(Dataset):
def __init__(
self,
prompts: list[Prompt],
paths: list[Path],
tokenizer: PreTrainedTokenizer | PreTrainedTokenizerFast,
image_token_id: int,
image_seq_length: int,
):
self.prompts = prompts
self.paths = paths
self.tokenizer = tokenizer
self.image_token_id = image_token_id
self.image_seq_length = image_seq_length
self.pad_token_id = tokenizer.pad_token_id
def __len__(self):
return len(self.paths)
def __getitem__(self, idx: int) -> dict:
path = self.paths[idx]
# Pick a prompt
prompt_str = random.choices(
self.prompts, weights=[p.weight for p in self.prompts]
)[0].prompt
# Preprocess image
# NOTE: I don't use the Processor here and instead do it manually.
# This is because in my testing a simple resize in Pillow yields higher quality results than the Processor,
# and the Processor had some buggy behavior on some images.
# And yes, with the so400m model, the model expects the image to be squished into a square, not padded.
try:
image = Image.open(path)
if image.size != (384, 384):
image = image.resize((384, 384), Image.LANCZOS)
image = image.convert("RGB")
pixel_values = TVF.pil_to_tensor(image)
except Exception as e:
logging.error(f"Failed to load image '{path}': {e}")
pixel_values = None # Will be filtered out later
# Build the conversation
convo = [
{
"role": "system",
"content": "You are a helpful image captioner.",
},
{
"role": "user",
"content": prompt_str,
},
]
# Format the conversation
convo_string = self.tokenizer.apply_chat_template(
convo, tokenize=False, add_generation_prompt=True
)
assert isinstance(convo_string, str)
# Tokenize the conversation
convo_tokens = self.tokenizer.encode(
convo_string, add_special_tokens=False, truncation=False
)
# Repeat the image tokens
input_tokens = []
for token in convo_tokens:
if token == self.image_token_id:
input_tokens.extend([self.image_token_id] * self.image_seq_length)
else:
input_tokens.append(token)
input_ids = torch.tensor(input_tokens, dtype=torch.long)
attention_mask = torch.ones_like(input_ids)
return {
"path": path,
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
def collate_fn(self, batch: list[dict]) -> dict:
# Filter out images that failed to load
batch = [item for item in batch if item["pixel_values"] is not None]
# Pad input_ids and attention_mask
# Have to use left padding because HF's generate can't handle right padding it seems
max_length = max(item["input_ids"].shape[0] for item in batch)
n_pad = [max_length - item["input_ids"].shape[0] for item in batch]
input_ids = torch.stack(
[
torch.nn.functional.pad(
item["input_ids"], (n, 0), value=self.pad_token_id
)
for item, n in zip(batch, n_pad)
]
)
attention_mask = torch.stack(
[
torch.nn.functional.pad(item["attention_mask"], (n, 0), value=0)
for item, n in zip(batch, n_pad)
]
)
# Stack pixel values
pixel_values = torch.stack([item["pixel_values"] for item in batch])
# Paths
paths = [item["path"] for item in batch]
return {
"paths": paths,
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
def parse_prompts(prompt_str: str | None, prompt_file: str | None) -> list[Prompt]:
if prompt_str is not None and prompt_file is not None:
raise ValueError("Cannot specify both --prompt and --prompt-file")
if prompt_str is not None:
return [Prompt(prompt=prompt_str, weight=1.0)]
if prompt_file is None:
raise ValueError("Must specify either --prompt or --prompt-file")
data = json.loads(Path(prompt_file).read_text())
if not isinstance(data, list):
raise ValueError("Expected JSON file to contain a list of prompts")
prompts = []
for item in data:
if isinstance(item, str):
prompts.append(Prompt(prompt=item, weight=1.0))
elif (
isinstance(item, dict)
and "prompt" in item
and "weight" in item
and isinstance(item["prompt"], str)
and isinstance(item["weight"], (int, float))
):
prompts.append(Prompt(prompt=item["prompt"], weight=item["weight"]))
else:
raise ValueError(
f"Invalid prompt in JSON file. Should be either a string or an object with 'prompt' and 'weight' fields: {item}"
)
if len(prompts) == 0:
raise ValueError("No prompts found in JSON file")
if sum(p.weight for p in prompts) <= 0.0:
raise ValueError("Prompt weights must sum to a positive number")
return prompts
def find_images(glob: str | None, filelist: str | Path | None) -> list[Path]:
if glob is None and filelist is None:
raise ValueError("Must specify either --glob or --filelist")
paths = []
if glob is not None:
paths.extend(Path(".").glob(glob))
if filelist is not None:
paths.extend(
(
Path(line.strip())
for line in Path(filelist).read_text().strip().splitlines()
if line.strip() != ""
)
)
return paths
if __name__ == "__main__":
main()

View File

@@ -1,75 +0,0 @@
import torch
import torch.amp
import torchvision.transforms.functional as TVF
from PIL import Image
from transformers import AutoTokenizer, LlavaForConditionalGeneration
IMAGE_PATH = "C:/Users/27698/Desktop/node/12/00001.png"
PROMPT = "Write a long descriptive caption for this image in a formal tone."
MODEL_NAME = "fancyfeast/llama-joycaption-alpha-two-hf-llava"
# Load JoyCaption
# bfloat16 is the native dtype of the LLM used in JoyCaption (Llama 3.1)
# device_map=0 loads the model into the first GPU
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
llava_model = LlavaForConditionalGeneration.from_pretrained(MODEL_NAME, torch_dtype="bfloat16", device_map="cuda:0")
llava_model.eval()
with torch.no_grad():
# Load and preprocess image
# Normally you would use the Processor here, but the image module's processor
# has some buggy behavior and a simple resize in Pillow yields higher quality results
image = Image.open(IMAGE_PATH)
if image.size != (384, 384):
image = image.resize((384, 384), Image.LANCZOS)
image = image.convert("RGB")
pixel_values = TVF.pil_to_tensor(image)
# Normalize the image
pixel_values = pixel_values / 255.0
pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
pixel_values = pixel_values.to(torch.bfloat16).unsqueeze(0)
# Build the conversation
convo = [
{
"role": "system",
"content": "You are a helpful image captioner.",
},
{
"role": "user",
"content": PROMPT,
},
]
# Format the conversation
convo_string = tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
# Tokenize the conversation
convo_tokens = tokenizer.encode(convo_string, add_special_tokens=False, truncation=False)
# Repeat the image tokens
input_tokens = []
for token in convo_tokens:
if token == llava_model.config.image_token_index:
input_tokens.extend([llava_model.config.image_token_index] * llava_model.config.image_seq_length)
else:
input_tokens.append(token)
input_ids = torch.tensor(input_tokens, dtype=torch.long).unsqueeze(0)
attention_mask = torch.ones_like(input_ids)
# Generate the caption
generate_ids = llava_model.generate(input_ids=input_ids.to('cuda'), pixel_values=pixel_values.to('cuda'), attention_mask=attention_mask.to('cuda'), max_new_tokens=300, do_sample=True, suppress_tokens=None, use_cache=True)[0]
# Trim off the prompt
generate_ids = generate_ids[input_ids.shape[1]:]
# Decode the caption
caption = tokenizer.decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
caption = caption.strip()
print(caption)

View File

@@ -1,2 +0,0 @@
@echo off
pyinstaller -F --upx-dir="C:\Users\27698\Desktop\upx-4.2.4-win64" lama_inpaint.py

View File

@@ -1,173 +0,0 @@
import io
import os
import sys
from typing import Union
import cv2
import torch
import numpy as np
from PIL import Image
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
# If sys.argv has fewer than 2 entries, no arguments were passed in; set initial parameters
# if len(sys.argv) < 2:
# sys.argv = [
# "C:/Users/27698/Desktop/LAITool/resources/scripts/lama/lama_inpaint.exe",
# "-l",
# "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\mask\\temp\\1717508661218.png",
# "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\mask\\mask_temp_1717508662659.png",
# "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\mask\\temp\\1717508564042.png",
# ]
print(sys.argv)
if getattr(sys, "frozen", False):
cript_directory = os.path.dirname(sys.executable)
elif __file__:
cript_directory = os.path.dirname(__file__)
link_name = os.path.join(os.path.expanduser("~"), "big_lama.pt")
cu_name = os.path.join(cript_directory, "model\\big-lama.pt")
mode_pa = link_name
if len(sys.argv) < 2:
# # Check whether model_path exists; if not, set the default value
if not os.path.exists(link_name):
os.system(f'mklink "{link_name}" "{cu_name}"')
print("Params: <runtime-config.json>")
sys.exit(0)
def get_image(image):
if isinstance(image, Image.Image):
img = np.array(image)
elif isinstance(image, np.ndarray):
img = image.copy()
else:
raise Exception("Input image should be either PIL Image or numpy array!")
if img.ndim == 3:
img = np.transpose(img, (2, 0, 1)) # chw
elif img.ndim == 2:
img = img[np.newaxis, ...]
assert img.ndim == 3
img = img.astype(np.float32) / 255
return img
def ceil_modulo(x, mod):
if x % mod == 0:
return x
return (x // mod + 1) * mod
def scale_image(img, factor, interpolation=cv2.INTER_AREA):
if img.shape[0] == 1:
img = img[0]
else:
img = np.transpose(img, (1, 2, 0))
img = cv2.resize(img, dsize=None, fx=factor, fy=factor, interpolation=interpolation)
if img.ndim == 2:
img = img[None, ...]
else:
img = np.transpose(img, (2, 0, 1))
return img
def pad_img_to_modulo(img, mod):
channels, height, width = img.shape
out_height = ceil_modulo(height, mod)
out_width = ceil_modulo(width, mod)
return np.pad(
img,
((0, 0), (0, out_height - height), (0, out_width - width)),
mode="symmetric",
)
def prepare_img_and_mask(image, mask, device, pad_out_to_modulo=8, scale_factor=None):
out_image = get_image(image)
out_mask = get_image(mask)
if scale_factor is not None:
out_image = scale_image(out_image, 1)
out_mask = scale_image(out_mask, scale_factor, interpolation=cv2.INTER_NEAREST)
if pad_out_to_modulo is not None and pad_out_to_modulo > 1:
out_image = pad_img_to_modulo(out_image, pad_out_to_modulo)
out_mask = pad_img_to_modulo(out_mask, pad_out_to_modulo)
out_image = torch.from_numpy(out_image).unsqueeze(0).to(device)
out_mask = torch.from_numpy(out_mask).unsqueeze(0).to(device)
out_mask = (out_mask > 0) * 1
return out_image, out_mask
class LamaInpaint:
def __init__(
self,
device,
model_path=None,
) -> None:
if model_path is None:
model_path = os.path.join(cript_directory, "model\\big-lama.pt")
self.model = torch.jit.load(model_path, map_location=device)
self.model.eval()
self.model.to(device)
self.device = device
def run(
self,
image: Union[Image.Image, np.ndarray],
mask: Union[Image.Image, np.ndarray],
):
if isinstance(image, np.ndarray):
orig_height, orig_width = image.shape[:2]
else:
orig_height, orig_width = np.array(image).shape[:2]
# image_width = image.shape[1]
# mask_width = mask.shape[1]
scale = image.width / mask.width
image, mask = prepare_img_and_mask(image, mask, self.device, 8, scale)
with torch.inference_mode():
inpainted = self.model(image, mask)
cur_res = inpainted[0].permute(1, 2, 0).detach().cpu().numpy()
cur_res = np.clip(cur_res * 255, 0, 255).astype("uint8")
cur_res = cur_res[:orig_height, :orig_width]
return cur_res
try:
de = "cpu"
if torch.cuda.is_available():
de = "cuda"
lama = LamaInpaint(de, mode_pa)
image_path = sys.argv[2]
mask_path = sys.argv[3]
output_path = sys.argv[4]
# If no mask_path is passed, the mask region would need to be computed here
# Open the images with Image.open
image = Image.open(image_path).convert("RGB")
mask = Image.open(mask_path).convert("L")
res = lama.run(image, mask)
# Save the inpainted image locally
img = Image.fromarray(res)
# Use save to write the image to a file
img.save(output_path)
sys.exit(0)
except Exception as e:
print(e)
sys.exit(str(e))

View File

@@ -1,43 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-
a = Analysis(
['lama_inpaint.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
)
pyz = PYZ(a.pure)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='lama_inpaint',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
coll = COLLECT(
exe,
a.binaries,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='lama_inpaint',
)

View File

@@ -1,2 +0,0 @@
@echo off
pyinstaller --upx-dir="C:\Users\27698\Desktop\upx-4.2.4-win64" local_whisper.py

View File

@@ -1,170 +0,0 @@
# -*- coding: utf-8 -*-
import io
import os
import sys
import public_tools
from pathlib import Path
from huggingface_hub import hf_hub_download
from faster_whisper import WhisperModel
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
# If sys.argv has fewer than 2 entries, no arguments were passed in; set initial parameters
# "C:\\Users\\27698\\Desktop\\LAITool\\resources\\scripts\\Lai.exe" -c "D:/来推项目集/7.4/娱乐:江湖大哥退休,去拍电影/scripts/output_crop_00001.json" "NVIDIA"
# if len(sys.argv) < 2:
# sys.argv = [
# "C:\\Users\\27698\\Desktop\\LAITool\\resources\\scripts\\Lai.exe",
# "-w",
# "C:\\Users\\27698\\Desktop\\测试\\test\\mjTestoutput_crop_00001.mp4",
# "C:\\Users\\27698\\Desktop\\测试\\test\data\\frame",
# "C:\\Users\\27698\\Desktop\\测试\\test\\tmp\\input_crop",
# 30,
# "NVIDIA",
# ]
print(sys.argv)
if len(sys.argv) < 2:
print("Params: <runtime-config.json>")
exit(0)
if getattr(sys, "frozen", False):
cript_directory = os.path.dirname(sys.executable)
elif __file__:
cript_directory = os.path.dirname(__file__)
def GetText(out_folder, mp3_folder):
text = []
# Fetch the model first
print("正在下载或加载模型")
sys.stdout.flush()
model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json",
)
model = WhisperModel(
model_size_or_path=os.path.dirname(model_path),
device="auto",
local_files_only=True,
)
print("模型加载成功,开始识别")
sys.stdout.flush()
# Collect all MP3 files under the given folder
mp3_list = []
for root, dirs, files in os.walk(mp3_folder):
for file in files:
if file.endswith(".mp3"):
mp3_list.append(os.path.join(root, file))
for mp in mp3_list:
segments, info = model.transcribe(
mp,
beam_size=5,
language="zh",
vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000),
)
tmp_text = ""
for segment in segments:
tmp_text += segment.text + ""
print(mp + "识别完成")
sys.stdout.flush()
text.append(tmp_text)
# Write out the results
print("文本全部识别成功,正在写出")
sys.stdout.flush()
tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, "文案.txt"))
print("写出完成")
sys.stdout.flush()
def GetTextTask(out_folder, mp, name):
text = []
# Fetch the model first
print("正在下载或加载模型")
sys.stdout.flush()
model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json",
)
model = WhisperModel(
model_size_or_path=os.path.dirname(model_path),
device="auto",
local_files_only=True,
)
print("模型加载成功,开始识别")
sys.stdout.flush()
segments, info = model.transcribe(
mp,
beam_size=5,
language="zh",
vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000),
)
tmp_text = ""
for segment in segments:
tmp_text += segment.text + ""
print(mp + "识别完成")
sys.stdout.flush()
text.append(tmp_text)
# Write out the results
sys.stdout.flush()
tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, name + ".txt"))
sys.stdout.flush()
# GetTextTask(
# "C:\\Users\\27698\\Desktop\\测试\\mjTest",
# "C:\\Users\\27698\\Desktop\\测试\\mjTest\\data\\frame\\00001.mp4",
# "00001",
# )
if sys.argv[1] == "-ts":
GetText(
sys.argv[2],
sys.argv[3],
)
elif sys.argv[1] == "-t":
GetTextTask(
sys.argv[2],
sys.argv[3],
sys.argv[4],
)
else:
print("Params: <runtime-config.json>")
exit(0)

View File

@@ -1,50 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.building.datastruct import Tree
from PyInstaller.utils.hooks import get_package_paths
PACKAGE_DIRECTORY = get_package_paths('faster_whisper')[1]
datas = [(PACKAGE_DIRECTORY, 'faster_whisper')]
a = Analysis(
['local_whisper.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
optimize=0,
)
pyz = PYZ(a.pure)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='local_whisper',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
coll = COLLECT(
exe,
a.binaries,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='local_whisper',
)

View File

@@ -1,351 +0,0 @@
# File-reading helpers
import json
import os
import win32api
import win32con
import pywintypes
import shutil
import re
class PublicTools:
"""
Some shared utility methods.
"""
def delete_path(self, path):
"""
Delete the file or folder at the given path.
"""
# Check whether the path exists
if not os.path.exists(path):
return
# Check whether the path is a file or a folder
if os.path.isfile(path):
# It is a file; delete it
try:
os.remove(path)
except Exception as e:
raise e
elif os.path.isdir(path):
# It is a folder; delete it
try:
shutil.rmtree(path)
except Exception as e:
raise e
else:
raise
def list_files_by_extension(self, folder_path, extension):
"""
List all files under the given folder that match the given extension.
"""
file_list = []
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.endswith(extension):
file_list.append(os.path.join(root, file))
elif file.endswith(extension.upper()):
file_list.append(os.path.join(root, file))
return file_list
def get_fonts_from_registry(self, key_path):
"""
Get the font files registered under the given registry key.
"""
font_names = []
try:
key = win32api.RegOpenKeyEx(
(
win32con.HKEY_LOCAL_MACHINE
if "HKEY_LOCAL_MACHINE" in key_path
else win32con.HKEY_CURRENT_USER
),
key_path.split("\\", 1)[1],
0,
win32con.KEY_READ,
)
i = 0
while True:
try:
value = win32api.RegEnumValue(key, i)
font_name = value[0]
# Use a regex to strip parentheses and their contents
font_name = re.sub(r"\s*\([^)]*\)$", "", font_name)
font_names.append(font_name)
i += 1
except pywintypes.error as e:
if e.winerror == 259:  # 259 (ERROR_NO_MORE_ITEMS): no more data
break
else:
raise
finally:
try:
win32api.RegCloseKey(key)
except:
pass
return font_names
def get_installed_fonts(self):
"""
Get the installed font names and return them.
"""
system_fonts = self.get_fonts_from_registry(
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
)
user_fonts = self.get_fonts_from_registry(
"HKEY_CURRENT_USER\\Software\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
)
all_fonts = list(set(system_fonts + user_fonts))  # Merge and de-duplicate
return all_fonts
# Convert RRGGBB to BBGGRR
def convert_rrggbb_to_bbggrr(self, rrggbb):
"""
Convert RRGGBB to BBGGRR.
"""
if len(rrggbb) == 7:
rr = rrggbb[1:3]
gg = rrggbb[3:5]
bb = rrggbb[5:7]
return bb + gg + rr
else:
return "Invalid input"
def write_to_file(self, arr, filename):
with open(filename, "w",encoding='utf-8') as f:
for item in arr:
f.write("%s\n" % item)
# Read a file path from user input
def read_file(fileType):
txt_path = input(f"输入{fileType}文件路径:")
txt_path = remove_prefix_and_suffix(txt_path, '"', '"')
while txt_path.strip() == "":
txt_path = input(f"输入{fileType}文件路径:")
while os.path.exists(txt_path) == False:
print("文件路径不存在错误:")
txt_path = input(f"输入{fileType}文件路径:")
txt_path = remove_prefix_and_suffix(txt_path, '"', '"')
return txt_path
def format_time_ms(milliseconds):
"""
Convert milliseconds to an hours:minutes:seconds.xx string.
"""
seconds = milliseconds / 1000
# Work out hours, minutes, and seconds
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
seconds = seconds % 60
# Format the string
# %02d keeps hours and minutes at two digits; %.2f keeps two decimal places for the seconds
formatted_time = f"{hours}:{minutes:02d}:{seconds:05.2f}"
return formatted_time
# Strip a matching prefix and suffix
def remove_prefix_and_suffix(input_str, prefix_to_remove, suffix_to_remove):
if input_str.startswith(prefix_to_remove):
# Remove the prefix
input_str = input_str[len(prefix_to_remove) :]
if input_str.endswith(suffix_to_remove):
# Remove the suffix
input_str = input_str[: -len(suffix_to_remove)]
return input_str
# Check whether a specific subfolder exists under a folder
def check_if_folder_exists(parent_folder, target_folder_name):
# Get the list of subfolders
subfolders = [f.name for f in os.scandir(parent_folder) if f.is_dir()]
# Check whether the target folder exists
if target_folder_name in subfolders:
return True
else:
return False
# Check whether a specific file exists in the given folder.
def file_exists_in_folder(folder_path: str, file_name: str) -> bool:
# Build the full file path
file_path = os.path.join(folder_path, file_name)
# Return whether the file exists
return os.path.isfile(file_path)
# Convert microseconds to seconds, rounded to the given number of decimals
def convert_to_seconds(number, count):
seconds = number / 1000000
rounded_number = round(seconds, count)
return rounded_number
def is_empty(obj):
if obj is None:
return True
elif isinstance(obj, str):
return len(obj) == 0
elif isinstance(obj, list):
return len(obj) == 0
elif isinstance(obj, dict):
return len(obj) == 0
return False
def opt_dict(obj, key, default=None):
if obj is None:
return default
if key in obj:
v = obj[key]
if not is_empty(v):
return v
return default
def read_config(path, webui=True):
with open(path, "r", encoding="utf-8") as f:
runtime_config = json.load(f)
if "config" not in runtime_config:
print("no filed 'config' in json")
return None
config = runtime_config["config"]
if "webui" not in config:
print("no filed 'webui' in 'config'")
return None
setting_config_path = config["setting"]
if not os.path.exists(setting_config_path):
setting_config_path = "config/" + setting_config_path
if not os.path.exists(setting_config_path):
setting_config_path = "../" + setting_config_path
# read config
with open(setting_config_path, "r", encoding="utf-8") as f:
setting_config = json.load(f)
# set workspace parent: the root directory
if "workspace" in setting_config:
setting_config["workspace"]["parent"] = runtime_config["workspace"]
else:
setting_config["workspace"] = {"parent": runtime_config["workspace"]}
setting_config["video"] = opt_dict(runtime_config, "video")
# merge setting config
if "setting" in config:
setting_config.update(runtime_config["setting"])
# webui config
if webui:
webui_config_path = config["webui"]
if not os.path.exists(webui_config_path):
webui_config_path = "config/webui/" + webui_config_path
if not os.path.exists(webui_config_path):
webui_config_path = "../" + webui_config_path
with open(webui_config_path, "r", encoding="utf-8") as f:
webui_config = json.load(f)
# merge webui config
if "webui" in runtime_config:
webui_config.update(runtime_config["webui"])
return webui_config, setting_config
return setting_config
TAG_MODE_NONE = ""
# Workspace paths
class Workspace:
def __init__(
self,
root: str,
input: str,
output: str,
input_crop: str,
output_crop: str,
input_tag: str,
input_mask: str,
input_crop_mask: str,
crop_info: str,
):
self.root = root
self.input = input
self.output = output
self.input_crop = input_crop
self.output_crop = output_crop
self.input_tag = input_tag
self.input_mask = input_mask
self.input_crop_mask = input_crop_mask
self.crop_info = crop_info
# Helper that rounds a number up to the next multiple of mul
def round_up(num, mul):
return (num // mul + 1) * mul
class SettingConfig:
def __init__(self, config: dict, workParent):
self.config = config
self.webui_work_api = None
self.workParent = workParent
def to_dict(self):
return self.__dict__
def get_tag_mode(self):
tag_cfg = opt_dict(self.config, "tag")
return opt_dict(tag_cfg, "mode", TAG_MODE_NONE)
def get_tag_actions(self):
tag_cfg = opt_dict(self.config, "tag")
return opt_dict(tag_cfg, "actions", [])
def get_workspace_config(self) -> Workspace:
workspace_config = opt_dict(self.config, "workspace")
tmp_config = opt_dict(workspace_config, "tmp")
input = opt_dict(workspace_config, "input", "input")
output = opt_dict(workspace_config, "output", "output")
workspace_parent = self.workParent
tmp_parent = opt_dict(tmp_config, "parent", "tmp")
input_crop = opt_dict(tmp_config, "input_crop", "input_crop")
output_crop = opt_dict(tmp_config, "output_crop", "output_crop")
input_tag = opt_dict(tmp_config, "input_tag", "input_crop")
input_mask = opt_dict(tmp_config, "input_mask", "input_mask")
input_crop_mask = opt_dict(tmp_config, "input_crop_mask", "input_crop_mask")
crop_info = opt_dict(tmp_config, "crop_info", "crop_info.txt")
tmp_path = os.path.join(workspace_parent, tmp_parent)
return Workspace(
workspace_parent,
os.path.join(workspace_parent, input),
os.path.join(workspace_parent, output),
os.path.join(tmp_path, input_crop),
os.path.join(tmp_path, output_crop),
os.path.join(tmp_path, input_tag),
os.path.join(tmp_path, input_mask),
os.path.join(tmp_path, input_crop_mask),
os.path.join(tmp_path, crop_info),
)
def enable_tag(self):
tag_cfg = opt_dict(self.config, "tag")
return opt_dict(tag_cfg, "enable", True)

View File

@@ -1,307 +0,0 @@
# pip install scenedetect opencv-python -i https://pypi.tuna.tsinghua.edu.cn/simple
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
from scenedetect.stats_manager import StatsManager
from scenedetect.detectors.content_detector import ContentDetector
import os
import sys
import json
import subprocess
from huggingface_hub import hf_hub_download
from faster_whisper import WhisperModel
from pathlib import Path
import public_tools
# Get the start/end timecodes for automatic scene splitting
def find_scenes(video_path, sensitivity):
print(
"正在计算分镜数据" + "sensitivity" + str(sensitivity) + "path : " + video_path
)
sys.stdout.flush()
video_manager = VideoManager([video_path])
stats_manager = StatsManager()
scene_manager = SceneManager(stats_manager)
# Use the ContentDetector
scene_manager.add_detector(ContentDetector(threshold=float(sensitivity)))
shijian_list = []
try:
video_manager.set_downscale_factor()
video_manager.start()
scene_manager.detect_scenes(frame_source=video_manager)
scene_list = scene_manager.get_scene_list()
print("分镜数据列表:")
sys.stdout.flush()
for i, scene in enumerate(scene_list):
shijian_list.append([scene[0].get_timecode(), scene[1].get_timecode()])
print(
"Scene %2d: Start %s / Frame %d, End %s / Frame %d"
% (
i + 1,
scene[0].get_timecode(),
scene[0].get_frames(),
scene[1].get_timecode(),
scene[1].get_frames(),
)
)
sys.stdout.flush()
finally:
video_manager.release()
return shijian_list
# Create the directory if it does not exist
def createDir(file_dir):
# If the folder does not exist, create it
if not os.path.isdir(file_dir):
os.mkdir(file_dir)
# Split a single video into scene clips
def ClipVideo(video_path, out_folder, image_out_folder, sensitivity, gpu_type):
shijian_list = find_scenes(video_path, sensitivity)  # list of [start, end] timecode pairs
shijian_list_len = len(shijian_list)
print("总共有%s个场景" % str(shijian_list_len))
sys.stdout.flush()
video_list = []
for i in range(0, shijian_list_len):
start_time_str = shijian_list[i][0]
end_time_str = shijian_list[i][1]
print("开始输出第" + str(i + 1) + "个分镜")
video_name = "{:05d}".format(i + 1)
out_video_file = os.path.join(out_folder, video_name + ".mp4")
sys.stdout.flush()
video_list.append(
{
"start_time_str": start_time_str,
"end_time_str": end_time_str,
"out_video_file": out_video_file,
"video_name": video_name,
}
)
# Use ffmpeg to cut the video
command = []
command.append("ffmpeg")
command.append("-i")
command.append(video_path)
command.append("-ss")
command.append(start_time_str)
command.append("-to")
command.append(end_time_str)
command.append("-c:v")
if gpu_type == "NVIDIA":
command.append("h264_nvenc")
elif gpu_type == "AMD":
command.append("h264_amf")
else:
command.append("libx264")
command.append("-preset")
command.append("fast")
command.append("-c:a")
command.append("copy")
command.append(out_video_file)
command.append("-loglevel")
command.append("error")
subprocess.run(
command,
check=True,
stderr=subprocess.PIPE,
)
print("分镜输出完成。开始抽帧")
sys.stdout.flush()
for vi in video_list:
h, m, s = vi["start_time_str"].split(":")
start_seconds = int(h) * 3600 + int(m) * 60 + float(s)
h, m, s = vi["end_time_str"].split(":")
end_seconds = int(h) * 3600 + int(m) * 60 + float(s)
print("正在抽帧:" + vi["video_name"])
sys.stdout.flush()
subprocess.run(
[
"ffmpeg",
"-ss",
str((end_seconds - start_seconds) / 2),
"-i",
vi["out_video_file"],
"-frames:v",
"1",
os.path.join(image_out_folder, vi["video_name"] + ".png"),
"-loglevel",
"error",
]
)
print("抽帧完成,开始识别文案")
sys.stdout.flush()
return video_list
def SplitAudio(video_out_folder, video_list):
# ffmpeg -i input_file.mp4 -vn -ab 128k output_file.mp3
print("正在分离音频!!")
mp3_list = []
sys.stdout.flush()
for v in video_list:
mp3_path = os.path.join(video_out_folder, v["video_name"] + ".mp3")
mp3_list.append(mp3_path)
subprocess.run(
[
"ffmpeg",
"-i",
v["out_video_file"],
"-vn",
"-ab",
"128k",
mp3_path,
"-loglevel",
"error",
],
check=True,
)
return mp3_list
def GetText(out_folder, mp3_list):
text = []
# Fetch the model first
print("正在下载或加载模型")
sys.stdout.flush()
model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json",
)
model = WhisperModel(
model_size_or_path=os.path.dirname(model_path),
device="auto",
local_files_only=True,
)
print("模型加载成功,开始识别")
sys.stdout.flush()
for mp in mp3_list:
segments, info = model.transcribe(
mp,
beam_size=5,
language="zh",
vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000),
)
tmp_text = ""
for segment in segments:
tmp_text += segment.text + ""
print(mp + "识别完成")
sys.stdout.flush()
text.append(tmp_text)
# Write out the results
print("文本全部识别成功,正在写出")
sys.stdout.flush()
tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, "文案.txt"))
print("写出完成")
sys.stdout.flush()
def GetTextTask(out_folder, mp, name):
text = []
# Fetch the model first
print("正在下载或加载模型")
sys.stdout.flush()
model_path = Path(
hf_hub_download(repo_id="Systran/faster-whisper-large-v3", filename="model.bin")
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="preprocessor_config.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="tokenizer.json",
)
hf_hub_download(
repo_id="Systran/faster-whisper-large-v3",
filename="vocabulary.json",
)
model = WhisperModel(
model_size_or_path=os.path.dirname(model_path),
device="auto",
local_files_only=True,
)
print("模型加载成功,开始识别")
sys.stdout.flush()
segments, info = model.transcribe(
mp,
beam_size=5,
language="zh",
vad_filter=True,
vad_parameters=dict(min_silence_duration_ms=1000),
)
tmp_text = ""
for segment in segments:
tmp_text += segment.text + ""
print(mp + "识别完成")
sys.stdout.flush()
text.append(tmp_text)
# Write out the results
sys.stdout.flush()
tools = public_tools.PublicTools()
tools.write_to_file(text, os.path.join(out_folder, name + ".txt"))
sys.stdout.flush()
def get_fram(video_path, out_path, sensitivity):
try:
shijian_list = find_scenes(video_path, sensitivity)  # list of [start, end] timecode pairs
print("总共有%s个场景" % str(len(shijian_list)))
print("开始输出json")
print(shijian_list)
# Write the entries in the list out to a JSON file
with open(out_path, "w") as file:
# Dump the list into the specified JSON file
json.dump(shijian_list, file)
print("输出完成")
except Exception as e:
print("出现错误" + str(e))
exit(0)
def init(video_path, video_out_folder, image_out_folder, sensitivity, gpu_type):
v_l = ClipVideo(
video_path, video_out_folder, image_out_folder, sensitivity, gpu_type
)
# Start separating the audio
m_l = SplitAudio(video_out_folder, v_l)
# Start transcribing the speech
GetText(os.path.dirname(video_out_folder), m_l)

View File

@@ -0,0 +1,10 @@
export const SoftwareData = {
"version": "V3.3.9",
"date": "2025-05-29",
"notes": [
"修复了音频处理的错误。",
"改进了视频处理的性能。",
"更新了依赖库以提高安全性。"
]
}
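The new SoftwareData constant above hard-codes the release metadata. A hypothetical sketch of how a renderer view might consume it (the import path and the helper function are assumptions for illustration, not part of this commit):

// Hypothetical renderer helper: render SoftwareData as a release-notes block
import { SoftwareData } from './softwareData' // assumed module path

export function renderReleaseNotes(container: HTMLElement): void {
  const title = document.createElement('h3')
  title.textContent = `${SoftwareData.version} (${SoftwareData.date})`
  container.appendChild(title)

  const list = document.createElement('ul')
  for (const note of SoftwareData.notes) {
    const item = document.createElement('li')
    item.textContent = note
    list.appendChild(item)
  }
  container.appendChild(list)
}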

View File

@@ -1,7 +1,7 @@
import Realm, { UpdateMode } from 'realm'
import { BookModel } from '../../model/Book/book.js'
import path from 'path'
- import { define } from '../../../define.js'
+ import { define } from '../../../define'
import { BookImageCategory, BookTaskStatus, BookType } from '../../../enum/bookEnum.js'
import { successMessage } from '../../../../main/Public/generalTools'
import { CheckFolderExistsOrCreate, CopyFileOrFolder } from '../../../Tools/file'

View File

@ -1,163 +0,0 @@
let define = {}
const isBrowser = typeof window !== 'undefined' && typeof window.document !== 'undefined'
if (!isBrowser) {
const path = require('path')
const { app } = require('electron')
if (!app.isPackaged) {
define = {
discordScript: path.join(__dirname, '../../src/main/discord/discordScript.js'),
zhanwei_image: path.join(__dirname, '../../resources/image/zhanwei.png'),
config_path: path.join(__dirname, '../../resources/config/global_setting.json'),
clip_setting: path.join(__dirname, '../../resources/config/clip_setting.json'),
sd_setting: path.join(__dirname, '../../resources/config/sd_config.json'),
dynamic_setting: path.join(__dirname, '../../resources/config/dynamic_setting.json'),
tag_setting: path.join(__dirname, '../../resources/config/tag_setting.json'),
img_base: path.join(__dirname, '../../resources/config/img_base.json'),
video_config: path.join(__dirname, '../../resources/config/video_config.json'),
system_config: path.join(__dirname, '../../resources/config/system_config.json'),
scripts_path: path.join(__dirname, '../../resources/scripts'),
db_path: path.join(__dirname, '../../resources/scripts/db'),
project_path: path.join(__dirname, '../../project'),
tts_path: path.join(__dirname, '../../tts'),
logger_path: path.join(__dirname, '../../resources/logger'),
package_path: path.join(__dirname, '../../resources/package'),
image_path: path.join(__dirname, '../../resources/image'),
temp_sd_image: path.join(__dirname, '../../resources/image/TempSDImage'),
draft_temp_path: path.join(__dirname, '../../resources/tmp/temp.zip'),
init_config_path: path.join(__dirname, '../../resources/tmp/config'),
clip_speed_temp_path: path.join(__dirname, '../../resources/tmp/Clip/speeds_tmp.json'),
add_canvases_temp_path: path.join(__dirname, '../../resources/tmp/Clip/canvases_tmp.json'),
add_sound_channel_mappings_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/sound_channel_mappings_tmp.json'
),
add_vocal_separations_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/vocal_separations_tmp.json'
),
add_material_video_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/videoMaterialTemp.json'
),
add_tracks_segments_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/tracks_segments_tmp.json'
),
add_tracks_type_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/tracks_type_tmp.json'
),
add_material_animations_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/material_animations_tmp.json'
),
add_material_text_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/material_text_temp.json'
),
add_track_text_segments_temp_path: path.join(
__dirname,
'../../resources/tmp/Clip/track_text_segments_temp.json'
),
add_materials_beats_tmp_path: path.join(
__dirname,
'../../resources/tmp/Clip/materials_beats_tmp.json'
),
add_materials_audios_tmp_path: path.join(
__dirname,
'../../resources/tmp/Clip/materials_audios_tmp.json'
),
add_tracks_audio_segments_tmp_path: path.join(
__dirname,
'../../resources/tmp/Clip/tracks_audio_segments_tmp.json'
),
add_keyframe_tmp_path: path.join(__dirname, '../../resources/tmp/Clip/keyframe_tmp.json')
}
} else {
define = {
zhanwei_image: path.join(__dirname, '../../../resources/image/zhanwei.png'),
config_path: path.join(__dirname, '../../../resources/config/global_setting.json'),
clip_setting: path.join(__dirname, '../../../resources/config/clip_setting.json'),
sd_setting: path.join(__dirname, '../../../resources/config/sd_config.json'),
dynamic_setting: path.join(__dirname, '../../../resources/config/dynamic_setting.json'),
tag_setting: path.join(__dirname, '../../../resources/config/tag_setting.json'),
video_config: path.join(__dirname, '../../../resources/config/video_config.json'),
system_config: path.join(__dirname, '../../../resources/config/system_config.json'),
img_base: path.join(__dirname, '../../../resources/config/img_base.json'),
scripts_path: path.join(__dirname, '../../../resources/scripts'),
db_path: path.join(__dirname, '../../../resources/scripts/db'),
project_path: path.join(__dirname, '../../../project'),
tts_path: path.join(__dirname, '../../../tts'),
logger_path: path.join(__dirname, '../../../resources/logger'),
package_path: path.join(__dirname, '../../../resources/package'),
discordScript: path.join(__dirname, '../../../resources/scripts/discordScript.js'),
image_path: path.join(__dirname, '../../../resources/image'),
temp_sd_image: path.join(__dirname, '../../../resources/image/TempSDImage'),
draft_temp_path: path.join(__dirname, '../../../resources/tmp/temp.zip'),
init_config_path: path.join(__dirname, '../../../resources/tmp/config'),
clip_speed_temp_path: path.join(__dirname, '../../../resources/tmp/Clip/speeds_tmp.json'),
add_canvases_temp_path: path.join(__dirname, '../../../resources/tmp/Clip/canvases_tmp.json'),
add_sound_channel_mappings_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/sound_channel_mappings_tmp.json'
),
add_vocal_separations_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/vocal_separations_tmp.json'
),
add_material_video_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/videoMaterialTemp.json'
),
add_tracks_segments_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/tracks_segments_tmp.json'
),
add_tracks_type_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/tracks_type_tmp.json'
),
add_material_animations_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/material_animations_tmp.json'
),
add_material_text_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/material_text_temp.json'
),
add_track_text_segments_temp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/track_text_segments_temp.json'
),
add_materials_beats_tmp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/materials_beats_tmp.json'
),
add_materials_audios_tmp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/materials_audios_tmp.json'
),
add_tracks_audio_segments_tmp_path: path.join(
__dirname,
'../../../resources/tmp/Clip/tracks_audio_segments_tmp.json'
),
add_keyframe_tmp_path: path.join(__dirname, '../../../resources/tmp/Clip/keyframe_tmp.json')
}
}
}
define['remotemj_api'] = 'https://api.laitool.net/'
define['serverUrl'] = 'http://lms.laitool.cn'
// define['serverUrl'] = 'https://localhost:44362'
define['hkServerUrl'] = 'https://laitool.net/'
define['bakServerUrl'] = 'https://laitool.net/'
define['API'] = 'f85d39ed5a40fd09966f13f12b6cf0f0'
// define['lms'] =
// process.env.NODE_ENV == 'development' ? 'https://localhost:44362' : 'https://lms.laitool.cn'
define['lms'] = 'https://lms.laitool.cn'
export { define }

src/define/define.ts (new file, 178 lines)
View File

@ -0,0 +1,178 @@
// 检测当前环境
const isBrowser = typeof window !== 'undefined' && typeof window.document !== 'undefined'
// 浏览器环境的路径处理函数
function joinPath(...segments: string[]): string {
return segments.join('/').replace(/\/+/g, '/');
}
// 统一的路径处理函数
const pathJoin = isBrowser ? joinPath : require('path').join;
// 定义类型接口
interface DefineConfig {
// 脚本和可执行文件路径
discordScript: string | null;
// 图片资源路径
zhanwei_image: string;
image_path: string;
temp_sd_image: string;
// 配置文件路径
config_path: string | null;
clip_setting: string | null;
sd_setting: string | null;
dynamic_setting: string | null;
tag_setting: string | null;
img_base: string | null;
video_config: string | null;
system_config: string | null;
// 目录路径
scripts_path: string | null;
db_path: string | null;
project_path: string | null;
tts_path: string | null;
logger_path: string | null;
package_path: string | null;
// 临时文件路径
draft_temp_path: string | null;
init_config_path: string | null;
// Clip 相关临时文件路径
clip_speed_temp_path: string | null;
add_canvases_temp_path: string | null;
add_sound_channel_mappings_temp_path: string | null;
add_vocal_separations_temp_path: string | null;
add_material_video_temp_path: string | null;
add_tracks_segments_temp_path: string | null;
add_tracks_type_temp_path: string | null;
add_material_animations_temp_path: string | null;
add_material_text_temp_path: string | null;
add_track_text_segments_temp_path: string | null;
add_materials_beats_tmp_path: string | null;
add_materials_audios_tmp_path: string | null;
add_tracks_audio_segments_tmp_path: string | null;
add_keyframe_tmp_path: string | null;
// API 配置
remotemj_api: string;
serverUrl: string;
hkServerUrl: string;
bakServerUrl: string;
API: string;
lms: string;
}
let define: DefineConfig = {} as DefineConfig;
if (!isBrowser) {
const path = require('path')
const { app } = require('electron')
// 根据打包状态确定基础路径
const basePath = !app.isPackaged ?
path.join(__dirname, '../../') :
path.join(__dirname, '../../../');
define = {
discordScript: !app.isPackaged ? pathJoin(__dirname, '../../src/main/discord/discordScript.js') : pathJoin(basePath, 'resources/scripts/discordScript.js'),
zhanwei_image: pathJoin(basePath, 'resources/image/zhanwei.png'),
config_path: pathJoin(basePath, 'resources/config/global_setting.json'),
clip_setting: pathJoin(basePath, 'resources/config/clip_setting.json'),
sd_setting: pathJoin(basePath, 'resources/config/sd_config.json'),
dynamic_setting: pathJoin(basePath, 'resources/config/dynamic_setting.json'),
tag_setting: pathJoin(basePath, 'resources/config/tag_setting.json'),
img_base: pathJoin(basePath, 'resources/config/img_base.json'),
video_config: pathJoin(basePath, 'resources/config/video_config.json'),
system_config: pathJoin(basePath, 'resources/config/system_config.json'),
scripts_path: pathJoin(basePath, 'resources/scripts'),
db_path: pathJoin(basePath, 'resources/scripts/db'),
project_path: pathJoin(basePath, 'project'),
tts_path: pathJoin(basePath, 'tts'),
logger_path: pathJoin(basePath, 'resources/logger'),
package_path: pathJoin(basePath, 'resources/package'),
image_path: pathJoin(basePath, 'resources/image'),
temp_sd_image: pathJoin(basePath, 'resources/image/TempSDImage'),
draft_temp_path: pathJoin(basePath, 'resources/tmp/temp.zip'),
init_config_path: pathJoin(basePath, 'resources/tmp/config'),
clip_speed_temp_path: pathJoin(basePath, 'resources/tmp/Clip/speeds_tmp.json'),
add_canvases_temp_path: pathJoin(basePath, 'resources/tmp/Clip/canvases_tmp.json'),
add_sound_channel_mappings_temp_path: pathJoin(basePath, 'resources/tmp/Clip/sound_channel_mappings_tmp.json'),
add_vocal_separations_temp_path: pathJoin(basePath, 'resources/tmp/Clip/vocal_separations_tmp.json'),
add_material_video_temp_path: pathJoin(basePath, 'resources/tmp/Clip/videoMaterialTemp.json'),
add_tracks_segments_temp_path: pathJoin(basePath, 'resources/tmp/Clip/tracks_segments_tmp.json'),
add_tracks_type_temp_path: pathJoin(basePath, 'resources/tmp/Clip/tracks_type_tmp.json'),
add_material_animations_temp_path: pathJoin(basePath, 'resources/tmp/Clip/material_animations_tmp.json'),
add_material_text_temp_path: pathJoin(basePath, 'resources/tmp/Clip/material_text_temp.json'),
add_track_text_segments_temp_path: pathJoin(basePath, 'resources/tmp/Clip/track_text_segments_temp.json'),
add_materials_beats_tmp_path: pathJoin(basePath, 'resources/tmp/Clip/materials_beats_tmp.json'),
add_materials_audios_tmp_path: pathJoin(basePath, 'resources/tmp/Clip/materials_audios_tmp.json'),
add_tracks_audio_segments_tmp_path: pathJoin(basePath, 'resources/tmp/Clip/tracks_audio_segments_tmp.json'),
add_keyframe_tmp_path: pathJoin(basePath, 'resources/tmp/Clip/keyframe_tmp.json'),
// API 配置
remotemj_api: 'https://api.laitool.net/',
serverUrl: 'http://lms.laitool.cn',
hkServerUrl: 'https://laitool.net/',
bakServerUrl: 'https://laitool.net/',
API: 'f85d39ed5a40fd09966f13f12b6cf0f0',
lms: 'https://lms.laitool.cn'
};
} else {
// 浏览器环境
const basePath = './';
define = {
discordScript: null,
zhanwei_image: pathJoin(basePath, 'resources/image/zhanwei.png'),
config_path: null,
clip_setting: null,
sd_setting: null,
dynamic_setting: null,
tag_setting: null,
img_base: null,
video_config: null,
system_config: null,
scripts_path: null,
db_path: null,
project_path: null,
tts_path: null,
logger_path: null,
package_path: null,
image_path: pathJoin(basePath, 'resources/image'),
temp_sd_image: pathJoin(basePath, 'resources/image/TempSDImage'),
draft_temp_path: null,
init_config_path: null,
clip_speed_temp_path: null,
add_canvases_temp_path: null,
add_sound_channel_mappings_temp_path: null,
add_vocal_separations_temp_path: null,
add_material_video_temp_path: null,
add_tracks_segments_temp_path: null,
add_tracks_type_temp_path: null,
add_material_animations_temp_path: null,
add_material_text_temp_path: null,
add_track_text_segments_temp_path: null,
add_materials_beats_tmp_path: null,
add_materials_audios_tmp_path: null,
add_tracks_audio_segments_tmp_path: null,
add_keyframe_tmp_path: null,
// API 配置
remotemj_api: 'https://api.laitool.net/',
serverUrl: 'http://lms.laitool.cn',
hkServerUrl: 'https://laitool.net/',
bakServerUrl: 'https://laitool.net/',
API: 'f85d39ed5a40fd09966f13f12b6cf0f0',
lms: 'https://lms.laitool.cn'
};
}
// 浏览器环境下挂载到window
if (isBrowser && typeof window !== 'undefined') {
(window as any).define = define;
}
// 导出类型和实例
export type { DefineConfig };
export { define };
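
Consumers keep importing the same `define` object; only the module now resolves every path from a single `basePath`. A minimal usage sketch, not part of this commit (the `@/define/define` alias mirrors how LoadingComponent.vue imports it, and the config-reading logic is illustrative):

```typescript
import fs from 'fs'
import { define } from '@/define/define'

// Main-process sketch: read the cached system config from the resolved path (assumes the file exists).
if (define.system_config) {
  const systemConfig = JSON.parse(fs.readFileSync(define.system_config, 'utf-8'))
  console.log('cached GPU info:', systemConfig.gpu)
}
// API endpoints are filled in for both the main-process and browser branches.
console.log('LMS endpoint:', define.lms)
```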

View File

@ -0,0 +1,12 @@
const AXIOS = {
/** http的get请求 */
HTTP_GET: 'http:get',
/** http的post请求 */
HTTP_POST: 'http:post',
/** http的put请求 */
HTTP_PUT: 'http:put',
/** http的delete请求 */
HTTP_DELETE: 'http:delete'
}
export default AXIOS

View File

@ -6,9 +6,11 @@ import BOOK from "./bookDefineString"
 import WRITE from "./writeDefineString"
 import DB from "./dbDefineString"
 import OPTIONS from "./optionsDefineString"
+import AXIOS from "./axiosDefineString"
 export const DEFINE_STRING = {
   SYSTEM: SYSTEM,
+  AXIOS: AXIOS,
   TASK: TASK,
   TTS: TTS,
   BOOK: BOOK,

View File

@ -0,0 +1,85 @@
import { ipcMain } from 'electron'
import axios from 'axios'
import { DEFINE_STRING } from '../../define/define_string'
function AxiosIpc() {
// 通用 GET 请求
ipcMain.handle(DEFINE_STRING.AXIOS.HTTP_GET, async (_, url, config = {}) => {
try {
const response = await axios.get(url, config)
return {
success: true,
data: response.data,
status: response.status
}
} catch (error: any) {
return {
success: false,
error: error.message,
message: error.message,
status: error.response?.status,
data: error.response?.data
}
}
})
// 通用 POST 请求
ipcMain.handle(DEFINE_STRING.AXIOS.HTTP_POST, async (_, url, data = {}, config = {}) => {
try {
const response = await axios.post(url, data, config)
return {
success: true,
data: response.data,
status: response.status
}
} catch (error: any) {
return {
success: false,
error: error.message,
message: error.message,
status: error.response?.status,
data: error.response?.data
}
}
})
// 通用 PUT 请求
ipcMain.handle(DEFINE_STRING.AXIOS.HTTP_PUT, async (_, url, data = {}, config = {}) => {
try {
const response = await axios.put(url, data, config)
return {
success: true,
data: response.data,
status: response.status
}
} catch (error: any) {
return {
success: false,
error: error.message,
status: error.response?.status,
data: error.response?.data
}
}
})
// 通用 DELETE 请求
ipcMain.handle(DEFINE_STRING.AXIOS.HTTP_DELETE, async (_, url, config = {}) => {
try {
const response = await axios.delete(url, config)
return {
success: true,
data: response.data,
status: response.status
}
} catch (error: any) {
return {
success: false,
error: error.message,
status: error.response?.status,
data: error.response?.data
}
}
})
}
export default AxiosIpc
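
Every handler resolves to the same `{ success, data, status }` envelope instead of rejecting across the IPC boundary, so renderer code branches on `success`. A hedged sketch of calling one of these channels once the preload bridge below is in place; the endpoint is hypothetical and `window.axios` typing is assumed:

```typescript
// Renderer-side sketch; window.axios is exposed by src/preload/axios.ts further down.
const res = await (window as any).axios.post(
  'https://example.com/api/items', // hypothetical endpoint, for illustration only
  { name: 'demo' },
  { headers: { 'Content-Type': 'application/json' }, timeout: 10_000 } // axios config is forwarded as-is
)
if (!res.success) {
  console.error('request failed:', res.status, res.error)
}
```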

View File

@ -18,6 +18,7 @@ import { DBIpc } from './dbIpc'
 import { PresetIpc } from './presetIpc'
 import { TaskIpc } from './taskIpc'
 import { OptionsIpc } from './optionsIpc'
+import AxiosIpc from './axiosIpc'
 export async function RegisterIpc(createWindow) {
   PromptIpc()
@ -40,4 +41,5 @@ export async function RegisterIpc(createWindow) {
   BookIpc()
   TTSIpc()
   OptionsIpc()
+  AxiosIpc()
 }

View File

@ -619,7 +619,7 @@ export class MJOpt {
   // }
   // 判断是不是有批量的图片
-  if (task_res.subImagePath.length > 0) {
+  if (task_res.subImagePath && task_res.subImagePath.length > 0) {
     batchImages = [...task_res.subImagePath]
   }

View File

@ -75,7 +75,10 @@ export default class SystemInfo {
     nextDate.setDate(currentDate.getDate() + 1);
     if (systemConfig && systemConfig.gpu && nextDate > new Date()) {
       global.gpu = systemConfig.gpu
-      return successMessage(version + ' ' + (global.gpu?.name ? global.gpu.name : ''), '获取成功')
+      return successMessage({
+        version: version,
+        gpu: global.gpu
+      }, '获取版本信息和显卡信息成功', 'SystemIpc_GET_VISION_AND_GPU_MESSAGE');
     }
   }
@ -103,7 +106,10 @@ export default class SystemInfo {
       systemConfig.gpu = global.gpu
       systemConfig.gpu.updateTime = new Date()
       await fs.promises.writeFile(systemConfigPath, JSON.stringify(systemConfig));
-      return successMessage(version + ' ' + (global.gpu?.name ? global.gpu.name : ''), '获取成功')
+      return successMessage({
+        version: version,
+        gpu: global.gpu
+      }, '获取版本信息和显卡信息成功', 'SystemIpc_GET_VISION_AND_GPU_MESSAGE');
     } catch (error) {
       return errorMessage('获取版本信息和显卡信息错误,错误信息如下:' + error.message, 'SystemIpc_GET_VISION_AND_GPU_MESSAGE')
     }
@ -116,12 +122,7 @@
    */
   public async CheckMachineStatus(value: string) {
     try {
-      // 判断机器码是不是存在
-      // let res = await axios.post('http://api.yu-zhile.com/GetMachineStatus', {
-      //   machineId: value
-      // })
-      // /lms/Machine/GetMachineStatus/{machineId}
-      //
       let res = await axios.get('https://lms.laitool.cn/lms/Machine/GetMachineStatus/' + value);
       if (res.status != 200) {
         throw new Error('请求错误')
@ -145,11 +146,6 @@ export default class SystemInfo {
   public async GetMachineId() {
     try {
       let baseId = await machineId(true);
-      let checkRes = await this.CheckMachineStatus(baseId);
-      if (checkRes.code == 1) {
-        global.machineId = baseId;
-        return successMessage(baseId, '获取机器码成功')
-      }
       let hardwareInfo = '';
       try {
         if (process.platform === 'win32') {

View File

@ -19,7 +19,7 @@ import { SoftWareServiceBasic } from './Service/ServiceBasic/softwareServiceBasi
 // ipc
 import { DiscordIpc, RemoveDiscordIpc } from './IPCEvent/discordIpc.js'
 import { Logger } from './logger.js'
-import { RegisterIpc } from './IPCEvent/index.js'
+import { RegisterIpc } from './IPCEvent/index'
 import { InitRemoteMjSettingType } from './initFunc'
@ -47,13 +47,12 @@ function removeIpcHandler(hash) {
   }
 }
-async function createWindow(hash = 'ShowMessage', data, url = null) {
+async function createWindow(hash = 'mainHome', data, url = null) {
   // Create the browser window.
   await InitData(global)
   global.currentHash = hash
   // 判断当前是不是有设置的宽高,用的话记忆
-  let isRe =
-    global.config.window_wh_bm_remember && hash == 'ShowMessage' && global.config.window_wh_bm
+  let isRe = global.config.window_wh_bm_remember && hash == 'mainHome' && global.config.window_wh_bm
   const ses = session.fromPartition('persist:my-session')
   let mainWindow = new BrowserWindow({
     width: isRe ? global.config.window_wh_bm.width : 900,
@ -112,7 +111,7 @@ async function createWindow(hash = 'ShowMessage', data, url = null) {
     removeIpcHandler(hash)
     global.newWindow = global.newWindow.filter((item) => item.id != mainWindow.id)
     // 判断当前的是不是开启了记录功能
-    if (global.config.window_wh_bm_remember && hash == 'ShowMessage') {
+    if (global.config.window_wh_bm_remember && hash == 'mainHome') {
       let window_wh_bm = mainWindow.getBounds()
       // 记录到文件中
       await setting.ModifySampleSetting(JSON.stringify({ window_wh_bm: window_wh_bm }))
@ -201,7 +200,7 @@ app.whenReady().then(async () => {
     // On macOS it's common to re-create a window in the app when the
     // dock icon is clicked and there are no other windows open.
     if (BrowserWindow.getAllWindows().length === 0) {
-      mainWindow = createWindow('ShowMessage', null)
+      mainWindow = createWindow('mainHome', null)
     }
   })
@ -210,7 +209,7 @@ app.whenReady().then(async () => {
   await AutoSync()
   global.newWindow = []
-  mainWindow = await createWindow('ShowMessage', null)
+  mainWindow = await createWindow('mainHome', null)
   global.requestQuene = new AsyncQueue(global, global.config.task_number)
   global.fileQueue = new AsyncQueue(global, 1)

View File

@ -13,4 +13,61 @@ declare namespace SystemConfig {
   updateTime?: Date;
 }
//#region 更新信息和版本信息
/**
*
*/
export type ChangeType = 'bugfix' | 'add' | 'improvement' | 'remove' | 'security' | 'performance'
/**
*
*/
export interface ChangeItem {
/** 变更类型 */
type: ChangeType
/** 变更描述 */
description: string
}
/**
*
*/
export interface SubUpdateInfo {
/** 版本号 */
version: string
/** 更新日期 */
updateDate: string
/** 变更列表 */
changes: ChangeItem[]
}
/**
*
*/
export interface UpdateInfo {
/** 最新版本号 */
latestVersion: string
/** 最新更新日期 */
updateDate: string
/** 所有版本的更新信息 */
updateInfo: SubUpdateInfo[]
}
/**
*
*/
interface VersionInfo {
/** 当前版本 */
currentVersion: string
/** 最新版本 */
latestVersion: string
/** 更新信息 */
updateInfo: UpdateInfo
/** 是否可以更新 */
canUpdate: boolean
}
//#endregion
 }
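
For reference, a value satisfying the `UpdateInfo` shape declared above might look like the following; the concrete versions and descriptions are made up for illustration, and referencing the ambient `SystemConfig` namespace this way assumes the `.d.ts` is included globally:

```typescript
// Hypothetical example of the structure LoadingComponent parses from LaitoolUpdateJsonContent.
const exampleUpdateInfo: SystemConfig.UpdateInfo = {
  latestVersion: '3.3.9',
  updateDate: '2025-05-29',
  updateInfo: [
    {
      version: '3.3.9',
      updateDate: '2025-05-29',
      changes: [
        { type: 'bugfix', description: 'Example bugfix entry' },
        { type: 'improvement', description: 'Example improvement entry' }
      ]
    }
  ]
}
```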

src/preload/axios.ts (new file, 61 lines)
View File

@ -0,0 +1,61 @@
import { DEFINE_STRING } from '../define/define_string'
import { ipcRenderer } from 'electron'
// 定义请求配置接口
interface RequestConfig {
headers?: Record<string, string>
params?: Record<string, any>
timeout?: number
[key: string]: any
}
// 定义响应接口
interface HttpResponse<T = any> {
success: boolean
data?: T
error?: string
status?: number
}
// HTTP 客户端接口
const axiosPrelod = {
/**
* GET
* @param url
* @param config
*/
get: async <T = any>(url: string, config?: RequestConfig): Promise<HttpResponse<T>> =>
await ipcRenderer.invoke(DEFINE_STRING.AXIOS.HTTP_GET, url, config),
/**
* POST
* @param url
* @param data
* @param config
*/
post: async <T = any>(
url: string,
data?: any,
config?: RequestConfig
): Promise<HttpResponse<T>> =>
await ipcRenderer.invoke(DEFINE_STRING.AXIOS.HTTP_POST, url, data, config),
/**
* PUT
* @param url
* @param data
* @param config
*/
put: async <T = any>(url: string, data?: any, config?: RequestConfig): Promise<HttpResponse<T>> =>
await ipcRenderer.invoke(DEFINE_STRING.AXIOS.HTTP_PUT, url, data, config),
/**
* DELETE
* @param url
* @param config
*/
delete: async <T = any>(url: string, config?: RequestConfig): Promise<HttpResponse<T>> =>
await ipcRenderer.invoke(DEFINE_STRING.AXIOS.HTTP_DELETE, url, config)
}
export { axiosPrelod }
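
This wrapper is what `LoadingComponent.vue` relies on when it fetches the remote version; a condensed sketch of that call path (the `@/define/define` alias and the `window.axios` cast are assumptions for the sketch):

```typescript
import { define } from '@/define/define'

// Condensed from GetRemoteSystemInformation() in LoadingComponent.vue (renderer side).
async function fetchRemoteVersion(): Promise<string | null> {
  const res = await (window as any).axios.get(
    define.lms + '/lms/Options/GetOptionsByKey/0/2/LaitoolVersion'
  )
  // The bridge never throws; failures are reported through the success flag.
  if (res.success && res.data?.code == 1) {
    return res.data.data.value
  }
  return null
}
```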

View File

@ -17,6 +17,7 @@ import { translate } from './translate'
 import { preset } from './preset'
 import { task } from './task'
 import { options } from './options'
+import { axiosPrelod } from './axios'
 // Custom APIs for renderer
 let events = []
@ -480,6 +481,7 @@ if (process.contextIsolated) {
   contextBridge.exposeInMainWorld('preset', preset)
   contextBridge.exposeInMainWorld('task', task)
   contextBridge.exposeInMainWorld('options', options)
+  contextBridge.exposeInMainWorld('axios', axiosPrelod)
   contextBridge.exposeInMainWorld('darkMode', {
     toggle: (value) => ipcRenderer.invoke('dark-mode:toggle', value)
   })
@ -505,4 +507,5 @@ if (process.contextIsolated) {
   window.task = task
   window.translate = translate
   window.options = options
+  window.axios = axiosPrelod
 }

View File

@ -13,9 +13,7 @@ declare module 'vue' {
     NCheckbox: typeof import('naive-ui')['NCheckbox']
     NCode: typeof import('naive-ui')['NCode']
     NColorPicker: typeof import('naive-ui')['NColorPicker']
-    NConfigProvider: typeof import('naive-ui')['NConfigProvider']
     NDataTable: typeof import('naive-ui')['NDataTable']
-    NDialogProvider: typeof import('naive-ui')['NDialogProvider']
     NDivider: typeof import('naive-ui')['NDivider']
     NDropdown: typeof import('naive-ui')['NDropdown']
     NDynamicInput: typeof import('naive-ui')['NDynamicInput']
@ -32,10 +30,6 @@ declare module 'vue' {
     NLayoutSider: typeof import('naive-ui')['NLayoutSider']
     NLog: typeof import('naive-ui')['NLog']
     NMenu: typeof import('naive-ui')['NMenu']
-    NMessageProvider: typeof import('naive-ui')['NMessageProvider']
-    NModal: typeof import('naive-ui')['NModal']
-    NModalProvider: typeof import('naive-ui')['NModalProvider']
-    NNotificationProvider: typeof import('naive-ui')['NNotificationProvider']
     NPopover: typeof import('naive-ui')['NPopover']
     NProgress: typeof import('naive-ui')['NProgress']
     NSelect: typeof import('naive-ui')['NSelect']

View File

@ -7,7 +7,9 @@
       <n-modal-provider>
         <n-dialog-provider>
           <n-notification-provider>
-            <RouterView></RouterView>
+            <LoadingComponent v-if="loading" @loading-complete="onComplete" fullScreen>
+            </LoadingComponent>
+            <RouterView v-else></RouterView>
           </n-notification-provider>
         </n-dialog-provider>
       </n-modal-provider>
@ -35,7 +37,7 @@
   </n-modal>
 </template>
-<script>
+<script setup>
 import { defineComponent, onMounted } from 'vue'
 import hljs from 'highlight.js/lib/core'
 import javascript from 'highlight.js/lib/languages/javascript'
@ -51,49 +53,38 @@ import {
 } from 'naive-ui'
 import { useSoftwareStore } from '../../stores/software'
 import { SoftColor } from '../../define/enum/softwareEnum'
+import LoadingComponent from './components/Home/LoadingComponent.vue'
+const loading = ref(true)
+const onComplete = () => {
+  loading.value = false
+}
 hljs.registerLanguage('javascript', javascript)
-export default defineComponent({
-  components: {
-    NConfigProvider,
-    NDialogProvider,
-    NMessageProvider,
-    NNotificationProvider,
-    NModalProvider,
-    NSpin,
-    NModal
-  },
-  setup() {
-    let softwareStore = useSoftwareStore()
-    onMounted(async () => {
-      softwareStore.SoftColor = SoftColor
-      window.api.getSettingDafultData(async (value) => {
-        await window.darkMode.toggle(value.theme)
-        softwareStore.globalSetting = value
-      })
-      let software = await window.setting.GetSoftwareSetting()
-      if (software.code == 0) {
-        throw new Error('初始化信息错误: ' + software.message)
-      }
-      if (software.data == null) {
-        throw new Error('初始化信息错误: 未获取到数据')
-      }
-      if (software.data.length == 0) {
-        throw new Error('初始化信息错误: 未获取到数据')
-      }
-      softwareStore.softWare = software.data
-      console.log(softwareStore.softWare)
-      await window.darkMode.toggle(softwareStore.softWare.theme)
-    })
-    return {
-      javascript,
-      hljs,
-      darkTheme,
-      softwareStore
-    }
-  }
-})
+let softwareStore = useSoftwareStore()
+onMounted(async () => {
+  loading.value = true
+  softwareStore.SoftColor = SoftColor
+  window.api.getSettingDafultData(async (value) => {
+    console.log(value.theme)
+    await window.darkMode.toggle(value.theme)
+    softwareStore.globalSetting = value
+  })
+  let software = await window.setting.GetSoftwareSetting()
+  if (software.code == 0) {
+    throw new Error('初始化信息错误: ' + software.message)
+  }
+  if (software.data == null) {
+    throw new Error('初始化信息错误: 未获取到数据')
+  }
+  if (software.data.length == 0) {
+    throw new Error('初始化信息错误: 未获取到数据')
+  }
+  softwareStore.softWare = software.data
+  console.log(softwareStore.softWare)
+})
 </script>

Binary file not shown (new image, 91 KiB).

Binary file not shown (new image, 895 KiB).
View File

@ -90,6 +90,7 @@ import {
 import { useRouter } from 'vue-router'
 import { DEFINE_STRING } from '../../../../../define/define_string'
 import { isEmpty } from 'lodash'
+import { TimeDelay } from '@/define/Tools/time'
 let softwareStore = useSoftwareStore()
 let reverseManageStore = useReverseManageStore()
 let dialog = useDialog()
@ -357,6 +358,94 @@ async function ButtonSelect(key) {
   }
 }
//
async function resetALLData() {
let da = dialog.warning({
title: '重置所有数据提示',
content: `即将开始重置所有的数据包括GPT提示词、合并提示词、生成图片等当前的操作不可逆是否继续`,
positiveText: '继续',
negativeText: '取消',
onPositiveClick: async () => {
try {
da?.destroy()
softwareStore.spin.spinning = true
softwareStore.spin.tip = '正在重置GPT提示词数据请稍后。。。'
let res = await window.book.ResetGptReverseData(
reverseManageStore.selectBookTask.id,
OperateBookType.BOOKTASK,
BookType.ORIGINAL
)
softwareStore.spin.spinning = false
if (res.code == 1) {
//
for (let i = 0; i < reverseManageStore.selectBookTaskDetail.length; i++) {
reverseManageStore.selectBookTaskDetail[i].gptPrompt = ''
}
}
message.success('重置GPT提示词成功')
await TimeDelay(500)
softwareStore.spin.tip = '正在重置合并提示词数据,请稍后。。。'
for (let i = 0; i < reverseManageStore.selectBookTaskDetail.length; i++) {
const element = reverseManageStore.selectBookTaskDetail[i]
let res = await window.db.UpdateBookTaskDetailData(element.id, {
prompt: ''
})
if (res.code == 1) {
let findIndex = reverseManageStore.selectBookTaskDetail.findIndex(
(item) => item.id == element.id
)
if (findIndex != -1) {
reverseManageStore.selectBookTaskDetail[findIndex].prompt = ''
}
} else {
message.error(res.message)
}
}
message.success('重置合并提示词成功')
await TimeDelay(500)
softwareStore.spin.tip = '正在重置生成图片数据,请稍后。。。'
res = await window.book.ResetGenerateImage(
reverseManageStore.selectBookTask.id,
OperateBookType.BOOKTASK,
false
)
softwareStore.spin.spinning = false
if (res.code == 1) {
for (let i = 0; i < res.data.length; i++) {
const element = res.data[i]
let findIndex = reverseManageStore.selectBookTaskDetail.findIndex(
(item) => item.id == element.id
)
if (findIndex != -1) {
reverseManageStore.selectBookTaskDetail[findIndex].mjMessage = undefined
reverseManageStore.selectBookTaskDetail[findIndex].outImagePath = ''
reverseManageStore.selectBookTaskDetail[findIndex].subImagePath = []
}
}
} else {
message.error(res.message)
return
}
message.success('重置生成图片数据成功')
await TimeDelay(500)
softwareStore.spin.spinning = false
message.success('重置所有数据成功')
} catch (error) {
softwareStore.spin.spinning = false
message.error('重置所有数据失败,' + error.message)
} finally {
softwareStore.spin.spinning = false
}
}
})
}
 // GPT
 async function GetPromptAll() {
   let da = dialog.warning({

View File

@ -1,40 +1,38 @@
 <template>
-  <n-spin :show="show">
   <n-space vertical>
     <n-layout has-sider style="height: 100vh; position: relative">
       <n-layout-sider
         bordered
         collapse-mode="width"
         :collapsed-width="64"
         :width="220"
         :collapsed="collapsed"
         show-trigger
         @collapse="collapsed = true"
         @expand="collapsed = false"
         style="position: relative"
       >
         <n-menu
           :collapsed="collapsed"
           :collapsed-width="64"
           :collapsed-icon-size="22"
           :options="menuOptions"
           :render-icon="renderMenuIcon"
           :expand-icon="expandIcon"
+          default-value="mainHome"
         >
         </n-menu>
       </n-layout-sider>
       <n-layout-content content-style="padding: 5px 5px 5px 10px; height:100%">
         <!-- <Setting></Setting> -->
         <router-view></router-view>
       </n-layout-content>
     </n-layout>
   </n-space>
-    <template #description> 正在加载软件授权信息 </template>
-  </n-spin>
 </template>
 <script setup>
-import { ref, h, onMounted, defineComponent, toRaw, computed } from 'vue'
+import { ref, h, onMounted, toRaw, computed } from 'vue'
 import { RouterLink } from 'vue-router'
 import {
   useDialog,
@ -45,12 +43,12 @@ import {
   NLayoutContent,
   NIcon,
   useNotification,
-  useMessage,
-  NSpin
+  useMessage
 } from 'naive-ui'
 import {
   CaretDownOutline,
+  HomeOutline,
   PaperPlaneOutline,
   SettingsOutline,
   DuplicateOutline,
@ -58,7 +56,7 @@ import {
   RadioOutline,
   BookOutline
 } from '@vicons/ionicons5'
-import CheckMachineId from '../Components/CheckMachineId.vue'
 import { DEFINE_STRING } from '../../../../define/define_string'
 import { MD5 } from 'crypto-js'
 import InputDialogContent from '../Original/Components/InputDialogContent.vue'
@ -66,16 +64,11 @@ import APIIcon from '../Icon/APIIcon.vue'
 import BackTaskIcon from '../Icon/BackTaskIcon.vue'
 import BackTask from '@/renderer/src/components/BackTask/BackTask.vue'
 import { useSystemStore } from '../../../../stores/system'
-import { TimeDelay } from '../../../../define/Tools/time'
-import { BookBackTaskStatus } from '../../../../define/enum/bookEnum'
 let collapsed = ref(false)
 let dialog = useDialog()
-let machineRef = ref()
 let message = useMessage()
 let notification = useNotification()
-let show = ref(true)
-const systemStore = useSystemStore()
 let key_down_ref = ref(null)
 let showMenu = ref(true)
@ -87,6 +80,7 @@ function renderMenuIcon(option) {
   if (option.key == 'sdoriginal') return h(NIcon, null, { default: () => h(PaperPlaneOutline) })
   if (option.key == 'setting') return h(NIcon, null, { default: () => h(SettingsOutline) })
   if (option.key == 'gptCopywriting') return h(NIcon, null, { default: () => h(BookOutline) })
+  if (option.key == 'mainHome') return h(NIcon, null, { default: () => h(HomeOutline) })
   if (option.key == 'book_management') return h(NIcon, null, { default: () => h(GridOutline) })
   if (option.key == 'lai_api') return h(NIcon, null, { default: () => h(APIIcon) })
   if (option.key == 'backward_matrix') return h(NIcon, null, { default: () => h(DuplicateOutline) })
@ -103,128 +97,7 @@
   if (option.key == 'TTS_Services') return h(NIcon, null, { default: () => h(RadioOutline) })
 }
//
async function StartBackTask(isGiveUp) {
let startRes = await window.task.StartBackTask(isGiveUp)
if (startRes.code == 0) {
message.error(startRes.message)
} else {
message.success(startRes.message)
}
}
/** 判断是不是还是后台任务,用户确认是不是丢弃 */
async function CheckWaitTask() {
await TimeDelay(1000)
let getWaitTaskCountRes = await window.task.GetAllStatusTaskCount([
BookBackTaskStatus.WAIT,
BookBackTaskStatus.RECONNECT
])
//
if (getWaitTaskCountRes.code == 0) {
message.error(getWaitTaskCountRes.message)
await StartBackTask(true)
return
}
if (getWaitTaskCountRes.data && getWaitTaskCountRes.data > 0) {
//
dialog.warning({
title: '后台任务',
content: `检测到后台未完成的任务,任务数量为 ${getWaitTaskCountRes.data},请选择操作!`,
positiveText: '继续执行',
negativeText: '丢弃所有',
onPositiveClick: async () => {
//
await StartBackTask(false)
},
onNegativeClick: async () => {
//
await StartBackTask(true)
}
})
} else {
//
await StartBackTask(true)
}
}
/** 校验机器码的逻辑 */
async function GetMachineStatus() {
try {
//
const machineRes = await window.system.GetMachineId()
if (machineRes.code == 0) {
message.error(machineRes.message)
return
}
systemStore.machineId = machineRes.data
const checkMachineSatatusRes = await window.system.CheckMachineStatus(machineRes.data)
if (checkMachineSatatusRes.code == 1) {
//
message.success('自动检测机器码成功')
//
await CheckWaitTask()
return
} else {
message.error('自动检测机器码失败,请手动确认')
}
//
//
const da = dialog.create({
showIcon: false,
content: () => h(CheckMachineId, { ref: machineRef }),
style: `width : 400px; height: 300px`,
maskClosable: false,
positiveText: '确定',
onClose: () => {
//
window.api.QuitApp()
return false
},
onEsc: () => {
//
window.api.QuitApp()
return false
},
onPositiveClick: async () => {
const checkMachineSatatusRes = await window.system.CheckMachineStatus(machineRes.data)
if (checkMachineSatatusRes.code == 1) {
//
message.success('机器码校验成功')
da.destroy()
//
await CheckWaitTask()
} else {
message.error(checkMachineSatatusRes.message)
return false
}
}
})
} catch (error) {
message.error('获取机器状态码失败,请联系管理员,错误信息如下:' + error.toString())
} finally {
show.value = false
}
}
/** 加载版本和GPU信息 */
async function GetViosionAndGpuMessage() {
//
let res = await window.system.GetViosionAndGpuMessage()
console.log(res)
if (res.code == 1) {
document.title = 'LAITool ' + res.data
} else {
message.success(res.message)
}
}
 onMounted(async () => {
-  //
-  await GetViosionAndGpuMessage()
   // ctrl + alt + l
   window.addEventListener('keydown', (e) => {
     if (e.ctrlKey && e.altKey && e.key === 'l') {
@ -306,7 +179,6 @@ onMounted(async () => {
   window.api.setEventListen([DEFINE_STRING.SHOW_MAIN_MESSAGE], (value) => {
     let mess = value.message
-    let type = 'success'
     if (value.code == 0) {
       message.error(mess)
     } else if (value.code == 2) {
@ -318,9 +190,6 @@ onMounted(async () => {
     }
   })
-  //
-  await GetMachineStatus()
   window.api.getSettingDafultData(async (value) => {
     window.config = value
     if (!(window.config && window.config.showOriginal)) {
@ -335,6 +204,22 @@ function expandIcon(value) {
 const menuOptions = computed(() => {
   let me = [
+    {
+      label: () =>
+        h(
+          RouterLink,
+          {
+            to: {
+              name: 'mainHome'
+            },
+            class: 'router-link-a'
+          },
+          {
+            default: () => '首页'
+          }
+        ),
+      key: 'mainHome'
+    },
     {
       label: () =>
         h(

View File

@ -0,0 +1,842 @@
<!-- LoadingComponent.vue -->
<template>
<div class="loading-container" :class="{ 'full-screen': fullScreen }">
<div class="loading-content">
<!-- 主要加载动画 -->
<div class="loading-spinner">
<div class="spinner-ring">
<div></div>
<div></div>
<div></div>
<div></div>
</div>
</div>
<!-- 加载文本 -->
<div class="loading-text">
<h3>{{ title }}</h3>
<p>{{ currentStepText }}</p>
</div>
<!-- 进度条 -->
<div class="loading-progress">
<div class="progress-bar">
<div class="progress-fill" :style="{ width: progress + '%' }"></div>
</div>
<span class="progress-text">{{ progress }}%</span>
</div>
<!-- 加载步骤 -->
<div class="loading-steps">
<div
v-for="(step, index) in loadingSteps"
:key="index"
class="step-item"
:class="{
active: index === currentStep,
completed: index < currentStep
}"
>
<div class="step-icon">
<n-icon v-if="index < currentStep" size="14">
<checkmark-outline />
</n-icon>
<div v-else-if="index === currentStep" class="step-loading"></div>
<span v-else class="step-number">{{ index + 1 }}</span>
</div>
<span class="step-text">{{ step.text }}</span>
</div>
</div>
</div>
<!-- 背景装饰 -->
<div class="loading-background">
<div class="bg-circle bg-circle-1"></div>
<div class="bg-circle bg-circle-2"></div>
<div class="bg-circle bg-circle-3"></div>
</div>
</div>
</template>
<script setup>
import { ref, computed, onMounted, nextTick, readonly } from 'vue'
import { NIcon, useMessage, useDialog } from 'naive-ui'
import { CheckmarkOutline } from '@vicons/ionicons5'
import { useSystemStore } from '@/stores/system'
import CheckMachineId from '@/renderer/src/components/Components/CheckMachineId.vue'
import { TimeDelay } from '@/define/Tools/time'
import { BookBackTaskStatus } from '@/define/enum/bookEnum'
import { define } from '@/define/define'
import { ValidateJson } from '@/define/Tools/validate'
const message = useMessage()
const dialog = useDialog()
const systemStore = useSystemStore()
const props = defineProps({
//
fullScreen: {
type: Boolean,
default: false
},
//
title: {
type: String,
default: 'LAI Tool 启动中'
},
//
themeColor: {
type: String,
default: '#18a058'
},
//
autoStart: {
type: Boolean,
default: true
}
})
const emit = defineEmits(['loading-complete'])
//
const progress = ref(0)
const currentStep = ref(0)
const isCompleted = ref(false)
const isLoading = ref(false) //
//
const loadingSteps = ref([
{
text: '初始化系统组件',
duration: 1000,
action: async () => {
//
await new Promise((resolve) => setTimeout(resolve, 800))
}
},
{
text: '获取设备硬件信息',
duration: 900,
action: async () => {
//
await GetViosionAndGpuMessage()
await new Promise((resolve) => setTimeout(resolve, 700))
}
},
{
text: '加载软件信息',
duration: 1200,
action: async () => {
//
await GetRemoteSystemInformation()
await new Promise((resolve) => setTimeout(resolve, 1000))
}
},
{
text: '检查软件授权状态',
duration: 800,
action: async () => {
await GetMachineStatus()
//
await new Promise((resolve) => setTimeout(resolve, 600))
}
},
{
text: '准备用户界面',
duration: 600,
action: async () => {
//
await new Promise((resolve) => setTimeout(resolve, 400))
}
}
])
//
async function GetRemoteSystemInformation() {
try {
//
let remoteVersionRes = await window.axios.get(
define.lms + `/lms/Options/GetOptionsByKey/0/2/LaitoolVersion`
)
console.log('remoteVersionRes', remoteVersionRes)
if (remoteVersionRes.success == false) {
throw new Error('获取远程版本信息失败: ' + remoteVersionRes.message)
}
if (remoteVersionRes.data && remoteVersionRes.data.code == 1) {
systemStore.remoteVersion = remoteVersionRes.data.data.value
} else {
throw new Error('获取远程版本信息失败: ' + remoteVersionRes.data.message)
}
//
let remoteUpdateJsonContentRes = await window.axios.get(
define.lms + `/lms/Options/GetOptionsByKey/0/2/LaitoolUpdateJsonContent`
)
console.log('remoteUpdateJsonContentRes', remoteUpdateJsonContentRes)
if (remoteUpdateJsonContentRes.success == false) {
throw new Error('获取远程更新信息失败: ' + remoteUpdateJsonContentRes.message)
}
if (
remoteUpdateJsonContentRes.data &&
remoteUpdateJsonContentRes.data.code == 1 &&
ValidateJson(remoteUpdateJsonContentRes.data.data.value)
) {
systemStore.versionInfo.updateInfo = JSON.parse(remoteUpdateJsonContentRes.data.data.value)
} else {
throw new Error('获取远程更新信息失败: ' + remoteUpdateJsonContentRes.data.message)
}
systemStore.versionInfo.currentVersion = 'v' + systemStore.version
systemStore.versionInfo.latestVersion = 'v' + systemStore.remoteVersion
if (systemStore.version < systemStore.remoteVersion) {
//
systemStore.versionInfo.canUpdate = true
} else {
systemStore.versionInfo.canUpdate = false
}
} catch (error) {
throw error
}
}
/** 加载版本和GPU信息 */
async function GetViosionAndGpuMessage() {
//
let res = await window.system.GetViosionAndGpuMessage()
console.log(res)
if (res.code == 1) {
document.title = 'LAITool v' + res.data.version
systemStore.version = res.data.version
systemStore.gpu = res.data.gpu
} else {
message.success(res.message)
}
}
const machineRef = ref(null)
/** 校验机器码的逻辑 */
async function GetMachineStatus() {
try {
//
const machineRes = await window.system.GetMachineId()
if (machineRes.code == 0) {
message.error(machineRes.message)
return
}
systemStore.machineId = machineRes.data
const checkMachineSatatusRes = await window.system.CheckMachineStatus(machineRes.data)
if (checkMachineSatatusRes.code == 1) {
//
message.success('自动检测机器码成功')
//
await CheckWaitTask()
return
} else {
message.error('自动检测机器码失败,请手动确认')
}
//
//
const da = dialog.create({
showIcon: false,
content: () => h(CheckMachineId, { ref: machineRef }),
style: `width : 400px; height: 300px`,
maskClosable: false,
positiveText: '确定',
onClose: () => {
//
window.api.QuitApp()
return false
},
onEsc: () => {
//
window.api.QuitApp()
return false
},
onPositiveClick: async () => {
const checkMachineSatatusRes = await window.system.CheckMachineStatus(machineRes.data)
if (checkMachineSatatusRes.code == 1) {
//
message.success('机器码校验成功')
da.destroy()
//
jumpToStep(4)
await nextTick() // DOM
continue_() // 3
//
await CheckWaitTask()
} else {
message.error(checkMachineSatatusRes.message)
return false
}
}
})
throw new Error('手动验证机器码')
} catch (error) {
message.error('获取机器状态码失败,请联系管理员,错误信息如下:' + error.toString())
throw error
} finally {
}
}
/** 判断是不是还是后台任务,用户确认是不是丢弃 */
async function CheckWaitTask() {
await TimeDelay(1000)
let getWaitTaskCountRes = await window.task.GetAllStatusTaskCount([
BookBackTaskStatus.WAIT,
BookBackTaskStatus.RECONNECT
])
//
if (getWaitTaskCountRes.code == 0) {
message.error(getWaitTaskCountRes.message)
await StartBackTask(true)
return
}
if (getWaitTaskCountRes.data && getWaitTaskCountRes.data > 0) {
//
dialog.warning({
title: '后台任务',
content: `检测到后台未完成的任务,任务数量为 ${getWaitTaskCountRes.data},请选择操作!`,
positiveText: '继续执行',
negativeText: '丢弃所有',
onPositiveClick: async () => {
//
await StartBackTask(false)
},
onNegativeClick: async () => {
//
await StartBackTask(true)
}
})
} else {
//
await StartBackTask(true)
}
}
//
async function StartBackTask(isGiveUp) {
let startRes = await window.task.StartBackTask(isGiveUp)
if (startRes.code == 0) {
message.error(startRes.message)
} else {
message.success(startRes.message)
}
}
//
const currentStepText = computed(() => {
if (isCompleted.value) {
return '加载完成,即将进入应用'
}
if (currentStep.value < loadingSteps.value.length) {
return loadingSteps.value[currentStep.value].text
}
return '正在准备...'
})
//
const startLoading = async () => {
if (isLoading.value) return //
isLoading.value = true
currentStep.value = 0
progress.value = 0
isCompleted.value = false
await executeStepsFrom(0)
}
//
const continueLoading = async () => {
if (isLoading.value) return //
if (isCompleted.value) return //
isLoading.value = true
await executeStepsFrom(currentStep.value)
}
//
const executeStepsFrom = async (startStep) => {
const totalSteps = loadingSteps.value.length
for (let i = startStep; i < totalSteps; i++) {
currentStep.value = i
const step = loadingSteps.value[i]
try {
//
const startTime = Date.now()
//
await Promise.all([
//
animateProgress(i, totalSteps),
//
step.action()
])
//
const elapsed = Date.now() - startTime
const minDuration = step.duration || 800
if (elapsed < minDuration) {
await new Promise((resolve) => setTimeout(resolve, minDuration - elapsed))
}
} catch (error) {
//
console.error(`步骤 ${i + 1} 执行失败:`, error)
isLoading.value = false
return //
}
}
//
currentStep.value = totalSteps
progress.value = 100
isCompleted.value = true
isLoading.value = false
//
setTimeout(() => {
emit('loading-complete')
}, 500)
}
//
const animateProgress = async (stepIndex, totalSteps) => {
const startProgress = Math.floor((stepIndex / totalSteps) * 100)
const endProgress = Math.floor(((stepIndex + 1) / totalSteps) * 100)
const duration = 300 //
const steps = 20 //
const stepDuration = duration / steps
const progressStep = (endProgress - startProgress) / steps
for (let i = 0; i <= steps; i++) {
progress.value = Math.min(startProgress + Math.floor(progressStep * i), endProgress)
await new Promise((resolve) => setTimeout(resolve, stepDuration))
}
}
//
const start = () => {
startLoading()
}
//
const continue_ = () => {
continueLoading()
}
//
const skipCurrentStep = () => {
if (isLoading.value || isCompleted.value) return
//
currentStep.value += 1
//
continueLoading()
}
//
const jumpToStep = (stepIndex) => {
if (isLoading.value || isCompleted.value) return
if (stepIndex < 0 || stepIndex >= loadingSteps.value.length) return
currentStep.value = stepIndex
//
const totalSteps = loadingSteps.value.length
progress.value = Math.floor((stepIndex / totalSteps) * 100)
}
//
const reset = () => {
isLoading.value = false
progress.value = 0
currentStep.value = 0
isCompleted.value = false
}
//
const pause = () => {
isLoading.value = false
}
//
const resume = () => {
if (!isCompleted.value) {
continueLoading()
}
}
//
defineExpose({
start, //
continue: continue_, //
skipCurrentStep, //
jumpToStep, //
reset, //
pause, //
resume, //
//
isLoading: readonly(isLoading),
isCompleted: readonly(isCompleted),
currentStep: readonly(currentStep),
progress: readonly(progress)
})
//
onMounted(() => {
if (props.autoStart) {
nextTick(() => {
startLoading()
})
}
})
</script>
<style scoped>
.loading-container {
position: relative;
display: flex;
align-items: center;
justify-content: center;
min-height: 200px;
border-radius: 12px;
overflow: hidden;
}
.loading-container.full-screen {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
width: 100vw;
height: 100vh;
min-height: 100vh;
max-height: 100vh;
border-radius: 0;
margin: 0;
padding: 0;
}
.loading-content {
position: relative;
text-align: center;
max-width: 400px;
padding: 20px;
width: 100%;
box-sizing: border-box;
}
/* 确保全屏时内容居中 */
.loading-container.full-screen .loading-content {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: auto;
max-width: 500px;
min-width: 400px;
}
/* 加载动画 */
.loading-spinner {
margin-bottom: 24px;
}
.spinner-ring {
display: inline-block;
position: relative;
width: 64px;
height: 64px;
}
.spinner-ring div {
box-sizing: border-box;
display: block;
position: absolute;
width: 51px;
height: 51px;
margin: 6px;
border: 6px solid v-bind(themeColor);
border-radius: 50%;
animation: spinner-ring 1.2s cubic-bezier(0.5, 0, 0.5, 1) infinite;
border-color: v-bind(themeColor) transparent transparent transparent;
}
.spinner-ring div:nth-child(1) {
animation-delay: -0.45s;
}
.spinner-ring div:nth-child(2) {
animation-delay: -0.3s;
}
.spinner-ring div:nth-child(3) {
animation-delay: -0.15s;
}
@keyframes spinner-ring {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}
/* 加载文本 */
.loading-text h3 {
margin: 0 0 8px 0;
font-size: 18px;
font-weight: 600;
}
.loading-text p {
margin: 0;
font-size: 14px;
line-height: 1.5;
min-height: 20px;
}
/* 进度条 */
.loading-progress {
margin: 24px 0;
}
.progress-bar {
width: 100%;
height: 6px;
background: #f0f0f0;
border-radius: 3px;
overflow: hidden;
margin-bottom: 8px;
}
.progress-fill {
height: 100%;
background: linear-gradient(90deg, v-bind(themeColor), #40a9ff);
border-radius: 3px;
transition: width 0.3s ease;
}
.progress-text {
font-size: 12px;
font-weight: 500;
}
/* 加载步骤 */
.loading-steps {
text-align: left;
max-width: 100%;
}
.step-item {
display: flex;
align-items: center;
gap: 12px;
padding: 8px 0;
opacity: 0.5;
transition: all 0.3s ease;
}
.step-item.active {
opacity: 1;
color: v-bind(themeColor);
}
.step-item.completed {
opacity: 0.8;
color: #52c41a;
}
.step-icon {
width: 24px;
height: 24px;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
background: #f0f0f0;
font-size: 12px;
font-weight: 600;
flex-shrink: 0;
}
.step-item.active .step-icon {
background: v-bind(themeColor);
color: white;
}
.step-item.completed .step-icon {
background: #52c41a;
color: white;
}
.step-loading {
width: 12px;
height: 12px;
border: 2px solid white;
border-top: 2px solid transparent;
border-radius: 50%;
animation: spin 1s linear infinite;
}
.step-number {
}
.step-item.active .step-number {
}
.step-text {
font-size: 14px;
font-weight: 500;
flex: 1;
word-break: break-word;
}
@keyframes spin {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}
/* 背景装饰 */
.loading-background {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
width: 100%;
height: 100%;
overflow: hidden;
}
.bg-circle {
position: absolute;
border-radius: 50%;
background: linear-gradient(45deg, rgba(24, 160, 88, 0.1), rgba(64, 169, 255, 0.1));
animation: float 6s ease-in-out infinite;
}
.bg-circle-1 {
width: 100px;
height: 100px;
top: 10%;
left: 10%;
animation-delay: 0s;
}
.bg-circle-2 {
width: 150px;
height: 150px;
top: 60%;
right: 10%;
animation-delay: 2s;
}
.bg-circle-3 {
width: 80px;
height: 80px;
bottom: 20%;
left: 60%;
animation-delay: 4s;
}
@keyframes float {
0%,
100% {
transform: translateY(0px) rotate(0deg);
}
50% {
transform: translateY(-20px) rotate(180deg);
}
}
/* 深色模式 */
.dark .loading-container {
background: rgba(26, 26, 26, 0.95);
}
.dark .loading-container.full-screen {
background: rgba(26, 26, 26, 0.98);
}
.dark .loading-text h3 {
color: #fff;
}
.dark .loading-text p {
color: #ccc;
}
.dark .progress-bar {
background: #3d3d3d;
}
.dark .progress-text {
color: #ccc;
}
.dark .step-icon {
background: #3d3d3d;
}
.dark .step-number {
color: #ccc;
}
.dark .step-text {
color: #fff;
}
.dark .step-item {
color: #ccc;
}
/* 响应式适配 */
@media (max-width: 768px) {
.loading-container.full-screen .loading-content {
min-width: 300px;
max-width: 90vw;
padding: 16px;
}
.loading-text h3 {
font-size: 16px;
}
.step-text {
font-size: 13px;
}
}
/* 确保覆盖所有可能的父级样式 */
.loading-container.full-screen {
position: fixed !important;
inset: 0 !important;
width: 100vw !important;
height: 100vh !important;
min-width: 100vw !important;
min-height: 100vh !important;
max-width: 100vw !important;
max-height: 100vh !important;
left: 0 !important;
top: 0 !important;
right: 0 !important;
bottom: 0 !important;
margin: 0 !important;
padding: 0 !important;
border: none !important;
border-radius: 0 !important;
box-sizing: border-box !important;
}
</style>
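The spinner, progress, and step styles above read their accent color through Vue's v-bind() in CSS, which the SFC compiler turns into a CSS custom property tracking a reactive value in the component's <script setup>. A minimal sketch of the script-side binding those rules assume (only the name themeColor comes from the styles; the default value here is an assumption):

// Sketch of the <script setup> counterpart assumed by v-bind(themeColor) above.
import { ref } from 'vue'

// v-bind(themeColor) in <style> compiles to a CSS variable bound to this ref,
// so updating it re-colors the spinner ring, active step icon and progress bar live.
const themeColor = ref('#18a058') // assumed default; the project's actual theme value may differ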

File diff suppressed because it is too large

@@ -0,0 +1,238 @@
<template>
<div class="contact-developer">
<!-- Header description -->
<div class="header-section">
<div class="title">👨💻 开发者微信</div>
<div class="description">扫描下方二维码添加开发者微信获取专业技术支持</div>
</div>
<!-- QR code image container -->
<div class="qrcode-container">
<img
src="../../assets//dev-user.jpg"
alt="开发者微信二维码"
class="qrcode-image"
@error="handleImageError"
/>
<div v-if="showPlaceholder" class="qrcode-placeholder">
<div class="placeholder-icon">📱</div>
<div class="placeholder-text">二维码图片</div>
<div class="placeholder-subtext">请联系管理员获取</div>
</div>
</div>
<!-- Contact notes -->
<div class="contact-info">
<div class="contact-title">
<span class="contact-icon">💡</span>
联系须知
</div>
<ul class="contact-list">
<li>添加时请备注LAITool用户</li>
<li>工作时间周一至周五 9:00-18:00</li>
<li>支持技术咨询BUG反馈功能建议</li>
<li>请详细描述您遇到的问题以便快速解决</li>
</ul>
</div>
<!-- Footer hint -->
<div class="footer-info">
<div>如无法扫码可复制微信号</div>
<div class="wechat-id" @click="copyWechatId">{{ wechatId }}</div>
<div>或发送邮件至{{ email }}</div>
</div>
</div>
</template>
<script setup>
import { ref } from 'vue'
import { useMessage } from 'naive-ui'
const props = defineProps({
wechatId: {
type: String,
default: 'xiangbie88'
},
email: {
type: String,
default: '2769838458@qq.com'
}
})
const emit = defineEmits(['copy-success', 'copy-error'])
const message = useMessage()
const showPlaceholder = ref(false)
const handleImageError = () => {
showPlaceholder.value = true
}
const copyWechatId = async () => {
try {
await navigator.clipboard.writeText(props.wechatId)
message.success('微信号已复制到剪贴板')
emit('copy-success', props.wechatId)
} catch (error) {
message.error(`复制失败,请手动复制:${props.wechatId}`)
emit('copy-error', error)
}
}
</script>
<style scoped>
.contact-developer {
text-align: center;
padding: 20px 0;
font-size: 14px;
line-height: 1.6;
}
.header-section {
margin-bottom: 24px;
}
.title {
font-size: 16px;
font-weight: 600;
margin-bottom: 8px;
}
.description {
font-size: 14px;
line-height: 1.5;
}
.qrcode-container {
margin: 24px 0;
padding: 20px;
background: linear-gradient(135deg, #f0f9ff 0%, #e6f7ff 100%);
border-radius: 12px;
border: 2px dashed #1890ff;
position: relative;
}
.qrcode-image {
width: 200px;
border-radius: 8px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}
.qrcode-placeholder {
width: 200px;
height: 200px;
background: #f5f5f5;
border: 2px dashed #d9d9d9;
border-radius: 8px;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
color: #999;
margin: 0 auto;
}
.placeholder-icon {
font-size: 48px;
margin-bottom: 12px;
}
.placeholder-text {
font-size: 14px;
}
.placeholder-subtext {
font-size: 12px;
margin-top: 4px;
}
.contact-info {
margin-top: 24px;
padding: 16px;
background: #fff7e6;
border-radius: 8px;
border-left: 4px solid #fa8c16;
text-align: left;
}
.contact-title {
font-weight: 600;
color: #fa8c16;
margin-bottom: 8px;
display: flex;
align-items: center;
justify-content: center;
}
.contact-icon {
margin-right: 8px;
}
.contact-list {
margin: 0;
padding-left: 20px;
color: #666;
font-size: 13px;
line-height: 1.6;
}
.contact-list li {
margin-bottom: 4px;
}
.footer-info {
margin-top: 16px;
font-size: 12px;
line-height: 1.4;
}
.wechat-id {
font-weight: 600;
color: #1890ff;
margin-top: 4px;
font-size: 20px;
cursor: pointer;
user-select: all;
padding: 4px 8px;
border-radius: 4px;
transition: background-color 0.2s ease;
}
/* Dark mode adaptation */
.dark .title,
.dark .contact-title {
color: #fff;
}
.dark .description {
color: #ccc;
}
.dark .qrcode-container {
background: linear-gradient(135deg, #1a2332 0%, #2a3441 100%);
border-color: #3a4a5c;
}
.dark .qrcode-placeholder {
background: #2d2d2d;
border-color: #4a4a4a;
color: #ccc;
}
.dark .contact-info {
background: #2a2418;
border-color: #5a4a2c;
}
.dark .contact-list {
color: #ccc;
}
.dark .footer-info {
color: #ccc;
}
.dark .wechat-id:hover {
background-color: #1a2332;
}
</style>
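copyWechatId above relies on navigator.clipboard.writeText, which is only exposed in secure contexts; when it rejects, the component falls back to asking the user to copy manually. If a programmatic fallback were wanted, a hedged sketch could look like the helper below (copyTextWithFallback is a hypothetical name, not part of this commit):

// Hypothetical helper: prefer the async Clipboard API, fall back to a hidden
// textarea plus the deprecated-but-widely-supported execCommand('copy').
async function copyTextWithFallback(text) {
  if (navigator.clipboard && window.isSecureContext) {
    await navigator.clipboard.writeText(text)
    return true
  }
  const textarea = document.createElement('textarea')
  textarea.value = text
  textarea.style.position = 'fixed' // keep it out of the layout and viewport flow
  textarea.style.opacity = '0'
  document.body.appendChild(textarea)
  textarea.select()
  const copied = document.execCommand('copy')
  document.body.removeChild(textarea)
  return copied
}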

@@ -0,0 +1,164 @@
<template>
<div class="wechat-group">
<!-- Group info card -->
<div class="group-info">
<div class="group-title">🎯 加入VIP用户交流群</div>
<div class="group-description">{{ groupInfo }}</div>
</div>
<!-- Group benefits -->
<div class="benefits-section">
<div class="benefits-title">💡 群内福利</div>
<ul class="benefits-list">
<li class="benefit-item">
<span class="benefit-icon">📢</span>
获取最新版本信息和更新通知
</li>
<li class="benefit-item">
<span class="benefit-icon">💬</span>
与其他用户交流使用技巧和经验
</li>
<li class="benefit-item">
<span class="benefit-icon">🐛</span>
快速反馈问题和建议
</li>
<li class="benefit-item">
<span class="benefit-icon">🛠</span>
获得开发团队的技术支持
</li>
</ul>
</div>
<!-- Contact info -->
<div class="contact-info">联系管理员邀请入群或者联系对应代理</div>
<!-- Group rules reminder -->
<div class="rules-notice">请勿在群内发布广告或无关内容保持良好交流氛围</div>
</div>
</template>
<script setup>
defineProps({
groupInfo: {
type: String,
default: '仅限永久VIP用户可加入'
}
})
</script>
<style scoped>
.wechat-group {
line-height: 1.6;
font-size: 14px;
}
.group-info {
margin-bottom: 16px;
padding: 12px;
background: #f0f9ff;
border-radius: 8px;
border-left: 4px solid #1890ff;
}
.group-title {
font-weight: 600;
color: #1890ff;
margin-bottom: 4px;
}
.group-description {
font-size: 13px;
color: #1890ff;
}
.benefits-section {
margin-bottom: 16px;
}
.benefits-title {
margin-bottom: 12px;
font-weight: 600;
}
.benefits-list {
margin: 0;
padding-left: 20px;
}
.benefit-item {
margin-bottom: 8px;
padding: 4px 0;
display: flex;
align-items: flex-start;
gap: 8px;
}
.benefit-icon {
font-weight: 600;
flex-shrink: 0;
}
.benefit-item:nth-child(1) .benefit-icon {
color: #52c41a;
}
.benefit-item:nth-child(2) .benefit-icon {
color: #1890ff;
}
.benefit-item:nth-child(3) .benefit-icon {
color: #f0a020;
}
.benefit-item:nth-child(4) .benefit-icon {
color: #d03050;
}
.contact-info {
margin-top: 16px;
padding: 8px;
background: #fff7e6;
border-radius: 6px;
text-align: center;
color: #fa8c16;
font-size: 12px;
}
.rules-notice {
margin-top: 8px;
padding: 8px;
background: #fff7e6;
border-radius: 6px;
text-align: center;
color: #fa8c16;
font-size: 12px;
}
/* Dark mode adaptation */
.dark .group-info {
background: #1a2332;
border-color: #3a4a5c;
}
.dark .group-title {
color: #40a9ff;
}
.dark .group-description {
color: #ccc;
}
.dark .benefits-title {
color: #fff;
}
.dark .benefits-list {
color: #ccc;
}
.dark .contact-info,
.dark .rules-notice {
background: #2a2418;
color: #ffa940;
}
</style>
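Both contact components above are presentational and receive their text through props, so a parent view can drop them into any dialog. A hedged sketch of such a parent (the import paths, file names, and the showContact flag are assumptions, not taken from this commit):

<template>
  <n-modal v-model:show="showContact" preset="card" title="联系我们" style="width: 420px">
    <ContactDeveloper wechat-id="xiangbie88" email="2769838458@qq.com" />
    <WechatGroup group-info="仅限永久VIP用户可加入" />
  </n-modal>
</template>

<script setup>
import { ref } from 'vue'
import { NModal } from 'naive-ui'
import ContactDeveloper from './ContactDeveloper.vue' // file name assumed
import WechatGroup from './WechatGroup.vue' // file name assumed

const showContact = ref(false)
</script>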

@@ -13,6 +13,11 @@ const routes = [
    path: '/',
    component: () => import('./components/Home/Home.vue'),
    children: [
      {
        path: '/mainHome',
        name: 'mainHome',
        component: () => import('./components/Home/SoftwareHome.vue')
      },
      {
        path: '/gptCopywriting',
        name: 'gptCopywriting',
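The hunk above registers a new lazily loaded child route, /mainHome, pointing at SoftwareHome.vue under the existing Home layout. A minimal sketch of navigating to it from other code, assuming nothing beyond what the route definition declares:

// Navigate to the new route, either by path or by its registered name.
import { useRouter } from 'vue-router'

const router = useRouter()
router.push('/mainHome')          // by path
router.push({ name: 'mainHome' }) // by name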

@@ -6,7 +6,36 @@ export type SystemStoreModel = {
export const useSystemStore = defineStore('system', {
  state: () => ({
    machineId: undefined,
    version: '1.1.1',
    remoteVersion: '1.1.1',
    versionInfo: {
      currentVersion: 'v1.0.0',
      latestVersion: 'v1.0.0',
      updateInfo: {
        latestVersion: 'v1.0.0',
        updateDate: '2023-10-01',
        updateInfo: [
          {
            version: 'v1.0.0',
            updateDate: '2023-09-01',
            changes: [
              {
                type: 'improvement',
                description: '改进了性能'
              }
            ]
          }
        ]
      },
      canUpdate: false
    } as SystemConfig.VersionInfo,
    gpu: {
      name: void 0,
      type: void 0,
      vender: void 0,
      updateTime: void 0
    } as SystemConfig.GpuMessage
  } as SystemStoreModel),
  getters: {
  },
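This hunk widens the system store's state with version metadata and a GPU descriptor. A minimal sketch of consuming it from a component (the import path and the patched values are assumptions, shown only to illustrate the shape of the new state):

// Sketch: reading and partially updating the extended system store.
import { useSystemStore } from '../store/system' // path assumed; match the real module

const systemStore = useSystemStore()

// New fields added in this commit.
console.log(systemStore.versionInfo.currentVersion, systemStore.gpu.name)

// $patch merges a partial state object, leaving unrelated top-level fields untouched.
systemStore.$patch({
  remoteVersion: '3.3.9',
  versionInfo: { ...systemStore.versionInfo, canUpdate: true }
})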