From 3249c18d172d779dd4a3d201961efec8ee683ed7 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Sat, 23 Aug 2025 21:54:13 +0800 Subject: [PATCH 01/13] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=80=9A=E8=BF=87?= =?UTF-8?q?=E4=BB=BB=E5=8A=A1=E5=AF=B9=E6=9C=AC=E5=9C=B0=EF=BC=88=E7=BD=91?= =?UTF-8?q?=E7=9B=98=EF=BC=89=E6=96=87=E4=BB=B6=E8=BF=9B=E8=A1=8C=E9=87=8D?= =?UTF-8?q?=E5=91=BD=E5=90=8D=E6=97=B6=E6=B2=A1=E6=9C=89=E5=BA=94=E7=94=A8?= =?UTF-8?q?=E8=BF=87=E6=BB=A4=E8=A7=84=E5=88=99=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 在 do_rename_task() 函数中为所有命名模式(顺序命名、剧集命名、正则命名)添加了过滤规则检查,确保本地文件重命名时也会应用 filterwords 过滤规则 --- quark_auto_save.py | 94 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) diff --git a/quark_auto_save.py b/quark_auto_save.py index c9f0f70..109193f 100644 --- a/quark_auto_save.py +++ b/quark_auto_save.py @@ -3003,7 +3003,37 @@ class Quark: non_dir_files = [f for f in dir_file_list if not f.get("dir", False)] is_empty_dir = len(non_dir_files) == 0 - + # 应用过滤词过滤(修复bug:为本地文件重命名添加过滤规则) + if task.get("filterwords"): + # 记录过滤前的文件总数 + original_total_count = len(dir_file_list) + + # 同时支持中英文逗号分隔 + filterwords = task["filterwords"].replace(",", ",") + filterwords_list = [word.strip().lower() for word in filterwords.split(',')] + + # 过滤掉包含过滤词的文件 + filtered_files = [] + for file in dir_file_list: + if file.get("dir", False): + # 文件夹也需要检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + else: + # 文件检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + dir_file_list = filtered_files + dir_file_name_list = [item["file_name"] for item in dir_file_list] # 找出当前最大序号 max_sequence = 0 @@ -3609,6 +3639,37 @@ class Quark: is_rename_count = 0 renamed_files = {} + # 应用过滤词过滤(修复bug:为本地文件重命名添加过滤规则) + if task.get("filterwords"): + # 记录过滤前的文件总数 + original_total_count = len(dir_file_list) + + # 同时支持中英文逗号分隔 + filterwords = task["filterwords"].replace(",", ",") + filterwords_list = [word.strip().lower() for word in filterwords.split(',')] + + # 过滤掉包含过滤词的文件 + filtered_files = [] + for file in dir_file_list: + if file.get("dir", False): + # 文件夹也需要检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + else: + # 文件检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + dir_file_list = filtered_files + # 使用一个列表收集所有需要重命名的操作 rename_operations = [] rename_logs = [] # 收集重命名日志 @@ -3754,6 +3815,37 @@ class Quark: # 获取目录中的文件列表 dir_file_list = self.ls_dir(self.savepath_fid[savepath]) + # 应用过滤词过滤(修复bug:为本地文件重命名添加过滤规则) + if task.get("filterwords"): + # 记录过滤前的文件总数 + 
original_total_count = len(dir_file_list) + + # 同时支持中英文逗号分隔 + filterwords = task["filterwords"].replace(",", ",") + filterwords_list = [word.strip().lower() for word in filterwords.split(',')] + + # 过滤掉包含过滤词的文件 + filtered_files = [] + for file in dir_file_list: + if file.get("dir", False): + # 文件夹也需要检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + else: + # 文件检查过滤词 + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + dir_file_list = filtered_files + # 使用一个列表收集所有需要重命名的操作 rename_operations = [] rename_logs = [] # 收集重命名日志 From 9e32ef79975e99843813942cb2644c95a7cae6f6 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Sun, 24 Aug 2025 00:02:51 +0800 Subject: [PATCH 02/13] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E9=AB=98=E7=BA=A7?= =?UTF-8?q?=E8=BF=87=E6=BB=A4=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 支持使用保留词和过滤词实现更为复杂的过滤逻辑 --- README.md | 2 +- app/run.py | 158 ++++++++++++++++++++------ app/templates/index.html | 12 +- quark_auto_save.py | 240 +++++++++++++++++++-------------------- 4 files changed, 247 insertions(+), 165 deletions(-) diff --git a/README.md b/README.md index 8c8e547..ee5b7e9 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # 夸克自动转存 本项目是在 [Cp0204/quark-auto-save:0.5.3.1](https://github.com/Cp0204/quark-auto-save) 的基础上修改而来的(感谢 [Cp0204](https://github.com/Cp0204)),我对整个 WebUI 进行了重塑,增加了更多实用功能,新增功能的代码都是通过 AI 完成的,不保证功能的稳定性。主要的新增功能如下([详见](https://github.com/x1ao4/quark-auto-save-x/wiki)): -- **过滤项目**:通过在 `过滤规则` 里设置过滤词来过滤不需要转存的文件或文件夹。 +- **过滤项目**:通过在 `过滤规则` 里设置过滤词来过滤不需要转存的文件或文件夹。支持高级过滤功能,使用保留词和过滤词可实现复杂的过滤逻辑。 - **顺序命名**:通过使用包含 `{}` 的表达式(如 `乘风2025 - S06E{}`)自动切换为 `顺序命名` 模式,该模式将通过文件名与上传时间等信息对文件进行智能排序,然后按顺序对每个文件的 `{}` 赋予序号,实现顺序命名。 - **剧集命名**:通过使用包含 `[]` 的表达式(如 `黑镜 - S06E[]`)自动切换为 `剧集命名` 模式,该模式将从原始文件名中提取剧集编号,然后把提取的编号代入对应文件名的 `[]` 中,实现自动按剧集编号命名。 - **自动切换命名模式**:默认的命名模式依然为 `正则命名` 模式,现在会通过用户输入的 `匹配表达式` 自动实时判断和切换对应的模式。 diff --git a/app/run.py b/app/run.py index 2c28a65..abb33d5 100644 --- a/app/run.py +++ b/app/run.py @@ -43,6 +43,106 @@ from quark_auto_save import extract_episode_number, sort_file_by_name, chinese_t # 导入豆瓣服务 from sdk.douban_service import douban_service +def advanced_filter_files(file_list, filterwords): + """ + 高级过滤函数,支持保留词和过滤词 + + Args: + file_list: 文件列表 + filterwords: 过滤规则字符串,支持以下格式: + - "加更,企划,超前,(1),mkv,nfo" # 只有过滤词 + - "期|加更,企划,超前,(1),mkv,nfo" # 保留词|过滤词 + - "期,2160P|加更,企划,超前,(1),mkv,nfo" # 多个保留词(或关系)|过滤词 + - "期|2160P|加更,企划,超前,(1),mkv,nfo" # 多个保留词(并关系)|过滤词 + - "期,2160P|" # 只有保留词,无过滤词 + + Returns: + 过滤后的文件列表 + """ + if not filterwords or not filterwords.strip(): + return file_list + + # 检查是否包含分隔符 | + if '|' not in filterwords: + # 只有过滤词的情况 + filterwords = filterwords.replace(",", ",") + filterwords_list = [word.strip().lower() for word in filterwords.split(',') if word.strip()] + + filtered_files = [] + for file in file_list: + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == 
file_ext for word in filterwords_list): + filtered_files.append(file) + + return filtered_files + + # 包含分隔符的情况,需要解析保留词和过滤词 + parts = filterwords.split('|') + if len(parts) < 2: + # 格式错误,返回原列表 + return file_list + + # 最后一个|后面的是过滤词 + filter_part = parts[-1].strip() + # 前面的都是保留词 + keep_parts = [part.strip() for part in parts[:-1] if part.strip()] + + # 解析过滤词 + filterwords_list = [] + if filter_part: + filter_part = filter_part.replace(",", ",") + filterwords_list = [word.strip().lower() for word in filter_part.split(',') if word.strip()] + + # 解析保留词:每个|分隔的部分都是一个独立的筛选条件 + # 这些条件需要按顺序依次应用,形成链式筛选 + keep_conditions = [] + for part in keep_parts: + if part.strip(): + if ',' in part or ',' in part: + # 包含逗号,表示或关系 + part = part.replace(",", ",") + or_words = [word.strip().lower() for word in part.split(',') if word.strip()] + keep_conditions.append(("or", or_words)) + else: + # 不包含逗号,表示单个词 + keep_conditions.append(("single", [part.strip().lower()])) + + # 第一步:应用保留词筛选(链式筛选) + if keep_conditions: + for condition_type, words in keep_conditions: + filtered_by_keep = [] + for file in file_list: + file_name = file['file_name'].lower() + + if condition_type == "or": + # 或关系:包含任意一个词即可 + if any(word in file_name for word in words): + filtered_by_keep.append(file) + elif condition_type == "single": + # 单个词:必须包含 + if words[0] in file_name: + filtered_by_keep.append(file) + + file_list = filtered_by_keep + + # 第二步:应用过滤词过滤 + if filterwords_list: + filtered_files = [] + for file in file_list: + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + return filtered_files + + return file_list + def process_season_episode_info(filename, task_name=None): """ @@ -924,15 +1024,14 @@ def get_share_detail(): # 根据提取的排序值进行排序 sorted_files = sorted(files_to_process, key=extract_sort_value) - # 应用过滤词过滤 + # 应用高级过滤词过滤 filterwords = regex.get("filterwords", "") if filterwords: - # 同时支持中英文逗号分隔 - filterwords = filterwords.replace(",", ",") - filterwords_list = [word.strip() for word in filterwords.split(',')] + # 使用高级过滤函数 + filtered_files = advanced_filter_files(sorted_files, filterwords) + # 标记被过滤的文件 for item in sorted_files: - # 被过滤的文件不会有file_name_re,与不匹配正则的文件显示一致 - if any(word in item['file_name'] for word in filterwords_list): + if item not in filtered_files: item["filtered"] = True # 为每个文件分配序号 @@ -982,15 +1081,14 @@ def get_share_detail(): ] episode_patterns.extend(chinese_patterns) - # 应用过滤词过滤 + # 应用高级过滤词过滤 filterwords = regex.get("filterwords", "") if filterwords: - # 同时支持中英文逗号分隔 - filterwords = filterwords.replace(",", ",") - filterwords_list = [word.strip() for word in filterwords.split(',')] + # 使用高级过滤函数 + filtered_files = advanced_filter_files(share_detail["list"], filterwords) + # 标记被过滤的文件 for item in share_detail["list"]: - # 被过滤的文件显示一个 × - if any(word in item['file_name'] for word in filterwords_list): + if item not in filtered_files: item["filtered"] = True item["file_name_re"] = "×" @@ -1019,15 +1117,14 @@ def get_share_detail(): regex.get("magic_regex", {}), ) - # 应用过滤词过滤 + # 应用高级过滤词过滤 filterwords = regex.get("filterwords", "") if filterwords: - # 同时支持中英文逗号分隔 - filterwords = filterwords.replace(",", ",") - filterwords_list = [word.strip() for word in filterwords.split(',')] + # 使用高级过滤函数 + filtered_files = advanced_filter_files(share_detail["list"], filterwords) + # 标记被过滤的文件 for item 
in share_detail["list"]: - # 被过滤的文件不会有file_name_re,与不匹配正则的文件显示一致 - if any(word in item['file_name'] for word in filterwords_list): + if item not in filtered_files: item["filtered"] = True # 应用正则命名 @@ -2066,25 +2163,12 @@ def preview_rename(): if isinstance(files, dict) and files.get("error"): return jsonify({"success": False, "message": f"获取文件列表失败: {files.get('error', '未知错误')}"}) - # 过滤要排除的文件 - # 替换中文逗号为英文逗号 - filterwords = filterwords.replace(",", ",") - filter_list = [keyword.strip() for keyword in filterwords.split(",") if keyword.strip()] - filtered_files = [] - for file in files: - # 如果不包含文件夹且当前项是文件夹,跳过 - if not include_folders and file["dir"]: - continue - - # 检查是否包含过滤关键词 - should_filter = False - for keyword in filter_list: - if keyword and keyword in file["file_name"]: - should_filter = True - break - - if not should_filter: - filtered_files.append(file) + # 使用高级过滤函数过滤文件 + filtered_files = advanced_filter_files(files, filterwords) + + # 如果不包含文件夹,进一步过滤掉文件夹 + if not include_folders: + filtered_files = [file for file in filtered_files if not file["dir"]] # 按不同命名模式处理 preview_results = [] diff --git a/app/templates/index.html b/app/templates/index.html index 27c0d02..317a9b1 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -1044,11 +1044,11 @@ -
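A minimal sketch of the new filter syntax handled by advanced_filter_files (the same helper is added to both app/run.py and quark_auto_save.py in this patch), assuming quark_auto_save.py is importable with its dependencies installed and that file entries are plain dicts carrying a file_name key:

    from quark_auto_save import advanced_filter_files

    # Hypothetical file list; only "file_name" is consulted by the filter.
    files = [
        {"file_name": "黑镜 S06E01 2160P.mkv"},
        {"file_name": "黑镜 S06E01 加更版 2160P.mkv"},
        {"file_name": "poster.nfo"},
    ]

    # Keep-words come before "|", filter-words after it:
    # keep only names containing "2160P", then drop matches for "加更" or the "nfo" extension.
    kept = advanced_filter_files(files, "2160P|加更,nfo")
    print([f["file_name"] for f in kept])  # expected: ['黑镜 S06E01 2160P.mkv']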
+
- +
@@ -1359,7 +1359,7 @@ @input="detectFileManagerNamingMode" :title="fileManager.use_sequence_naming ? '输入带{}占位符的重命名格式,如:剧名 - S01E{}、剧名.S03E{}等,{}将被替换为集序号(按文件名和修改日期智能排序)' : (fileManager.use_episode_naming ? '输入带[]占位符的重命名格式,如:剧名 - S01E[]、剧名.S03E[]等,[]将被替换为从文件名中提取的集编号' : '只重命名匹配到文件名的文件,留空不会进行重命名')"> - +
 含文件夹 @@ -1390,7 +1390,7 @@
过滤规则
- +  含文件夹 @@ -1971,11 +1971,11 @@
-
+
- +
diff --git a/quark_auto_save.py b/quark_auto_save.py index 109193f..0f94ed4 100644 --- a/quark_auto_save.py +++ b/quark_auto_save.py @@ -36,6 +36,106 @@ except ImportError: def close(self): pass +def advanced_filter_files(file_list, filterwords): + """ + 高级过滤函数,支持保留词和过滤词 + + Args: + file_list: 文件列表 + filterwords: 过滤规则字符串,支持以下格式: + - "加更,企划,超前,(1),mkv,nfo" # 只有过滤词 + - "期|加更,企划,超前,(1),mkv,nfo" # 保留词|过滤词 + - "期,2160P|加更,企划,超前,(1),mkv,nfo" # 多个保留词(或关系)|过滤词 + - "期|2160P|加更,企划,超前,(1),mkv,nfo" # 多个保留词(并关系)|过滤词 + - "期,2160P|" # 只有保留词,无过滤词 + + Returns: + 过滤后的文件列表 + """ + if not filterwords or not filterwords.strip(): + return file_list + + # 检查是否包含分隔符 | + if '|' not in filterwords: + # 只有过滤词的情况 + filterwords = filterwords.replace(",", ",") + filterwords_list = [word.strip().lower() for word in filterwords.split(',') if word.strip()] + + filtered_files = [] + for file in file_list: + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + return filtered_files + + # 包含分隔符的情况,需要解析保留词和过滤词 + parts = filterwords.split('|') + if len(parts) < 2: + # 格式错误,返回原列表 + return file_list + + # 最后一个|后面的是过滤词 + filter_part = parts[-1].strip() + # 前面的都是保留词 + keep_parts = [part.strip() for part in parts[:-1] if part.strip()] + + # 解析过滤词 + filterwords_list = [] + if filter_part: + filter_part = filter_part.replace(",", ",") + filterwords_list = [word.strip().lower() for word in filter_part.split(',') if word.strip()] + + # 解析保留词:每个|分隔的部分都是一个独立的筛选条件 + # 这些条件需要按顺序依次应用,形成链式筛选 + keep_conditions = [] + for part in keep_parts: + if part.strip(): + if ',' in part or ',' in part: + # 包含逗号,表示或关系 + part = part.replace(",", ",") + or_words = [word.strip().lower() for word in part.split(',') if word.strip()] + keep_conditions.append(("or", or_words)) + else: + # 不包含逗号,表示单个词 + keep_conditions.append(("single", [part.strip().lower()])) + + # 第一步:应用保留词筛选(链式筛选) + if keep_conditions: + for condition_type, words in keep_conditions: + filtered_by_keep = [] + for file in file_list: + file_name = file['file_name'].lower() + + if condition_type == "or": + # 或关系:包含任意一个词即可 + if any(word in file_name for word in words): + filtered_by_keep.append(file) + elif condition_type == "single": + # 单个词:必须包含 + if words[0] in file_name: + filtered_by_keep.append(file) + + file_list = filtered_by_keep + + # 第二步:应用过滤词过滤 + if filterwords_list: + filtered_files = [] + for file in file_list: + file_name = file['file_name'].lower() + file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') + + # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 + if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): + filtered_files.append(file) + + return filtered_files + + return file_list + # 全局的文件排序函数 def sort_file_by_name(file): """ @@ -1978,22 +2078,8 @@ class Quark: # 记录过滤前的文件总数(包括文件夹) original_total_count = len(share_file_list) - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 改进过滤逻辑,同时检查文件名和扩展名 - filtered_files = [] - for file in share_file_list: - file_name = file['file_name'].lower() - # 提取文件扩展名(不带点) - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for 
word in filterwords_list): - filtered_files.append(file) - - share_file_list = filtered_files + # 使用高级过滤函数处理保留词和过滤词 + share_file_list = advanced_filter_files(share_file_list, task["filterwords"]) # 打印过滤信息(格式保持不变) # 计算剩余文件数 @@ -3008,31 +3094,8 @@ class Quark: # 记录过滤前的文件总数 original_total_count = len(dir_file_list) - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 过滤掉包含过滤词的文件 - filtered_files = [] - for file in dir_file_list: - if file.get("dir", False): - # 文件夹也需要检查过滤词 - file_name = file['file_name'].lower() - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - else: - # 文件检查过滤词 - file_name = file['file_name'].lower() - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - - dir_file_list = filtered_files + # 使用高级过滤函数处理保留词和过滤词 + dir_file_list = advanced_filter_files(dir_file_list, task["filterwords"]) dir_file_name_list = [item["file_name"] for item in dir_file_list] # 找出当前最大序号 @@ -3435,23 +3498,14 @@ class Quark: # 检查过滤词 should_filter = False if task.get("filterwords"): - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 检查原始文件名 - original_name_lower = share_file["file_name"].lower() - if any(word in original_name_lower for word in filterwords_list): - should_filter = True - - # 检查目标文件名 - save_name_lower = save_name.lower() - if any(word in save_name_lower for word in filterwords_list): - should_filter = True - - # 检查文件扩展名 - file_ext_lower = file_ext.lower().lstrip('.') - if any(word == file_ext_lower for word in filterwords_list): + # 使用高级过滤函数检查文件名 + temp_file_list = [{"file_name": share_file["file_name"]}] + if advanced_filter_files(temp_file_list, task["filterwords"]): + # 检查目标文件名 + temp_save_list = [{"file_name": save_name}] + if not advanced_filter_files(temp_save_list, task["filterwords"]): + should_filter = True + else: should_filter = True # 只处理不需要过滤的文件 @@ -3465,19 +3519,9 @@ class Quark: # 检查过滤词 should_filter = False if task.get("filterwords"): - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 检查原始文件名 - original_name_lower = share_file["file_name"].lower() - if any(word in original_name_lower for word in filterwords_list): - should_filter = True - - # 检查文件扩展名 - file_ext = os.path.splitext(share_file["file_name"])[1].lower() - file_ext_lower = file_ext.lstrip('.') - if any(word == file_ext_lower for word in filterwords_list): + # 使用高级过滤函数检查文件名 + temp_file_list = [{"file_name": share_file["file_name"]}] + if not advanced_filter_files(temp_file_list, task["filterwords"]): should_filter = True # 只处理不需要过滤的文件 @@ -3644,31 +3688,8 @@ class Quark: # 记录过滤前的文件总数 original_total_count = len(dir_file_list) - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 过滤掉包含过滤词的文件 - filtered_files = [] - for file in dir_file_list: - if file.get("dir", False): - # 文件夹也需要检查过滤词 - file_name = file['file_name'].lower() - 
file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - else: - # 文件检查过滤词 - file_name = file['file_name'].lower() - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - - dir_file_list = filtered_files + # 使用高级过滤函数处理保留词和过滤词 + dir_file_list = advanced_filter_files(dir_file_list, task["filterwords"]) # 使用一个列表收集所有需要重命名的操作 rename_operations = [] @@ -3820,31 +3841,8 @@ class Quark: # 记录过滤前的文件总数 original_total_count = len(dir_file_list) - # 同时支持中英文逗号分隔 - filterwords = task["filterwords"].replace(",", ",") - filterwords_list = [word.strip().lower() for word in filterwords.split(',')] - - # 过滤掉包含过滤词的文件 - filtered_files = [] - for file in dir_file_list: - if file.get("dir", False): - # 文件夹也需要检查过滤词 - file_name = file['file_name'].lower() - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - else: - # 文件检查过滤词 - file_name = file['file_name'].lower() - file_ext = os.path.splitext(file_name)[1].lower().lstrip('.') - - # 检查过滤词是否存在于文件名中,或者过滤词等于扩展名 - if not any(word in file_name for word in filterwords_list) and not any(word == file_ext for word in filterwords_list): - filtered_files.append(file) - - dir_file_list = filtered_files + # 使用高级过滤函数处理保留词和过滤词 + dir_file_list = advanced_filter_files(dir_file_list, task["filterwords"]) # 使用一个列表收集所有需要重命名的操作 rename_operations = [] From fc49be58ed0dc3fd4bd5f82445a57808a0f5fddc Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Sun, 24 Aug 2025 00:20:21 +0800 Subject: [PATCH 03/13] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E6=96=87=E4=BB=B6=E4=B8=AD=E6=AD=A3=E5=88=99=E8=A1=A8=E8=BE=BE?= =?UTF-8?q?=E5=BC=8F=E8=BD=AC=E4=B9=89=E5=AD=97=E7=AC=A6=E5=86=97=E4=BD=99?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- quark_config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quark_config.json b/quark_config.json index 683844c..77f590d 100644 --- a/quark_config.json +++ b/quark_config.json @@ -67,7 +67,7 @@ ], "episode_patterns": [ { - "regex": "第(\\d+)集|第(\\d+)期|第(\\d+)话|(\\d+)集|(\\d+)期|(\\d+)话|[Ee][Pp]?(\\d+)|(\\d+)[-_\\\\s]*4[Kk]|\\[(\\d+)\\]|【(\\d+)】|_?(\\d+)_?" + "regex": "第(\\d+)集|第(\\d+)期|第(\\d+)话|(\\d+)集|(\\d+)期|(\\d+)话|[Ee][Pp]?(\\d+)|(\\d+)[-_\\s]*4[Kk]|\\[(\\d+)\\]|【(\\d+)】|_?(\\d+)_?" 
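A minimal sketch of how the corrected pattern behaves once the redundant escaping is removed (so `\s` really matches whitespace in the `4K` alternative instead of a literal backslash), assuming standard Python re and the regex exactly as written above:

    import re

    # Pattern copied from episode_patterns above, with JSON escaping decoded.
    pattern = r"第(\d+)集|第(\d+)期|第(\d+)话|(\d+)集|(\d+)期|(\d+)话|[Ee][Pp]?(\d+)|(\d+)[-_\s]*4[Kk]|\[(\d+)\]|【(\d+)】|_?(\d+)_?"

    for name in ["某剧 第3期.mp4", "Show.EP05.mkv", "节目 12 4K.mp4"]:
        m = re.search(pattern, name)          # assumes each sample name matches
        episode = next(g for g in m.groups() if g)  # first non-empty capture group
        print(name, "->", episode)            # prints 3, 05, 12 respectively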
} ] } From a27c76637ba272e3a5a55ce5a057ff303e901dbb Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Mon, 25 Aug 2025 17:01:25 +0800 Subject: [PATCH 04/13] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E9=85=8D=E7=BD=AE=E6=A8=A1=E5=BC=8F=E8=AE=BE=E7=BD=AE=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=EF=BC=8C=E6=94=AF=E6=8C=81=E5=85=A8=E5=B1=80=E9=85=8D?= =?UTF-8?q?=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 插件配置支持独立配置或全局配置,默认为独立配置 - 独立配置模式下每个任务可单独设置插件参数,互不影响 - 全局配置模式下所有任务共用同一套插件参数,只能在系统配置页面修改,并会覆盖已有任务的配置,新建任务也会自动继承相应配置 - 支持 aria2、alist_strm_gen、emby 插件 --- app/run.py | 95 +++++++++++++++--- app/templates/index.html | 211 +++++++++++++++++++++++++++++++++++++-- plugins/aria2.py | 2 +- 3 files changed, 284 insertions(+), 24 deletions(-) diff --git a/app/run.py b/app/run.py index abb33d5..15eee65 100644 --- a/app/run.py +++ b/app/run.py @@ -486,6 +486,31 @@ def get_data(): data["plugins"]["alist"]["storage_id"] ) + # 初始化插件配置模式(如果不存在) + if "plugin_config_mode" not in data: + data["plugin_config_mode"] = { + "aria2": "independent", + "alist_strm_gen": "independent", + "emby": "independent" + } + + # 初始化全局插件配置(如果不存在) + if "global_plugin_config" not in data: + data["global_plugin_config"] = { + "aria2": { + "auto_download": True, + "pause": False, + "auto_delete_quark_files": False + }, + "alist_strm_gen": { + "auto_gen": True + }, + "emby": { + "try_match": True, + "media_id": "" + } + } + # 发送webui信息,但不发送密码原文 data["webui"] = { "username": config_data["webui"]["username"], @@ -505,6 +530,7 @@ def sync_task_plugins_config(): 4. 保留原有的自定义配置 5. 只处理已启用的插件(通过PLUGIN_FLAGS检查) 6. 清理被禁用插件的配置 + 7. 应用全局插件配置(如果启用) """ global config_data, task_plugins_config_default @@ -516,6 +542,10 @@ def sync_task_plugins_config(): disabled_plugins = set() if PLUGIN_FLAGS: disabled_plugins = {name.lstrip('-') for name in PLUGIN_FLAGS.split(',')} + + # 获取插件配置模式 + plugin_config_mode = config_data.get("plugin_config_mode", {}) + global_plugin_config = config_data.get("global_plugin_config", {}) # 遍历所有任务 for task in config_data["tasklist"]: @@ -533,23 +563,31 @@ def sync_task_plugins_config(): # 跳过被禁用的插件 if plugin_name in disabled_plugins: continue - - # 如果任务中没有该插件的配置,添加默认配置 - if plugin_name not in task["addition"]: - task["addition"][plugin_name] = default_config.copy() - else: - # 如果任务中有该插件的配置,检查是否有新的配置项 - current_config = task["addition"][plugin_name] - # 确保current_config是字典类型 - if not isinstance(current_config, dict): - # 如果不是字典类型,使用默认配置 + + # 检查是否使用全局配置模式 + if plugin_name in plugin_config_mode and plugin_config_mode[plugin_name] == "global": + # 使用全局配置 + if plugin_name in global_plugin_config: + task["addition"][plugin_name] = global_plugin_config[plugin_name].copy() + else: task["addition"][plugin_name] = default_config.copy() - continue - - # 遍历默认配置的每个键值对 - for key, default_value in default_config.items(): - if key not in current_config: - current_config[key] = default_value + else: + # 使用独立配置 + if plugin_name not in task["addition"]: + task["addition"][plugin_name] = default_config.copy() + else: + # 如果任务中有该插件的配置,检查是否有新的配置项 + current_config = task["addition"][plugin_name] + # 确保current_config是字典类型 + if not isinstance(current_config, dict): + # 如果不是字典类型,使用默认配置 + task["addition"][plugin_name] = default_config.copy() + continue + + # 遍历默认配置的每个键值对 + for key, default_value in default_config.items(): + if key not in current_config: + current_config[key] = default_value def parse_comma_separated_config(value): @@ -1528,6 +1566,31 @@ def init(): if plugin_name in 
disabled_plugins: del task["addition"][plugin_name] + # 初始化插件配置模式(如果不存在) + if "plugin_config_mode" not in config_data: + config_data["plugin_config_mode"] = { + "aria2": "independent", + "alist_strm_gen": "independent", + "emby": "independent" + } + + # 初始化全局插件配置(如果不存在) + if "global_plugin_config" not in config_data: + config_data["global_plugin_config"] = { + "aria2": { + "auto_download": True, + "pause": False, + "auto_delete_quark_files": False + }, + "alist_strm_gen": { + "auto_gen": True + }, + "emby": { + "try_match": True, + "media_id": "" + } + } + # 同步更新任务的插件配置 sync_task_plugins_config() diff --git a/app/templates/index.html b/app/templates/index.html index 317a9b1..846c703 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -554,6 +554,32 @@ :placeholder="getPluginConfigPlaceholder(pluginName, key)" :title="getPluginConfigHelp(pluginName, key)">
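A minimal sketch of the two configuration keys this UI toggles, using the defaults initialized in get_data()/init() above; the mode values chosen here are hypothetical. When a plugin is set to "global", sync_task_plugins_config() copies the matching global_plugin_config entry into every task's addition:

    # Hypothetical excerpt of quark_config.json, shown as a Python dict.
    config_data = {
        "plugin_config_mode": {   # "independent" (per task) or "global" (shared)
            "aria2": "global",
            "alist_strm_gen": "independent",
            "emby": "independent",
        },
        "global_plugin_config": {  # consulted only for plugins set to "global"
            "aria2": {"auto_download": True, "pause": False, "auto_delete_quark_files": False},
            "alist_strm_gen": {"auto_gen": True},
            "emby": {"try_match": True, "media_id": ""},
        },
    }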
+ +
+
+ 插件配置模式 +
+ +
+ +
+
+
+ +
+ + +
+
@@ -1096,10 +1122,10 @@ -
+
- +
@@ -2023,10 +2049,10 @@ -
+
- +
@@ -2111,6 +2137,25 @@ api_page_size: 200, cache_expire_time: 30, discovery_items_count: 30 + }, + plugin_config_mode: { + aria2: "independent", + alist_strm_gen: "independent", + emby: "independent" + }, + global_plugin_config: { + aria2: { + auto_download: true, + pause: false, + auto_delete_quark_files: false + }, + alist_strm_gen: { + auto_gen: true + }, + emby: { + try_match: true, + media_id: "" + } } }, userInfoList: [], // 用户信息列表 @@ -2743,6 +2788,139 @@ return ''; }, + // 获取插件任务配置 + getPluginTaskConfig(pluginName) { + const taskConfigs = { + aria2: { + auto_download: true, + pause: false, + auto_delete_quark_files: false + }, + alist_strm_gen: { + auto_gen: true + }, + emby: { + try_match: true, + media_id: "" + } + }; + return taskConfigs[pluginName] || {}; + }, + + // 获取插件配置模式的帮助文本 + getPluginConfigModeHelp(pluginName) { + return "选择插件的配置模式:独立配置允许每个任务单独设置,全局配置则所有任务共享同一套设置,且只能在系统配置页面修改"; + }, + + // 获取插件任务配置的帮助文本 + getPluginTaskConfigHelp(pluginName, key) { + const helpTexts = { + aria2: { + auto_download: "是否自动添加下载任务", + pause: "添加任务后为暂停状态,不自动开始(手动下载)", + auto_delete_quark_files: "是否在添加下载任务后自动删除夸克网盘文件" + }, + alist_strm_gen: { + auto_gen: "是否自动生成 strm 文件" + }, + emby: { + try_match: "是否尝试匹配", + media_id: "媒体ID,当为0时不刷新" + } + }; + return helpTexts[pluginName]?.[key] || ''; + }, + + // 获取插件任务配置的占位符文本 + getPluginTaskConfigPlaceholder(pluginName, key) { + const placeholders = { + aria2: { + auto_download: "", + pause: "", + auto_delete_quark_files: "" + }, + alist_strm_gen: { + auto_gen: "" + }, + emby: { + try_match: "", + media_id: "输入媒体ID,留空则自动匹配" + } + }; + return placeholders[pluginName]?.[key] || ''; + }, + + // 检查插件配置是否被禁用 + isPluginConfigDisabled(task) { + for (const pluginName of ['aria2', 'alist_strm_gen', 'emby']) { + if (this.formData.plugin_config_mode[pluginName] === 'global') { + return true; + } + } + return false; + }, + + // 插件配置模式改变时的处理 + onPluginConfigModeChange(pluginName) { + if (this.formData.plugin_config_mode[pluginName] === 'global') { + // 切换到全局模式时,初始化全局配置 + if (!this.formData.global_plugin_config[pluginName]) { + this.formData.global_plugin_config[pluginName] = { ...this.getPluginTaskConfig(pluginName) }; + } + } + + // 更新新任务的配置,应用全局配置 + this.applyGlobalPluginConfig(this.newTask); + + // 更新影视发现页面创建任务的配置,应用全局配置 + if (this.createTask && this.createTask.taskData) { + this.applyGlobalPluginConfig(this.createTask.taskData); + } + }, + + // 全局插件配置改变时的处理 + onGlobalPluginConfigChange() { + // 更新新任务的配置,应用全局配置 + this.applyGlobalPluginConfig(this.newTask); + + // 更新影视发现页面创建任务的配置,应用全局配置 + if (this.createTask && this.createTask.taskData) { + this.applyGlobalPluginConfig(this.createTask.taskData); + } + }, + + // 应用全局插件配置到任务 + applyGlobalPluginConfig(task) { + if (!task.addition) { + task.addition = {}; + } + + for (const pluginName of ['aria2', 'alist_strm_gen', 'emby']) { + if (this.formData.plugin_config_mode[pluginName] === 'global') { + // 应用全局配置到任务 + task.addition[pluginName] = { ...this.formData.global_plugin_config[pluginName] }; + } + } + }, + + + + // 获取插件配置的悬停提示文本 + getPluginConfigTitle(task) { + if (this.isPluginConfigDisabled(task)) { + return `单个任务的插件配置,具体键值由插件定义,当前有部分插件使用了全局配置模式,在该模式下对应的配置选项将被锁定,若要修改配置,请前往系统配置页面进行操作,查阅Wiki了解详情`; + } + return "单个任务的插件配置,具体键值由插件定义,查阅Wiki了解详情"; + }, + + // 获取创建任务时的插件配置悬停提示文本 + getCreateTaskPluginConfigTitle() { + if (this.isPluginConfigDisabled(this.createTask.taskData)) { + return `单个任务的插件配置,具体键值由插件定义,当前有部分插件使用了全局配置模式,在该模式下对应的配置选项将被锁定,若要修改配置,请前往系统配置页面进行操作,查阅Wiki了解详情`; + } + return "单个任务的插件配置,具体键值由插件定义,查阅Wiki了解详情"; + }, + 
fetchUserInfo() { // 获取所有cookie对应的用户信息 axios.get('/get_user_info') @@ -3153,7 +3331,9 @@ if (!this.taskDirs.includes(parentDir)) this.taskDirs.push(parentDir); }); - this.newTask.addition = config_data.task_plugins_config_default; + // 初始化新任务的插件配置,应用全局配置 + this.newTask.addition = { ...config_data.task_plugins_config_default }; + this.applyGlobalPluginConfig(this.newTask); // 确保source配置存在 if (!config_data.source) { config_data.source = {}; @@ -3297,6 +3477,9 @@ task.episode_naming = task.pattern; } } + + // 应用全局插件配置 + this.applyGlobalPluginConfig(task); }); } @@ -3368,6 +3551,9 @@ newTask.replace = lastTask.replace || ""; } + // 应用全局插件配置到新任务 + this.applyGlobalPluginConfig(newTask); + this.formData.tasklist.push(newTask) const index = this.formData.tasklist.length - 1; @@ -7548,9 +7734,10 @@ this.createTask.taskData.startfid = ""; this.createTask.taskData.update_subdir = ""; - // 设置默认的插件配置 + // 设置默认的插件配置,并应用全局配置 if (this.formData.task_plugins_config_default) { this.createTask.taskData.addition = { ...this.formData.task_plugins_config_default }; + this.applyGlobalPluginConfig(this.createTask.taskData); } return; } @@ -7644,9 +7831,10 @@ this.createTask.taskData.startfid = ""; this.createTask.taskData.update_subdir = ""; - // 设置默认的插件配置 + // 设置默认的插件配置,并应用全局配置 if (this.formData.task_plugins_config_default) { this.createTask.taskData.addition = { ...this.formData.task_plugins_config_default }; + this.applyGlobalPluginConfig(this.createTask.taskData); } }, isUsingCustomTaskSettingsForType(taskSettings, contentType) { @@ -7974,6 +8162,9 @@ // 创建新任务 const newTask = { ...this.createTask.taskData }; + // 应用全局插件配置 + this.applyGlobalPluginConfig(newTask); + // 处理命名模式 if (newTask.use_sequence_naming) { newTask.pattern = newTask.sequence_naming; @@ -8034,6 +8225,9 @@ // 创建新任务 const newTask = { ...this.createTask.taskData }; + // 应用全局插件配置 + this.applyGlobalPluginConfig(newTask); + // 处理命名模式 if (newTask.use_sequence_naming) { newTask.pattern = newTask.sequence_naming; @@ -8103,6 +8297,9 @@ // 创建新任务 const newTask = { ...this.createTask.taskData }; + // 应用全局插件配置 + this.applyGlobalPluginConfig(newTask); + // 处理命名模式 if (newTask.use_sequence_naming) { newTask.pattern = newTask.sequence_naming; diff --git a/plugins/aria2.py b/plugins/aria2.py index a1b3b7e..7646a16 100644 --- a/plugins/aria2.py +++ b/plugins/aria2.py @@ -45,7 +45,7 @@ class Aria2: "dir": "/downloads", # 下载目录,需要Aria2有权限访问 } default_task_config = { - "auto_download": False, # 是否自动添加下载任务 + "auto_download": True, # 是否自动添加下载任务 "pause": False, # 添加任务后为暂停状态,不自动开始(手动下载) "auto_delete_quark_files": False, # 是否在添加下载任务后自动删除夸克网盘文件 } From 7bf5e7423aba3a96e7e9f4c3d00cecd4dc8a01b8 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Tue, 26 Aug 2025 01:57:34 +0800 Subject: [PATCH 05/13] =?UTF-8?q?=E4=B8=BA=E8=B5=84=E6=BA=90=E6=90=9C?= =?UTF-8?q?=E7=B4=A2=E5=8A=9F=E8=83=BD=E6=96=B0=E5=A2=9E=E4=BA=86=20PanSou?= =?UTF-8?q?=20=E6=9C=8D=E5=8A=A1=E5=99=A8=E6=94=AF=E6=8C=81=EF=BC=8C?= =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BA=86=E6=90=9C=E7=B4=A2=E7=BB=93=E6=9E=9C?= =?UTF-8?q?=E7=9A=84=E5=B1=95=E7=A4=BA=E4=B8=8E=E6=8E=92=E5=BA=8F=E6=96=B9?= =?UTF-8?q?=E5=BC=8F=EF=BC=8C=E8=B0=83=E6=95=B4=E4=BA=86=E7=B3=BB=E7=BB=9F?= =?UTF-8?q?=E9=85=8D=E7=BD=AE=E9=A1=B5=E9=9D=A2=E7=9A=84=E6=A8=A1=E5=9D=97?= =?UTF-8?q?=E9=A1=BA=E5=BA=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/run.py | 108 +++++++++++++++++----- app/sdk/cloudsaver.py | 29 +++++- app/sdk/pansou.py | 188 +++++++++++++++++++++++++++++++++++++++ app/static/css/main.css | 41 ++++++++- 
app/templates/index.html | 174 ++++++++++++++++++++++-------------- 5 files changed, 448 insertions(+), 92 deletions(-) create mode 100644 app/sdk/pansou.py diff --git a/app/run.py b/app/run.py index 15eee65..f5c0edf 100644 --- a/app/run.py +++ b/app/run.py @@ -16,6 +16,10 @@ from flask import ( from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger from sdk.cloudsaver import CloudSaver +try: + from sdk.pansou import PanSou +except Exception: + PanSou = None from datetime import timedelta, datetime import subprocess import requests @@ -511,6 +515,14 @@ def get_data(): } } + # 初始化搜索来源默认结构 + if "source" not in data or not isinstance(data.get("source"), dict): + data["source"] = {} + # CloudSaver 默认字段 + data["source"].setdefault("cloudsaver", {"server": "", "username": "", "password": "", "token": ""}) + # PanSou 默认字段 + data["source"].setdefault("pansou", {"server": "https://so.252035.xyz"}) + # 发送webui信息,但不发送密码原文 data["webui"] = { "username": config_data["webui"]["username"], @@ -929,7 +941,14 @@ def get_task_suggestions(): search_query = extract_show_name(query) try: - cs_data = config_data.get("source", {}).get("cloudsaver", {}) + sources_cfg = config_data.get("source", {}) or {} + cs_data = sources_cfg.get("cloudsaver", {}) + ps_data = sources_cfg.get("pansou", {}) + + merged = [] + providers = [] + + # CloudSaver if ( cs_data.get("server") and cs_data.get("username") @@ -941,35 +960,76 @@ def get_task_suggestions(): cs_data.get("password", ""), cs_data.get("token", ""), ) - # 使用处理后的搜索关键词 search = cs.auto_login_search(search_query) if search.get("success"): if search.get("new_token"): cs_data["token"] = search.get("new_token") Config.write_json(CONFIG_PATH, config_data) search_results = cs.clean_search_results(search.get("data")) - # 在返回结果中添加实际使用的搜索关键词 - return jsonify( - { - "success": True, - "source": "CloudSaver", - "data": search_results - } - ) - else: - return jsonify({"success": True, "message": search.get("message")}) - else: - base_url = base64.b64decode("aHR0cHM6Ly9zLjkxNzc4OC54eXo=").decode() - # 使用处理后的搜索关键词 - url = f"{base_url}/task_suggestions?q={search_query}&d={deep}" - response = requests.get(url) - return jsonify( - { - "success": True, - "source": "网络公开", - "data": response.json() - } - ) + if isinstance(search_results, list): + merged.extend(search_results) + providers.append("CloudSaver") + + # PanSou + if ps_data and ps_data.get("server") and PanSou is not None: + try: + ps = PanSou(ps_data.get("server")) + result = ps.search(search_query) + if result.get("success") and isinstance(result.get("data"), list): + merged.extend(result.get("data")) + providers.append("PanSou") + except Exception as e: + logging.warning(f"PanSou 搜索失败: {str(e)}") + + # 去重(按shareurl优先,其次taskname) + dedup = [] + seen = set() + for item in merged: + if not isinstance(item, dict): + continue + key = item.get("shareurl") or item.get("taskname") + if not key: + continue + if key in seen: + continue + seen.add(key) + dedup.append(item) + + # 全局时间排序:所有来源的结果混合排序,按时间倒序(最新的在前) + if dedup: + def parse_datetime_for_sort(item): + """解析时间字段,返回可比较的时间戳""" + # 兼容两个字段名:publish_date 和 datetime + datetime_str = item.get("publish_date") or item.get("datetime") + if not datetime_str: + return 0 # 没有时间的排在最后 + try: + from datetime import datetime + # 尝试解析格式: 2025-01-01 12:00:00 + dt = datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S") + return dt.timestamp() + except: + return 0 # 解析失败排在最后 + + # 按时间倒序排序(最新的在前) + dedup.sort(key=parse_datetime_for_sort, 
reverse=True) + + return jsonify({ + "success": True, + "source": ", ".join(providers) if providers else "聚合", + "data": dedup + }) + + # 若无本地可用来源,回退到公开网络 + base_url = base64.b64decode("aHR0cHM6Ly9zLjkxNzc4OC54eXo=").decode() + url = f"{base_url}/task_suggestions?q={search_query}&d={deep}" + response = requests.get(url) + return jsonify({ + "success": True, + "source": "网络公开", + "data": response.json() + }) + except Exception as e: return jsonify({"success": True, "message": f"error: {str(e)}"}) diff --git a/app/sdk/cloudsaver.py b/app/sdk/cloudsaver.py index a095a59..8509118 100644 --- a/app/sdk/cloudsaver.py +++ b/app/sdk/cloudsaver.py @@ -124,6 +124,10 @@ class CloudSaver: content = content.replace('', "") content = content.replace("", "") content = content.strip() + # 获取发布时间 - 采用与原始实现一致的方式 + pubdate = item.get("pubDate", "") # 使用 pubDate 字段 + if pubdate: + pubdate = self._iso_to_cst(pubdate) # 转换为中国标准时间 # 链接去重 if link.get("link") not in link_array: link_array.append(link.get("link")) @@ -132,12 +136,33 @@ class CloudSaver: "shareurl": link.get("link"), "taskname": title, "content": content, + "datetime": pubdate, # 使用 datetime 字段名,与原始实现一致 "tags": item.get("tags", []), - "channel": item.get("channel", ""), - "channel_id": item.get("channelId", ""), + "channel": item.get("channelId", ""), + "source": "CloudSaver" } ) + + # 注意:排序逻辑已移至全局,这里不再进行内部排序 + # 返回原始顺序的结果,由全局排序函数统一处理 return clean_results + + def _iso_to_cst(self, iso_time_str: str) -> str: + """将 ISO 格式的时间字符串转换为 CST(China Standard Time) 时间并格式化为 %Y-%m-%d %H:%M:%S 格式 + + Args: + iso_time_str (str): ISO 格式时间字符串 + + Returns: + str: CST(China Standard Time) 时间字符串 + """ + try: + from datetime import datetime, timezone, timedelta + dt = datetime.fromisoformat(iso_time_str) + dt_cst = dt.astimezone(timezone(timedelta(hours=8))) + return dt_cst.strftime("%Y-%m-%d %H:%M:%S") if dt_cst.year >= 1970 else "" + except: + return iso_time_str # 转换失败时返回原始字符串 # 测试示例 diff --git a/app/sdk/pansou.py b/app/sdk/pansou.py new file mode 100644 index 0000000..726c3f3 --- /dev/null +++ b/app/sdk/pansou.py @@ -0,0 +1,188 @@ +import requests +import json +from typing import List, Dict, Any + + +class PanSou: + """PanSou 资源搜索客户端""" + + def __init__(self, server: str): + self.server = server.rstrip("/") if server else "" + self.session = requests.Session() + # 使用标准请求头 + self.session.headers.update({ + "Content-Type": "application/json", + "Accept": "application/json", + "User-Agent": "QASX-PanSouClient/1.0" + }) + + def _request_json(self, url: str, params: dict): + """发送 GET 请求并解析 JSON 响应""" + try: + resp = self.session.get(url, params=params, timeout=15) + return resp.json() + except Exception as e: + return {"success": False, "message": str(e)} + + def search(self, keyword: str): + """ + 搜索资源(仅返回夸克网盘结果) + 返回:{"success": True, "data": [{taskname, content, shareurl, tags[]}]} + """ + if not self.server: + return {"success": False, "message": "PanSou未配置服务器"} + + # 使用已验证的参数:kw + cloud_types=quark + res=all + params = { + "kw": keyword, + "cloud_types": "quark", # 单个类型用字符串,多个类型用逗号分隔 + "res": "all" + } + + # 优先使用 /api/search 路径 + url = f"{self.server}/api/search" + result = self._request_json(url, params) + + if not result: + return {"success": False, "message": "PanSou请求失败"} + + # 解析响应:兼容 {code, message, data: {results, merged_by_type}} 格式 + payload = result + if isinstance(result.get("data"), dict): + payload = result["data"] + + # 检查错误码 + if "code" in result and result.get("code") != 0: + return {"success": False, "message": result.get("message") or "PanSou搜索失败"} 
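A minimal sketch of the response shape the parsing below expects from GET {server}/api/search; the field names are taken from the code, while the sample values are hypothetical:

    # Hypothetical PanSou response; only the fields read below are shown.
    sample = {
        "code": 0,
        "message": "ok",
        "data": {
            "results": [
                {
                    "title": "黑镜 S06 2160P",
                    "content": "全6集",
                    "datetime": "2025-07-28T20:43:27Z",
                    "links": [{"type": "quark", "url": "https://pan.quark.cn/s/abc123"}],
                },
            ],
            "merged_by_type": {
                "quark": [{"url": "https://pan.quark.cn/s/abc123", "note": "黑镜 S06 2160P", "datetime": "2025-07-28T20:43:27Z"}],
            },
        },
    }

    # Same traversal order as the code below: results first, links inside each result.
    payload = sample["data"]
    for r in payload["results"]:
        for link in r["links"]:
            print(r["title"], link["url"], r["datetime"])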
+ + # 解析结果:优先 results,然后 merged_by_type + cleaned = [] + + try: + # 1) results: 主要结果数组,每个结果包含 title 和 links + results = payload.get("results", []) + if isinstance(results, list): + for result_item in results: + if not isinstance(result_item, dict): + continue + + # 从 result_item 获取标题、内容和发布日期 + title = result_item.get("title", "") + content = result_item.get("content", "") + datetime_str = result_item.get("datetime", "") # 获取发布日期 + + # 从 links 获取具体链接 + links = result_item.get("links", []) + if isinstance(links, list): + for link in links: + if isinstance(link, dict): + url = link.get("url", "") + link_type = link.get("type", "") + if url: # 确保有有效链接 + cleaned.append({ + "taskname": title, + "content": content, + "shareurl": url, + "tags": [link_type] if link_type else (result_item.get("tags", []) or []), + "publish_date": datetime_str, # 添加发布日期字段 + "source": "PanSou" # 添加来源标识 + }) + + # 2) merged_by_type: 兜底解析,使用 note 字段作为标题 + if not cleaned: + merged = payload.get("merged_by_type") + if isinstance(merged, dict): + for cloud_type, links in merged.items(): + if isinstance(links, list): + for link in links: + if isinstance(link, dict): + # 从 merged_by_type 获取链接信息 + url = link.get("url", "") + note = link.get("note", "") # 使用 note 字段作为标题 + datetime_str = link.get("datetime", "") # 获取发布日期 + if url: + cleaned.append({ + "taskname": note, + "content": note, # 如果没有 content,使用 note + "shareurl": url, + "tags": [cloud_type] if cloud_type else [], + "publish_date": datetime_str, # 添加发布日期字段 + "source": "PanSou" # 添加来源标识 + }) + + # 3) 直接 data 数组兜底 + if not cleaned and isinstance(payload, list): + for item in payload: + if isinstance(item, dict): + cleaned.append({ + "taskname": item.get("title", ""), + "content": item.get("content", ""), + "shareurl": item.get("url", ""), + "tags": item.get("tags", []) or [], + "publish_date": item.get("datetime", ""), # 添加发布日期字段 + "source": "PanSou" # 添加来源标识 + }) + + except Exception as e: + return {"success": False, "message": f"解析PanSou结果失败: {str(e)}"} + + # 二次过滤:确保只返回夸克网盘链接 + if cleaned: + filtered = [] + for item in cleaned: + try: + url = item.get("shareurl", "") + tags = item.get("tags", []) or [] + # 检查是否为夸克网盘 + is_quark = ("quark" in tags) or ("pan.quark.cn" in url) + if is_quark: + filtered.append(item) + except Exception: + continue + cleaned = filtered + + if not cleaned: + return {"success": False, "message": "PanSou搜索无夸克网盘结果"} + + # 去重:按 shareurl 去重 + seen_urls = set() + unique_results = [] + for item in cleaned: + url = item.get("shareurl", "") + if url and url not in seen_urls: + seen_urls.add(url) + unique_results.append(item) + + # 按发布日期排序:最新的在前 + def parse_datetime(datetime_str): + """解析日期时间字符串,返回可比较的时间戳""" + if not datetime_str: + return 0 # 没有日期的排在最后 + try: + from datetime import datetime, timezone, timedelta + # 尝试解析 ISO 8601 格式: 2025-07-28T20:43:27Z + dt = datetime.fromisoformat(datetime_str.replace('Z', '+00:00')) + return dt.timestamp() + except: + return 0 # 解析失败排在最后 + + def convert_to_cst(datetime_str): + """将 ISO 时间转换为中国标准时间 (CST)""" + if not datetime_str: + return "" + try: + from datetime import datetime, timezone, timedelta + dt = datetime.fromisoformat(datetime_str.replace('Z', '+00:00')) + dt_cst = dt.astimezone(timezone(timedelta(hours=8))) + return dt_cst.strftime("%Y-%m-%d %H:%M:%S") + except: + return datetime_str # 转换失败时返回原始字符串 + + # 转换时间为中国标准时间格式 + for item in unique_results: + if item.get("publish_date"): + item["publish_date"] = convert_to_cst(item["publish_date"]) + + # 注意:排序逻辑已移至全局,这里不再进行内部排序 + # 返回原始顺序的结果,由全局排序函数统一处理 + return 
{"success": True, "data": unique_results} diff --git a/app/static/css/main.css b/app/static/css/main.css index 6494d01..7ba300c 100644 --- a/app/static/css/main.css +++ b/app/static/css/main.css @@ -3218,7 +3218,7 @@ div[data-toggle="collapse"] .btn.text-left i.bi-caret-right-fill { color: inherit; transition: transform 0.2s; position: relative; - top: 0.5px; /* 调整箭头垂直对齐,使其与文本居中 */ + top: 0; /* 调整箭头垂直对齐,使其与文本居中 */ font-size: 0.95rem; /* 调整箭头大小与文本比例协调 */ margin-right: 4px; /* 添加右侧间距使与文字有适当间距 */ } @@ -6399,3 +6399,42 @@ body .selectable-files tr.selected-file:has([style*="white-space: normal"]) .fil font-family: inherit; letter-spacing: normal; } + +/* 仅在“搜索来源”前的最后一个插件折叠时,将间距减少 2px */ +div:has(> .collapse:not(.show)):has(+ .row.title[title^="资源搜索"]) { + margin-bottom: -10px !important; /* override inline -8px only for collapsed state */ +} + +/* 修复系统配置页面性能设置与API接口模块间距问题 */ +.row.mb-2.performance-setting-row + .row.title[title^="API接口"] { + margin-top: 0 !important; /* prevent unexpected collapse stacking */ + padding-top: 4px !important; /* adds effective +4px spacing */ +} + +/* --------------- 来源标识样式 --------------- */ +.source-badge { + display: inline-block; + margin-left: 1px; + font-size: 14px; + line-height: 1.2; + white-space: nowrap; + vertical-align: baseline; + color: var(--light-text-color); + background-color: transparent; +} + +.source-badge::before { + content: "· "; + margin-right: 0px; + color: var(--light-text-color); +} + +.source-badge::after { + content: attr(data-publish-date); + margin-left: 2px; + color: var(--light-text-color); + font-size: 14px; + line-height: 1.2; + white-space: nowrap; + vertical-align: baseline; +} \ No newline at end of file diff --git a/app/templates/index.html b/app/templates/index.html index 846c703..f8067ae 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -509,7 +509,7 @@
-

通知

+

通知设置

@@ -531,7 +531,7 @@
-

插件

+

插件设置

@@ -541,7 +541,7 @@
- +
@@ -582,6 +582,72 @@
+ +
+
+

搜索来源

+ + + +
+
+ +
+ +
+
+
+
+ CloudSaver +
+
+
+
+
+
+ 服务器 +
+ +
+
+
+ 用户名 +
+ +
+
+
+ 密码 +
+ +
+ +
+
+
+
+ + +
+
+ +
+
+
+
+ 服务器 +
+ +
+
+
+
@@ -718,59 +784,6 @@
-
-
-

API

- - - -
-
-
-
- Token -
- -
- -
-
-

CloudSaver

- - - -
-
-
-
- 服务器 -
- -
-
-
-
-
- 用户名 -
- -
-
-
-
-
- 密码 -
- -
- -
-
-
-
-

显示设置

@@ -910,6 +923,20 @@
+
+
+

API

+ + + +
+
+
+
+ Token +
+ +
@@ -999,6 +1026,7 @@ {{ suggestion.taskname }} {{ suggestion.shareurl }} + {{ suggestion.source }} @@ -1926,6 +1954,7 @@ {{ suggestion.taskname }} {{ suggestion.shareurl }} + {{ suggestion.source }} @@ -2094,6 +2123,14 @@ showCloudSaverPassword: false, showWebuiPassword: false, pageWidthMode: 'medium', // 页面宽度模式:narrow, medium, wide + pluginDisplayAliases: { + alist: 'AList', + alist_strm: 'AList Strm', + alist_strm_gen: 'AList Strm Gen', + aria2: 'Aria2', + emby: 'Emby', + plex: 'Plex' + }, formData: { cookie: [], push_config: {}, @@ -2118,6 +2155,9 @@ username: "", password: "", token: "" + }, + pansou: { + server: "https://so.252035.xyz" } }, webui: { @@ -2718,6 +2758,10 @@ document.removeEventListener('click', this.handleOutsideClick); }, methods: { + // 获取插件展示名称(支持别名,仅用于WebUI显示) + getPluginDisplayName(pluginName) { + return this.pluginDisplayAliases[pluginName] || pluginName; + }, // 设置移动端任务列表展开/收起状态监听 setupMobileTaskListToggle() { // 监听所有collapse事件 @@ -7632,10 +7676,10 @@ // 打开创建任务模态框 $('#createTaskModal').modal('show'); - // 如果启用了自动搜索资源且配置了有效的CloudSaver信息,自动触发资源搜索 + // 如果启用了自动搜索资源且配置了有效的搜索来源,自动触发资源搜索 this.$nextTick(() => { if (this.formData.task_settings.auto_search_resources === 'enabled' && - this.isCloudSaverConfigValid() && + this.hasAnyValidSearchSource() && this.createTask.taskData.taskname) { this.searchSuggestions(-1, this.createTask.taskData.taskname); } @@ -7644,13 +7688,13 @@ console.error('创建任务时出错:', error); } }, - isCloudSaverConfigValid() { - // 检查CloudSaver配置是否有效 - const csData = this.formData.source && this.formData.source.cloudsaver; - return csData && - csData.server && - csData.username && - csData.password; + hasAnyValidSearchSource() { + const src = this.formData.source || {}; + const cs = src.cloudsaver || {}; + const ps = src.pansou || {}; + const csValid = cs.server && cs.username && cs.password; + const psValid = ps.server; + return !!(csValid || psValid); }, smartFillTaskData(item, movieData) { // 智能填充任务数据 From 953afd87582eba8791cfb6ade7c276fa23c58fc9 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Tue, 26 Aug 2025 15:27:36 +0800 Subject: [PATCH 06/13] =?UTF-8?q?=E4=BC=98=E5=8C=96=E6=90=9C=E7=B4=A2?= =?UTF-8?q?=E7=BB=93=E6=9E=9C=E4=B8=AD=E7=9A=84=E9=93=BE=E6=8E=A5=E6=98=BE?= =?UTF-8?q?=E7=A4=BA=E6=96=B9=E5=BC=8F=EF=BC=8C=E6=94=B9=E4=B8=BA=E6=98=BE?= =?UTF-8?q?=E7=A4=BA=E5=88=86=E4=BA=AB=20ID=20=E8=80=8C=E9=9D=9E=E5=AE=8C?= =?UTF-8?q?=E6=95=B4=E5=9C=B0=E5=9D=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/static/css/main.css | 6 +++--- app/templates/index.html | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/app/static/css/main.css b/app/static/css/main.css index 7ba300c..b2b4cbe 100644 --- a/app/static/css/main.css +++ b/app/static/css/main.css @@ -6424,14 +6424,14 @@ div:has(> .collapse:not(.show)):has(+ .row.title[title^="资源搜索"]) { } .source-badge::before { - content: "· "; - margin-right: 0px; + content: " · "; + margin-right: 0; color: var(--light-text-color); } .source-badge::after { content: attr(data-publish-date); - margin-left: 2px; + margin-left: 0; color: var(--light-text-color); font-size: 14px; line-height: 1.2; diff --git a/app/templates/index.html b/app/templates/index.html index f8067ae..af47b5a 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -1025,8 +1025,8 @@ @@ -1953,8 +1953,8 @@ From 54bcd0906a80f12320c10dcc098bd334262c6e56 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Tue, 26 Aug 2025 15:53:12 +0800 Subject: [PATCH 07/13] 
=?UTF-8?q?=E4=BC=98=E5=8C=96=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=90=9C=E7=B4=A2=E7=BB=93=E6=9E=9C=E7=9A=84=E6=98=BE=E7=A4=BA?= =?UTF-8?q?=E6=96=B9=E5=BC=8F=E5=92=8C=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/templates/index.html | 47 ++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/app/templates/index.html b/app/templates/index.html index af47b5a..5d48b06 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -1020,7 +1020,7 @@ 正在搜索中... - @@ -1945,16 +1945,16 @@ 正在验证链接有效性...({{ smart_param.validateProgress.current }}/{{ smart_param.validateProgress.total }})已找到 {{ smart_param.validateProgress.valid }} 个有效链接 - 正在搜索中... + 正在搜索资源... - @@ -2758,6 +2758,24 @@ document.removeEventListener('click', this.handleOutsideClick); }, methods: { + // 仅当有有效信息时返回悬停提示,否则返回null以不显示 + getSuggestionHoverTitle(suggestion) { + if (!suggestion) return null; + let content = (suggestion.content || '').trim(); + if (!content) return null; + // 统一标点为英文冒号,统一逗号 + const normalized = content + .replace(/:/g, ':') + .replace(/,/g, ',') + .replace(/\s+/g, ' ') + .trim(); + // 仅在明确的占位文本时隐藏: + // 1) 全文就是“大小:-” + if (/^大小\s*:\s*-$/i.test(normalized)) return null; + // 2) 完全匹配“类别:xx, 文件类型:yy, 大小:-”这类占位 + if (/^类别\s*:[^,]*,\s*文件类型\s*:[^,]*,\s*大小\s*:\s*-$/i.test(normalized)) return null; + return content; + }, // 获取插件展示名称(支持别名,仅用于WebUI显示) getPluginDisplayName(pluginName) { return this.pluginDisplayAliases[pluginName] || pluginName; @@ -4312,7 +4330,7 @@ // 解析时间用于排序(降序:最新在前) const getItemTs = (item) => { - const raw = item.publish_date || item.datetime || ''; + const raw = item.publish_date || ''; const ts = Date.parse(raw); return isNaN(ts) ? 0 : ts; }; @@ -4445,7 +4463,7 @@ // 结束前做一次排序,确保最终顺序正确 const getItemTs = (item) => { - const raw = item.publish_date || item.datetime || ''; + const raw = item.publish_date || ''; const ts = Date.parse(raw); return isNaN(ts) ? 0 : ts; }; From 6fd9683ff94eb25e13d682ce8d28c30b16a74fa0 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Tue, 26 Aug 2025 17:58:08 +0800 Subject: [PATCH 09/13] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=90=9C=E7=B4=A2?= =?UTF-8?q?=E6=9D=A5=E6=BA=90=E9=85=8D=E7=BD=AE=E6=8F=90=E7=A4=BA=E4=BF=A1?= =?UTF-8?q?=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/templates/index.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/templates/index.html b/app/templates/index.html index 0a06331..e6fb489 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -607,19 +607,19 @@
服务器
- +
用户名
- +
密码
- +
From 5c50453acd2d163c835b2391711539242e6284ee Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Wed, 27 Aug 2025 01:05:19 +0800 Subject: [PATCH 10/13] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=90=9C=E7=B4=A2=E7=BB=93=E6=9E=9C=E5=8F=91=E5=B8=83=E6=97=A5?= =?UTF-8?q?=E6=9C=9F=E6=97=B6=E5=8C=BA=EF=BC=88=E6=98=BE=E7=A4=BA=EF=BC=89?= =?UTF-8?q?=E9=94=99=E8=AF=AF=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/run.py | 24 +++++++++++++---- app/sdk/cloudsaver.py | 40 +++++++++++++--------------- app/sdk/pansou.py | 52 ++++++++++++------------------------- app/templates/index.html | 56 ++++++++++++++++++++++++++++++---------- 4 files changed, 96 insertions(+), 76 deletions(-) diff --git a/app/run.py b/app/run.py index 9315fd4..d4f168f 100644 --- a/app/run.py +++ b/app/run.py @@ -1061,19 +1061,33 @@ def get_task_suggestions(): seen_fingerprints.add(fingerprint) dedup.append(item) - # 全局时间排序:所有来源的结果混合排序,按时间倒序(最新的在前) + # 仅在排序时对多种格式进行解析(优先解析 YYYY-MM-DD HH:mm:ss,其次 ISO) if dedup: def parse_datetime_for_sort(item): """解析时间字段,返回可比较的时间戳(统一以 publish_date 为准)""" datetime_str = item.get("publish_date") if not datetime_str: return 0 # 没有时间的排在最后 + from datetime import datetime + s = str(datetime_str).strip() + # 优先解析标准显示格式 try: - from datetime import datetime - # 尝试解析格式: 2025-01-01 12:00:00 - dt = datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S") + dt = datetime.strptime(s, "%Y-%m-%d %H:%M:%S") return dt.timestamp() - except: + except Exception: + pass + # 补充解析仅日期格式 + try: + dt = datetime.strptime(s, "%Y-%m-%d") + return dt.timestamp() + except Exception: + pass + # 其次尝试 ISO(支持 Z/偏移) + try: + s2 = s.replace('Z', '+00:00') + dt = datetime.fromisoformat(s2) + return dt.timestamp() + except Exception: return 0 # 解析失败排在最后 # 按时间倒序排序(最新的在前) diff --git a/app/sdk/cloudsaver.py b/app/sdk/cloudsaver.py index 8509118..802f185 100644 --- a/app/sdk/cloudsaver.py +++ b/app/sdk/cloudsaver.py @@ -106,6 +106,19 @@ class CloudSaver: pattern_title = r"(名称|标题)[::]?(.*)" pattern_content = r"(描述|简介)[::]?(.*)(链接|标签)" clean_results = [] + # 工具:移除标题中的链接(http/https 以及常见裸域名的夸克分享) + def strip_links(text: str) -> str: + if not isinstance(text, str): + return text + s = text + import re + # 去除 http/https 链接 + s = re.sub(r"https?://\S+", "", s) + # 去除裸域夸克分享链接(不带协议的 pan.quark.cn/...) 
+ s = re.sub(r"\bpan\.quark\.cn/\S+", "", s) + # 收尾多余空白和分隔符 + s = re.sub(r"\s+", " ", s).strip(" -|·,,::;;" + " ") + return s.strip() link_array = [] for channel in search_results: for item in channel.get("list", []): @@ -117,6 +130,8 @@ class CloudSaver: if match := re.search(pattern_title, title, re.DOTALL): title = match.group(2) title = title.replace("&", "&").strip() + # 标题去除链接 + title = strip_links(title) # 清洗内容 content = item.get("content", "") if match := re.search(pattern_content, content, re.DOTALL): @@ -125,9 +140,8 @@ class CloudSaver: content = content.replace("", "") content = content.strip() # 获取发布时间 - 采用与原始实现一致的方式 - pubdate = item.get("pubDate", "") # 使用 pubDate 字段 - if pubdate: - pubdate = self._iso_to_cst(pubdate) # 转换为中国标准时间 + pubdate_iso = item.get("pubDate", "") # 原始时间字符串(可能为 ISO 或已是北京时间) + pubdate = pubdate_iso # 不做时区转换,保留来源原始时间 # 链接去重 if link.get("link") not in link_array: link_array.append(link.get("link")) @@ -136,7 +150,7 @@ class CloudSaver: "shareurl": link.get("link"), "taskname": title, "content": content, - "datetime": pubdate, # 使用 datetime 字段名,与原始实现一致 + "datetime": pubdate, # 显示用时间 "tags": item.get("tags", []), "channel": item.get("channelId", ""), "source": "CloudSaver" @@ -146,24 +160,6 @@ class CloudSaver: # 注意:排序逻辑已移至全局,这里不再进行内部排序 # 返回原始顺序的结果,由全局排序函数统一处理 return clean_results - - def _iso_to_cst(self, iso_time_str: str) -> str: - """将 ISO 格式的时间字符串转换为 CST(China Standard Time) 时间并格式化为 %Y-%m-%d %H:%M:%S 格式 - - Args: - iso_time_str (str): ISO 格式时间字符串 - - Returns: - str: CST(China Standard Time) 时间字符串 - """ - try: - from datetime import datetime, timezone, timedelta - dt = datetime.fromisoformat(iso_time_str) - dt_cst = dt.astimezone(timezone(timedelta(hours=8))) - return dt_cst.strftime("%Y-%m-%d %H:%M:%S") if dt_cst.year >= 1970 else "" - except: - return iso_time_str # 转换失败时返回原始字符串 - # 测试示例 if __name__ == "__main__": diff --git a/app/sdk/pansou.py b/app/sdk/pansou.py index 726c3f3..f5ffb5a 100644 --- a/app/sdk/pansou.py +++ b/app/sdk/pansou.py @@ -57,6 +57,16 @@ class PanSou: # 解析结果:优先 results,然后 merged_by_type cleaned = [] + # 工具:移除标题中的链接 + def strip_links(text: str) -> str: + if not isinstance(text, str): + return text + s = text + import re + s = re.sub(r"https?://\S+", "", s) + s = re.sub(r"\bpan\.quark\.cn/\S+", "", s) + s = re.sub(r"\s+", " ", s).strip(" -|·,,::;;" + " ") + return s.strip() try: # 1) results: 主要结果数组,每个结果包含 title 和 links @@ -68,6 +78,7 @@ class PanSou: # 从 result_item 获取标题、内容和发布日期 title = result_item.get("title", "") + title = strip_links(title) content = result_item.get("content", "") datetime_str = result_item.get("datetime", "") # 获取发布日期 @@ -84,7 +95,7 @@ class PanSou: "content": content, "shareurl": url, "tags": [link_type] if link_type else (result_item.get("tags", []) or []), - "publish_date": datetime_str, # 添加发布日期字段 + "publish_date": datetime_str, # 原始时间(可能是 ISO) "source": "PanSou" # 添加来源标识 }) @@ -99,6 +110,7 @@ class PanSou: # 从 merged_by_type 获取链接信息 url = link.get("url", "") note = link.get("note", "") # 使用 note 字段作为标题 + note = strip_links(note) datetime_str = link.get("datetime", "") # 获取发布日期 if url: cleaned.append({ @@ -106,7 +118,7 @@ class PanSou: "content": note, # 如果没有 content,使用 note "shareurl": url, "tags": [cloud_type] if cloud_type else [], - "publish_date": datetime_str, # 添加发布日期字段 + "publish_date": datetime_str, # 原始时间 "source": "PanSou" # 添加来源标识 }) @@ -119,7 +131,7 @@ class PanSou: "content": item.get("content", ""), "shareurl": item.get("url", ""), "tags": item.get("tags", []) or [], - "publish_date": 
item.get("datetime", ""), # 添加发布日期字段 + "publish_date": item.get("datetime", ""), # 原始时间 "source": "PanSou" # 添加来源标识 }) @@ -152,37 +164,5 @@ class PanSou: if url and url not in seen_urls: seen_urls.add(url) unique_results.append(item) - - # 按发布日期排序:最新的在前 - def parse_datetime(datetime_str): - """解析日期时间字符串,返回可比较的时间戳""" - if not datetime_str: - return 0 # 没有日期的排在最后 - try: - from datetime import datetime, timezone, timedelta - # 尝试解析 ISO 8601 格式: 2025-07-28T20:43:27Z - dt = datetime.fromisoformat(datetime_str.replace('Z', '+00:00')) - return dt.timestamp() - except: - return 0 # 解析失败排在最后 - - def convert_to_cst(datetime_str): - """将 ISO 时间转换为中国标准时间 (CST)""" - if not datetime_str: - return "" - try: - from datetime import datetime, timezone, timedelta - dt = datetime.fromisoformat(datetime_str.replace('Z', '+00:00')) - dt_cst = dt.astimezone(timezone(timedelta(hours=8))) - return dt_cst.strftime("%Y-%m-%d %H:%M:%S") - except: - return datetime_str # 转换失败时返回原始字符串 - - # 转换时间为中国标准时间格式 - for item in unique_results: - if item.get("publish_date"): - item["publish_date"] = convert_to_cst(item["publish_date"]) - - # 注意:排序逻辑已移至全局,这里不再进行内部排序 - # 返回原始顺序的结果,由全局排序函数统一处理 + return {"success": True, "data": unique_results} diff --git a/app/templates/index.html b/app/templates/index.html index e6fb489..35b9d38 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -587,7 +587,7 @@

搜索来源

- +
@@ -1026,7 +1026,7 @@ {{ suggestion.taskname }} · {{ suggestion.shareurl.replace(/^https?:\/\/pan\.quark\.cn\/s\//, '') }} - + @@ -1954,7 +1954,7 @@ {{ suggestion.taskname }} · {{ suggestion.shareurl.replace(/^https?:\/\/pan\.quark\.cn\/s\//, '') }} - + @@ -4329,11 +4329,7 @@ const batchSize = 5; // 解析时间用于排序(降序:最新在前) - const getItemTs = (item) => { - const raw = item.publish_date || ''; - const ts = Date.parse(raw); - return isNaN(ts) ? 0 : ts; - }; + const getItemTs = (item) => this.parsePublishTs(item && item.publish_date); // 处理单个链接的函数 const processLink = (link) => { @@ -4462,11 +4458,7 @@ this.smart_param._hasShownInterimResults = false; // 结束前做一次排序,确保最终顺序正确 - const getItemTs = (item) => { - const raw = item.publish_date || ''; - const ts = Date.parse(raw); - return isNaN(ts) ? 0 : ts; - }; + const getItemTs = (item) => this.parsePublishTs(item && item.publish_date); validResults.sort((a, b) => getItemTs(b) - getItemTs(a)); // 更新搜索结果 @@ -6535,6 +6527,44 @@ const seconds = String(d.getSeconds()).padStart(2, '0'); return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`; }, + // 统一解析资源发布日期为时间戳 + parsePublishTs(raw) { + if (!raw) return 0; + const s = String(raw).trim(); + // YYYY-MM-DD HH:mm:ss + let m = /^\s*(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})\s*$/.exec(s); + if (m) { + const [, y, mo, d, h, mi, se] = m; + return new Date(Number(y), Number(mo) - 1, Number(d), Number(h), Number(mi), Number(se)).getTime(); + } + // YYYY-MM-DD + m = /^\s*(\d{4})-(\d{2})-(\d{2})\s*$/.exec(s); + if (m) { + const [, y, mo, d] = m; + return new Date(Number(y), Number(mo) - 1, Number(d), 0, 0, 0).getTime(); + } + // ISO 回退 + const ts = Date.parse(s); + return isNaN(ts) ? 0 : ts; + }, + // 规范化资源发布日期展示:将 ISO 格式(含 T/Z/偏移)转为 "YYYY-MM-DD HH:mm:ss" + formatPublishDate(value) { + if (!value) return ''; + const s = String(value).trim(); + // 已是标准格式则直接返回 + if (/^\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}$/.test(s)) return s; + // 优先匹配 ISO 主体部分 + const m = /^(\d{4})-(\d{2})-(\d{2})[T ](\d{2}):(\d{2}):(\d{2})/.exec(s); + if (m) { + const [, y, mo, d, h, mi, se] = m; + return `${y}-${mo}-${d} ${h}:${mi}:${se}`; + } + // 回退:简单替换T为空格并去除尾部Z/时区偏移 + let out = s.replace('T', ' '); + out = out.replace(/Z$/i, ''); + out = out.replace(/([+-]\d{2}:?\d{2})$/i, ''); + return out; + }, changeFolderPage(page) { if (page < 1) page = 1; if (page > this.fileManager.totalPages) page = this.fileManager.totalPages; From 8304d8e8fd3b2d9d46b962462807a00c19cfab90 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Wed, 27 Aug 2025 02:32:42 +0800 Subject: [PATCH 11/13] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E6=8E=A8=E9=80=81?= =?UTF-8?q?=E9=80=9A=E7=9F=A5=E7=B1=BB=E5=9E=8B=E9=80=89=E6=8B=A9=E5=8A=9F?= =?UTF-8?q?=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增推送通知类型配置选项 - 支持三种模式: - 完整信息(转存成功、转存失败、资源失效) - 仅成功信息(仅转存成功) - 排除失效信息(转存成功、转存失败) - 优化通知过滤逻辑,提升用户体验 --- app/run.py | 8 ++++++++ app/templates/index.html | 11 +++++++++++ quark_auto_save.py | 37 ++++++++++++++++++++++++++++++++----- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/app/run.py b/app/run.py index d4f168f..bdacb41 100644 --- a/app/run.py +++ b/app/run.py @@ -515,6 +515,10 @@ def get_data(): } } + # 初始化推送通知类型配置(如果不存在) + if "push_notify_type" not in data: + data["push_notify_type"] = "full" + # 初始化搜索来源默认结构 if "source" not in data or not isinstance(data.get("source"), dict): data["source"] = {} @@ -1730,6 +1734,10 @@ def init(): } } + # 初始化推送通知类型配置(如果不存在) + if "push_notify_type" not in config_data: + 
config_data["push_notify_type"] = "full" + # 同步更新任务的插件配置 sync_task_plugins_config() diff --git a/app/templates/index.html b/app/templates/index.html index 35b9d38..e9f145b 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -515,6 +515,16 @@ +
+
+ 推送通知 +
+ +
@@ -2134,6 +2144,7 @@ formData: { cookie: [], push_config: {}, + push_notify_type: 'full', media_servers: {}, tasklist: [], magic_regex: {}, diff --git a/quark_auto_save.py b/quark_auto_save.py index 0f94ed4..4d4afd7 100644 --- a/quark_auto_save.py +++ b/quark_auto_save.py @@ -829,6 +829,28 @@ def add_notify(text): # 防止重复添加相同的通知 if text in NOTIFYS: return text + + # 检查推送通知类型配置 + push_notify_type = CONFIG_DATA.get("push_notify_type", "full") + + # 如果设置为仅推送成功信息,则过滤掉失败和错误信息 + if push_notify_type == "success_only": + # 检查是否包含失败或错误相关的关键词 + failure_keywords = ["❌", "❗", "失败", "失效", "错误", "异常", "无效", "登录失败"] + if any(keyword in text for keyword in failure_keywords): + # 只打印到控制台,不添加到通知列表 + print(text) + return text + + # 如果设置为排除失效信息,则过滤掉资源失效信息,但保留转存失败信息 + elif push_notify_type == "exclude_invalid": + # 检查是否包含资源失效相关的关键词(主要是分享资源失效) + invalid_keywords = ["分享资源已失效", "分享详情获取失败", "分享为空", "文件已被分享者删除"] + if any(keyword in text for keyword in invalid_keywords): + # 只打印到控制台,不添加到通知列表 + print(text) + return text + NOTIFYS.append(text) print(text) return text @@ -4485,7 +4507,7 @@ def do_save(account, tasklist=[]): # 添加成功通知,带文件数量图标 # 这个通知会在下面的新逻辑中添加,这里注释掉 - # add_notify(f"✅《{task['taskname']}》添加追更:") + # add_notify(f"✅《{task['taskname']}》新增文件:") # add_notify(f"/{task['savepath']}") # 移除调试信息 @@ -4767,7 +4789,7 @@ def do_save(account, tasklist=[]): pass else: # 添加基本通知 - add_notify(f"✅《{task['taskname']}》添加追更:") + add_notify(f"✅《{task['taskname']}》新增文件:") add_notify(f"{re.sub(r'/{2,}', '/', f'/{task['savepath']}')}") # 修正首次运行时对子目录的处理 - 只有在首次运行且有新增的子目录时才显示子目录内容 @@ -5098,7 +5120,7 @@ def do_save(account, tasklist=[]): # 添加成功通知 - 修复问题:确保在有文件时添加通知 if display_files: - add_notify(f"✅《{task['taskname']}》添加追更:") + add_notify(f"✅《{task['taskname']}》新增文件:") add_notify(f"{re.sub(r'/{2,}', '/', f'/{task['savepath']}')}") @@ -5188,7 +5210,7 @@ def do_save(account, tasklist=[]): display_files = [file["file_name"] for file in file_nodes] # 添加成功通知 - add_notify(f"✅《{task['taskname']}》添加追更:") + add_notify(f"✅《{task['taskname']}》新增文件:") add_notify(f"{re.sub(r'/{2,}', '/', f'/{task['savepath']}')}") # 打印文件列表 @@ -5339,7 +5361,12 @@ def main(): if NOTIFYS: notify_body = "\n".join(NOTIFYS) print(f"===============推送通知===============") - send_ql_notify("【夸克自动追更】", notify_body) + send_ql_notify("【夸克自动转存】", notify_body) + print() + else: + # 如果没有通知内容,显示统一提示 + print(f"===============推送通知===============") + print("📭 本次运行没有新的转存,未推送通知") print() if cookie_form_file: # 更新配置 From 3ccaeeae15b2f3ab3caeddfb32a031ecf3d1d7ca Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Wed, 27 Aug 2025 22:23:01 +0800 Subject: [PATCH 12/13] =?UTF-8?q?=E4=BC=98=E5=8C=96=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=90=9C=E7=B4=A2=E7=BB=93=E6=9E=9C=E7=9A=84=E5=8E=BB=E9=87=8D?= =?UTF-8?q?=E9=80=BB=E8=BE=91=EF=BC=8C=E6=94=B9=E4=B8=BA=E5=8E=BB=E9=87=8D?= =?UTF-8?q?=E6=97=B6=E4=BF=9D=E7=95=99=E6=9C=80=E6=96=B0=E8=AE=B0=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - PanSou:按 shareurl 归并,保留 publish_date 最新记录 - CloudSaver:按 shareurl 归并,保留 datetime 最新记录 - 聚合层(/task_suggestions): - 首轮:按 shareurl 归并取最新;仅在无链接时用指纹兜底 - 二次:按“标准化标题+时间戳”归并(兼容多种时间格式) - 同秒平手:优先 CloudSaver,其次内容更丰富 - 最终仍按 publish_date 倒序展示 --- app/run.py | 109 ++++++++++++++++++++++++++++++++---------- app/sdk/cloudsaver.py | 71 ++++++++++++++++++++------- app/sdk/pansou.py | 47 ++++++++++++++---- 3 files changed, 178 insertions(+), 49 deletions(-) diff --git a/app/run.py b/app/run.py index bdacb41..b962236 100644 --- a/app/run.py +++ b/app/run.py @@ -987,12 +987,12 @@ def 
get_task_suggestions(): # 去重并统一时间字段为 publish_date # 规则: - # 1) shareurl 相同视为同一资源 - # 2) 当 taskname 与 publish_date 同时完全一致时,也视为同一资源(即使 shareurl 不同) - dedup = [] - seen_shareurls = set() - seen_title_date = set() - seen_fingerprints = set() + # 1) 首轮仅按 shareurl 归并:同一链接保留发布时间最新的一条(展示以该条为准) + # 2) 兜底(极少):无链接时按完整指纹(shareurl|title|date|source)归并 + # 3) 二次归并:对所有候选结果再按 标题+发布时间 做一次归并(无论 shareurl 是否相同),取最新 + # 注意:当发生归并冲突时,始终保留发布时间最新的记录 + dedup_map = {} # 按 shareurl 归并 + fingerprint_map = {} # 兜底:完整指纹归并(仅当缺失链接时) # 规范化工具 def normalize_shareurl(url: str) -> str: try: @@ -1028,6 +1028,29 @@ def get_task_suggestions(): return ds except Exception: return (date_str or "").strip() + # 解析时间供比较 + def to_ts(datetime_str): + if not datetime_str: + return 0 + try: + s = str(datetime_str).strip() + from datetime import datetime + try: + return datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timestamp() + except Exception: + pass + try: + return datetime.strptime(s, "%Y-%m-%d").timestamp() + except Exception: + pass + try: + s2 = s.replace('Z', '+00:00') + return datetime.fromisoformat(s2).timestamp() + except Exception: + return 0 + except Exception: + return 0 + for item in merged: if not isinstance(item, dict): continue @@ -1043,27 +1066,63 @@ def get_task_suggestions(): pubdate = normalize_date(item.get("publish_date") or "") source = (item.get("source") or "").strip() - # 条件1:按 shareurl 去重 - if shareurl and shareurl in seen_shareurls: - continue + timestamp = to_ts(pubdate) - # 条件2:标题 + 发布时间 同时一致则判定为同一资源 - title_date_key = f"{title}||{pubdate}" if title and pubdate else None - if title_date_key and title_date_key in seen_title_date: - continue - - # 条件3:完整指纹键(shareurl+title+date+source)去重,兜底完全相同的重复项 - fingerprint = f"{shareurl}|{title}|{pubdate}|{source}" - if fingerprint in seen_fingerprints: - continue - - # 记录已见键并保留该条 + # 条件1:按 shareurl 归并,取最新 if shareurl: - seen_shareurls.add(shareurl) - if title_date_key: - seen_title_date.add(title_date_key) - seen_fingerprints.add(fingerprint) - dedup.append(item) + existed = dedup_map.get(shareurl) + if not existed or to_ts(existed.get("publish_date")) < timestamp: + dedup_map[shareurl] = item + else: + # 条件2(兜底):完整指纹归并(极少发生),依然取最新 + fingerprint = f"{shareurl}|{title}|{pubdate}|{source}" + existed = fingerprint_map.get(fingerprint) + if not existed or to_ts(existed.get("publish_date")) < timestamp: + fingerprint_map[fingerprint] = item + + # 第一轮:汇总归并后的候选结果 + candidates = list(dedup_map.values()) + list(fingerprint_map.values()) + + # 第二轮:无论 shareurl 是否相同,再按 标题+发布时间 归并一次(使用时间戳作为键,兼容不同时间格式),保留最新 + final_map = {} + for item in candidates: + try: + t = normalize_title(item.get("taskname") or "") + d = normalize_date(item.get("publish_date") or "") + s = normalize_shareurl(item.get("shareurl") or "") + src = (item.get("source") or "").strip() + # 优先采用 标题+时间 作为归并键 + ts_val = to_ts(d) + if t and ts_val: + key = f"TD::{t}||{int(ts_val)}" + elif s: + key = f"URL::{s}" + else: + key = f"FP::{s}|{t}|{d}|{src}" + existed = final_map.get(key) + current_ts = to_ts(item.get("publish_date")) + if not existed: + final_map[key] = item + else: + existed_ts = to_ts(existed.get("publish_date")) + if current_ts > existed_ts: + final_map[key] = item + elif current_ts == existed_ts: + # 时间完全相同,使用确定性优先级打破平手 + source_priority = {"CloudSaver": 2, "PanSou": 1} + existed_pri = source_priority.get((existed.get("source") or "").strip(), 0) + current_pri = source_priority.get(src, 0) + if current_pri > existed_pri: + final_map[key] = item + elif current_pri == existed_pri: + # 进一步比较信息丰富度(content 长度) + if 
len(str(item.get("content") or "")) > len(str(existed.get("content") or "")): + final_map[key] = item + except Exception: + # 出现异常则跳过该项 + continue + + dedup = list(final_map.values()) # 仅在排序时对多种格式进行解析(优先解析 YYYY-MM-DD HH:mm:ss,其次 ISO) if dedup: diff --git a/app/sdk/cloudsaver.py b/app/sdk/cloudsaver.py index 802f185..05907b1 100644 --- a/app/sdk/cloudsaver.py +++ b/app/sdk/cloudsaver.py @@ -142,24 +142,63 @@ class CloudSaver: # 获取发布时间 - 采用与原始实现一致的方式 pubdate_iso = item.get("pubDate", "") # 原始时间字符串(可能为 ISO 或已是北京时间) pubdate = pubdate_iso # 不做时区转换,保留来源原始时间 - # 链接去重 - if link.get("link") not in link_array: - link_array.append(link.get("link")) - clean_results.append( - { - "shareurl": link.get("link"), - "taskname": title, - "content": content, - "datetime": pubdate, # 显示用时间 - "tags": item.get("tags", []), - "channel": item.get("channelId", ""), - "source": "CloudSaver" - } - ) + # 收集结果(不在此处去重,统一在末尾按最新归并) + clean_results.append( + { + "shareurl": link.get("link"), + "taskname": title, + "content": content, + "datetime": pubdate, # 显示用时间 + "tags": item.get("tags", []), + "channel": item.get("channelId", ""), + "source": "CloudSaver" + } + ) + # 去重:按 shareurl 归并,保留发布时间最新的记录 + def to_ts(date_str: str) -> float: + if not date_str: + return 0 + try: + s = str(date_str).strip() + from datetime import datetime + try: + return datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timestamp() + except Exception: + pass + try: + return datetime.strptime(s, "%Y-%m-%d").timestamp() + except Exception: + pass + try: + s2 = s.replace('Z', '+00:00') + return datetime.fromisoformat(s2).timestamp() + except Exception: + return 0 + except Exception: + return 0 + + by_url = {} + for item in clean_results: + try: + url = item.get("shareurl", "") + if not url: + continue + existed = by_url.get(url) + if not existed: + by_url[url] = item + else: + # 比较 datetime(CloudSaver清洗阶段时间字段名为 datetime) + if to_ts(item.get("datetime")) > to_ts(existed.get("datetime")): + by_url[url] = item + except Exception: + continue + + unique_results = list(by_url.values()) + # 注意:排序逻辑已移至全局,这里不再进行内部排序 - # 返回原始顺序的结果,由全局排序函数统一处理 - return clean_results + # 返回归并后的结果,由全局排序函数统一处理 + return unique_results # 测试示例 if __name__ == "__main__": diff --git a/app/sdk/pansou.py b/app/sdk/pansou.py index f5ffb5a..2adf95d 100644 --- a/app/sdk/pansou.py +++ b/app/sdk/pansou.py @@ -156,13 +156,44 @@ class PanSou: if not cleaned: return {"success": False, "message": "PanSou搜索无夸克网盘结果"} - # 去重:按 shareurl 去重 - seen_urls = set() - unique_results = [] - for item in cleaned: - url = item.get("shareurl", "") - if url and url not in seen_urls: - seen_urls.add(url) - unique_results.append(item) + # 去重:按 shareurl 归并,保留发布时间最新的记录 + def to_ts(date_str: str) -> float: + if not date_str: + return 0 + try: + s = str(date_str).strip() + from datetime import datetime + try: + return datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timestamp() + except Exception: + pass + try: + return datetime.strptime(s, "%Y-%m-%d").timestamp() + except Exception: + pass + try: + s2 = s.replace('Z', '+00:00') + return datetime.fromisoformat(s2).timestamp() + except Exception: + return 0 + except Exception: + return 0 + by_url = {} + for item in cleaned: + try: + url = item.get("shareurl", "") + if not url: + continue + existed = by_url.get(url) + if not existed: + by_url[url] = item + else: + # 比较 publish_date(若不存在则视为0) + if to_ts(item.get("publish_date")) > to_ts(existed.get("publish_date")): + by_url[url] = item + except Exception: + continue + + unique_results = list(by_url.values()) return {"success": True, 
"data": unique_results} From 5216fa981da680130bf2d30cea6219e847968821 Mon Sep 17 00:00:00 2001 From: x1ao4 Date: Wed, 27 Aug 2025 23:33:53 +0800 Subject: [PATCH 13/13] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B5=84=E6=BA=90?= =?UTF-8?q?=E6=90=9C=E7=B4=A2=E7=BB=93=E6=9E=9C=E5=9C=A8=E5=A4=A7=E6=A0=B7?= =?UTF-8?q?=E6=9C=AC=E4=B8=8B=E8=B6=85=E6=97=B6=E5=90=8E=E9=87=8D=E5=A4=8D?= =?UTF-8?q?=E8=BF=BD=E5=8A=A0=EF=BC=8C=E5=AF=BC=E8=87=B4=E9=87=8D=E5=A4=8D?= =?UTF-8?q?=E6=B8=B2=E6=9F=93=E4=B8=8E=E8=AE=A1=E6=95=B0=E8=86=A8=E8=83=80?= =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 前端:引入搜索 “会话号 + validating” 双重校验,超时立即取消当前会话,并在批处理/渲染前校验,阻断超时后的继续写入;保留稳定 v-for key 确保渲染一致性 - 后端:`get_detail` 增强容错,避免无 `code`/网络异常引发 KeyError;`/get_share_detail` 统一错误返回结构,前端稳定处理 --- app/run.py | 4 ++++ app/templates/index.html | 34 +++++++++++++++++++++++++++++----- quark_auto_save.py | 37 +++++++++++++++++++++++++++++-------- 3 files changed, 62 insertions(+), 13 deletions(-) diff --git a/app/run.py b/app/run.py index b962236..abe6ed3 100644 --- a/app/run.py +++ b/app/run.py @@ -1200,6 +1200,10 @@ def get_share_detail(): if not is_sharing: return jsonify({"success": False, "data": {"error": stoken}}) share_detail = account.get_detail(pwd_id, stoken, pdir_fid, _fetch_share=1) + # 统一错误返回,避免前端崩溃 + if isinstance(share_detail, dict) and share_detail.get("error"): + return jsonify({"success": False, "data": {"error": share_detail.get("error")}}) + share_detail["paths"] = paths share_detail["stoken"] = stoken diff --git a/app/templates/index.html b/app/templates/index.html index e9f145b..98acf0f 100644 --- a/app/templates/index.html +++ b/app/templates/index.html @@ -1032,7 +1032,7 @@ -