Mirror of https://github.com/LmeSzinc/StarRailCopilot.git (synced 2024-11-16 06:25:24 +00:00)
Commit 012fffbb31:
* Add: daily_request_recognition; get_daily_rewards
* Fix: typo
* Fix: typo
* Upd: delete eval; use one loop to handle 5 active point rewards
* Upd: change DAILY_QUEST_GOTO/REWARD pattern; update swipe private method name; extract keyword compare method
* Upd: move warning to single page recognition
* Upd: merge from main
* Add: methods that load daily quest keywords from QuestData.json
* Upd: avoid reading TextMap twice
* Upd: revert Keyword.find method
* Add: preprocess of keyword extract; after_process of daily quest OCR
* Upd: move assets to daily/reward
* Upd: simplify OCR result replacement
132 lines
4.5 KiB
Python
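# Overview: this dev tool reads localized strings from a local StarRailData
# checkout, resolves keyword text ids across the supported UI languages, and
# generates keyword definition modules under ./tasks/ with CodeGenerator.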
import os
import re
import typing as t
from functools import cached_property

from module.base.code_generator import CodeGenerator
from module.config.utils import read_file
from module.logger import logger
from module.ocr.keyword import text_to_variable

UI_LANGUAGES = ['cn', 'cht', 'en', 'jp']


class TextMap:
    DATA_FOLDER = ''

    def __init__(self, lang: str):
        self.lang = lang

    @cached_property
    def data(self) -> dict[int, str]:
        if not TextMap.DATA_FOLDER:
            logger.critical('`TextMap.DATA_FOLDER` is empty, please set it to your path to StarRailData')
            exit(1)
        file = os.path.join(TextMap.DATA_FOLDER, 'TextMap', f'TextMap{self.lang.upper()}.json')
        data = {}
        for id_, text in read_file(file).items():
            data[int(id_)] = text
        return data

    def find(self, name: t.Union[int, str]) -> tuple[int, str]:
        """
        Args:
            name: Text id (hash in TextMap) or the exact text to look up.

        Returns:
            text id (hash in TextMap)
            text
        """
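        # Usage sketch (assuming DATA_FOLDER points at a StarRailData checkout):
        #     TextMap('cn').find('模拟宇宙')  -> (text_id, '模拟宇宙')
        #     TextMap('cn').find(text_id)     -> (text_id, '模拟宇宙')
        # An unknown name logs an error and returns (0, '').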
        if isinstance(name, int) or (isinstance(name, str) and name.isdigit()):
            name = int(name)
            try:
                return name, self.data[name]
            except KeyError:
                pass

        name = str(name)
        for row_id, row_name in self.data.items():
            if row_id >= 0 and row_name == name:
                return row_id, row_name
        for row_id, row_name in self.data.items():
            if row_name == name:
                return row_id, row_name
        logger.error(f'Cannot find name: "{name}" in language {self.lang}')
        return 0, ''


def replace_templates(text: str) -> str:
    """
    Replace templates in the data so the text matches what is shown in game.

    Examples:
        replace_templates("Complete Echo of War #4 time(s)")
        == "Complete Echo of War 1 time(s)"
    """
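    # Note on the two substitutions below: '#4' is a numeric placeholder in the
    # data that the game renders as an actual count, and the second pattern
    # strips simple XML-like markup tags so the result matches the plain text
    # seen on screen.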
    text = re.sub(r'#4', '1', text)
    text = re.sub(r'</?\w+>', '', text)
    return text


class KeywordExtract:
    def __init__(self):
        self.text_map: dict[str, TextMap] = {lang: TextMap(lang) for lang in UI_LANGUAGES}
        self.keywords_id: list[int] = []

    def find_keyword(self, keyword, lang):
        text_map = self.text_map[lang]
        return text_map.find(keyword)

    def load_keywords(self, keywords: list[str], lang='cn'):
        text_map = self.text_map[lang]
        self.keywords_id = [text_map.find(keyword)[0] for keyword in keywords]
        # Drop keywords that could not be found (find() returns id 0 on a miss)
        self.keywords_id = [keyword for keyword in self.keywords_id if keyword != 0]

    def write_keywords(
            self,
            keyword_class,
            output_file: str
    ):
        """
        Args:
            keyword_class: Class name used in the generated file, e.g. 'DungeonNav'.
            output_file: Path of the generated keyword module.
        """
        gen = CodeGenerator()
        gen.Import(f"""
        from .classes import {keyword_class}
        """)
        gen.CommentAutoGenerage('dev_tools.keyword_extract')
        for index, keyword in enumerate(self.keywords_id):
            _, en = self.find_keyword(keyword, lang='en')
            en = text_to_variable(en)
            with gen.Object(key=en, object_class=keyword_class):
                gen.ObjectAttr(key='id', value=index + 1)
                for lang in UI_LANGUAGES:
                    gen.ObjectAttr(key=lang, value=replace_templates(self.find_keyword(keyword, lang=lang)[1]))

        gen.write(output_file)
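        # Rough shape of the generated module (illustrative only; exact
        # formatting depends on CodeGenerator):
        #     from .classes import DungeonNav
        #
        #     Simulated_Universe = DungeonNav(
        #         id=1,
        #         cn='模拟宇宙',
        #         cht='...',
        #         en='Simulated Universe',
        #         jp='...',
        #     )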

    def load_daily_quests_keywords(self, lang='cn'):
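        # Data flow (based on the reads below): DailyQuest.json lists the daily
        # quest ids, QuestData.json maps each quest id to its title hash, and
        # TextMap resolves that hash to the localized quest title.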
        daily_quest = read_file(os.path.join(TextMap.DATA_FOLDER, 'ExcelOutput', 'DailyQuest.json'))
        quest_data = read_file(os.path.join(TextMap.DATA_FOLDER, 'ExcelOutput', 'QuestData.json'))
        quests_hash = [quest_data[quest_id]["QuestTitle"]["Hash"] for quest_id in daily_quest]
        quest_keywords = [self.text_map[lang].find(quest_hash)[1] for quest_hash in quests_hash]
        self.load_keywords(quest_keywords, lang)


def generate():
    ex = KeywordExtract()
    # Dungeon navigation entries: Simulated Universe, Calyx (Golden), Calyx (Crimson),
    # Stagnant Shadow, Cavern of Corrosion, Echo of War, Forgotten Hall
    ex.load_keywords(['模拟宇宙', '拟造花萼(金)', '拟造花萼(赤)', '凝滞虚影', '侵蚀隧洞', '历战余响', '忘却之庭'])
    ex.write_keywords(keyword_class='DungeonNav', output_file='./tasks/dungeon/keywords/nav.py')
    # Dungeon page tabs: Operation Briefing, Survival Index, Daily Training
    ex.load_keywords(['行动摘要', '生存索引', '每日实训'])
    ex.write_keywords(keyword_class='DungeonTab', output_file='./tasks/dungeon/keywords/tab.py')
    ex.load_daily_quests_keywords()
    ex.write_keywords(keyword_class='DailyQuest', output_file='./tasks/daily/keywords/daily_quest.py')


if __name__ == '__main__':
    TextMap.DATA_FOLDER = r''
    generate()
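# Usage: set TextMap.DATA_FOLDER above to a local checkout of the StarRailData
# repository, then run this file from the project root to regenerate the
# keyword modules under ./tasks/.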