forked from sk1982/actaeon
scripts
This commit is contained in:
parent
762a140f4e
commit
c4fdb7be58
4
scripts/.db-migraterc
Normal file
4
scripts/.db-migraterc
Normal file
@ -0,0 +1,4 @@
|
||||
{
|
||||
"table": "actaeon_migrations",
|
||||
"sql-file": true
|
||||
}
|
370
scripts/.gitignore
vendored
Normal file
370
scripts/.gitignore
vendored
Normal file
@ -0,0 +1,370 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
|
||||
|
||||
# Runtime data
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
.stylelintcache
|
||||
|
||||
# Microbundle cache
|
||||
.rpt2_cache/
|
||||
.rts2_cache_cjs/
|
||||
.rts2_cache_es/
|
||||
.rts2_cache_umd/
|
||||
|
||||
# Optional REPL history
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variable files
|
||||
.env
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.local
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
.cache
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
.nuxt
|
||||
dist
|
||||
|
||||
# Gatsby files
|
||||
.cache/
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
.temp
|
||||
.cache
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
.dynamodb/
|
||||
|
||||
# TernJS port file
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
.vscode-test
|
||||
|
||||
# yarn v2
|
||||
.yarn/cache
|
||||
.yarn/unplugged
|
||||
.yarn/build-state.yml
|
||||
.yarn/install-state.gz
|
||||
.pnp.*
|
||||
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff
|
||||
.idea/**/workspace.xml
|
||||
.idea/**/tasks.xml
|
||||
.idea/**/usage.statistics.xml
|
||||
.idea/**/dictionaries
|
||||
.idea/**/shelf
|
||||
|
||||
# AWS User-specific
|
||||
.idea/**/aws.xml
|
||||
|
||||
# Generated files
|
||||
.idea/**/contentModel.xml
|
||||
|
||||
# Sensitive or high-churn files
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
.idea/**/dbnavigator.xml
|
||||
|
||||
# Gradle
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# Gradle and Maven with auto-import
|
||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||
# since they will be recreated, and may cause churn. Uncomment if using
|
||||
# auto-import.
|
||||
# .idea/artifacts
|
||||
# .idea/compiler.xml
|
||||
# .idea/jarRepositories.xml
|
||||
# .idea/modules.xml
|
||||
# .idea/*.iml
|
||||
# .idea/modules
|
||||
# *.iml
|
||||
# *.ipr
|
||||
|
||||
# CMake
|
||||
cmake-build-*/
|
||||
|
||||
# Mongo Explorer plugin
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
# File-based project format
|
||||
*.iws
|
||||
|
||||
# IntelliJ
|
||||
out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# SonarLint plugin
|
||||
.idea/sonarlint/
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
# Editor-based Rest Client
|
||||
.idea/httpRequests
|
||||
|
||||
# Android studio 3.1+ serialized cache file
|
||||
.idea/caches/build_file_checksums.ser
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
assets/
|
42
scripts/asset-extract.py
Normal file
42
scripts/asset-extract.py
Normal file
@ -0,0 +1,42 @@
|
||||
import argparse
|
||||
import traceback
|
||||
from extracters import get_extracters
|
||||
from multiprocessing import cpu_count
|
||||
from concurrent.futures import ProcessPoolExecutor
|
||||
from multiprocessing import Manager
|
||||
|
||||
def run(event, func, args):
    """Execute ``func(*args)`` unless *event* is already set.

    Worker entry point for the process pool in ``__main__``: *event* is a
    shared abort flag, so once any task fails, all remaining queued tasks
    become no-ops instead of doing wasted work.

    Raises:
        Re-raises whatever *func* raised, after setting the abort flag and
        printing the traceback (the exception would otherwise only live
        inside an ignored Future).
    """
    if event.is_set():
        return

    try:
        func(*args)
    except Exception:
        # Flag the failure for the other workers, then surface it here —
        # submit() results are never inspected by the caller.
        event.set()
        traceback.print_exc()
        raise
|
||||
|
||||
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help='asset config', default='assets.yaml')
    parser.add_argument('--processes', type=int, default=cpu_count(), help='number of processes to use')
    parser.add_argument('--out-dir', '--output', help='output directory', default='../public/assets')
    subparsers = parser.add_subparsers(dest='game', help='game extracter to use', required=True)

    # Let every registered extracter contribute its own subcommand.
    extracters = get_extracters()
    for game_name, extracter_cls in extracters.items():
        extracter_cls.register(subparsers.add_parser(game_name))

    args = parser.parse_args()
    extracter = extracters[args.game](**vars(args))

    if args.processes == 1:
        # Serial mode: run each (callable, *arguments) work item inline.
        for task in extracter.extract():
            task[0](*task[1:])
    else:
        with Manager() as manager:
            # Shared abort flag: the first failing task sets it, and
            # run() skips everything queued after that.
            abort_event = manager.Event()
            with ProcessPoolExecutor(args.processes) as executor:
                for work_func, *work_args in extracter.extract():
                    executor.submit(run, abort_event, work_func, work_args)

    extracter.cleanup()
|
29
scripts/assets.yaml
Normal file
29
scripts/assets.yaml
Normal file
@ -0,0 +1,29 @@
|
||||
# path to ffmpeg if it is not on the path
|
||||
ffmpeg_path: 'ffmpeg'
|
||||
# path to vgmstream if it is not on the path
|
||||
# required for some audio conversions
|
||||
# https://github.com/vgmstream/vgmstream
|
||||
vgmstream_path: 'vgmstream-cli'
|
||||
|
||||
# options for music
|
||||
music:
|
||||
enable: true
|
||||
extension: .opus
|
||||
ffmpeg_args: ['-b:a', '64k']
|
||||
|
||||
# options for music jacket images
|
||||
jackets:
|
||||
enable: true
|
||||
extension: .webp
|
||||
|
||||
# options for other images
|
||||
images:
|
||||
enable: true
|
||||
extension: .webp
|
||||
ffmpeg_args: ['-preset', 'drawing']
|
||||
|
||||
# options for other audio
|
||||
audio:
|
||||
enable: true
|
||||
extension: .opus
|
||||
ffmpeg_args: ['-b:a', '64k']
|
30
scripts/db-import.py
Normal file
30
scripts/db-import.py
Normal file
@ -0,0 +1,30 @@
|
||||
import argparse
|
||||
from dotenv import load_dotenv
|
||||
from urllib.parse import urlparse
|
||||
import mariadb
|
||||
import os
|
||||
from importers import get_importers
|
||||
|
||||
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', '-e', default='../.env.local', help='env file to load DATABASE_URL from')
    subparsers = parser.add_subparsers(dest='game', help='game importer to use', required=True)

    # Each registered importer adds its own subcommand and arguments.
    importers = get_importers()
    for game_name, importer_cls in importers.items():
        importer_cls.register(subparsers.add_parser(game_name))

    args = parser.parse_args()
    load_dotenv(args.env)

    # Connection parameters come from the DATABASE_URL in the env file.
    db_url = urlparse(os.getenv('DATABASE_URL'))
    conn = mariadb.connect(
        user=db_url.username,
        password=db_url.password,
        host=db_url.hostname,
        port=db_url.port or 3306,
        database=db_url.path[1:]
    )

    importer = importers[args.game](conn=conn, **vars(args))
    importer.do_import()
    conn.commit()
    conn.close()
|
2
scripts/extracters/__init__.py
Normal file
2
scripts/extracters/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
from .extracter import get_extracters
|
||||
from .chuni import Chuni
|
72
scripts/extracters/chuni.py
Normal file
72
scripts/extracters/chuni.py
Normal file
@ -0,0 +1,72 @@
|
||||
from pathlib import Path
|
||||
from itertools import chain
|
||||
from PyCriCodecs import AWB, ACB, HCA
|
||||
|
||||
from .extracter import Extracter, add_extracter
|
||||
|
||||
|
||||
class Chuni(Extracter):
    """Chunithm asset extracter.

    Walks the game's data directory (``A000``) and opt directory
    (``A001``, …) and yields (callable, *args) work items that convert
    textures, jackets, music and system voices into web-ready assets.
    """

    def __init__(self, *, data_dir, opt_dir, **kwargs):
        super().__init__(**kwargs)
        self.data_dir = Path(data_dir)
        self.opt_dir = Path(opt_dir)
        # All output for this game lands under <out_dir>/chuni.
        self.out_dir /= 'chuni'

    def process_image(self, file: Path, out: Path, media_type):
        """Convert one .dds texture to the format configured for *media_type*."""
        out.parent.mkdir(parents=True, exist_ok=True)
        self.ffmpeg(['-i', file], media_type, out)
        print(file)

    def process_music(self, file: Path, out: Path):
        """Decode the first stream of an .awb and transcode it as music."""
        out.parent.mkdir(parents=True, exist_ok=True)
        awb = AWB(str(file))
        # omnimix hca's cannot be decoded by PyCriCodecs
        self.ffmpeg(['-i', self.vgmstream(next(awb.getfiles()))], 'music', out)
        print(file)

    def process_audio(self, hca: bytes, out: Path):
        """Decode raw HCA bytes via vgmstream and transcode as generic audio."""
        out.parent.mkdir(parents=True, exist_ok=True)
        self.ffmpeg(['-i', self.vgmstream(hca)], 'audio', out)
        print(out)

    def extract_images(self):
        # Map each game asset folder to its output folder name.
        for folder_name, output_folder in (
            ('avatarAccessory', 'avatar'),
            ('ddsImage', 'character'),
            ('mapIcon', 'map-icon'),
            ('namePlate', 'name-plate'),
            ('systemVoice', 'system-voice-icon')
        ):
            for file in chain(self.data_dir.glob(f'A000/{folder_name}/*/*.dds'),
                              self.opt_dir.glob(f'*/{folder_name}/*/*.dds')):
                yield self.process_image, file, self.out_dir / output_folder / file.name, 'images'

        # Two fixed UI textures that live outside the per-category folders.
        texture = self.data_dir / 'surfboard' / 'texture'
        yield self.process_image, texture / 'CHU_UI_Common_Avatar_body_00.dds', self.out_dir / 'avatar' / 'CHU_UI_Common_Avatar_body_00.dds', 'images'
        yield self.process_image, texture / 'CHU_UI_title_rank_00_v10.dds', self.out_dir / 'trophy' / 'CHU_UI_title_rank_00_v10.dds', 'images'

    def extract_jacket(self):
        # Music jacket art uses the separate 'jackets' media config.
        for file in chain(self.data_dir.glob('A000/music/*/*.dds'),
                          self.opt_dir.glob('*/music/*/*.dds')):
            yield self.process_image, file, self.out_dir / 'jacket' / file.name, 'jackets'

    def extract_music(self):
        for file in chain(self.data_dir.glob('A000/cueFile/*/music*.awb'),
                          self.opt_dir.glob('*/cueFile/*/music*.awb')):
            yield self.process_music, file, self.out_dir / 'music' / file.name

    def extract_audio(self):
        # System voices ship as .acb cue sheets with an embedded .awb.
        for file in chain(self.data_dir.glob('A000/cueFile/*/systemvoice*.acb'),
                          self.opt_dir.glob('*/cueFile/*/systemvoice*.acb')):
            acb = ACB(str(file))
            names = self.acb_filenames(acb)
            for i, data in enumerate(acb.awb.getfiles()):
                # NOTE(review): names[0] assumes every stream sits on AWB
                # port 0 — confirm against multi-port .acb files.
                yield self.process_audio, data, self.out_dir / 'system-voice' / f'{file.stem}_{names[0][i][1]}'

    @staticmethod
    def register(parser):
        """Add the Chunithm-specific command-line arguments."""
        parser.add_argument('--data-dir', help='data directory (containing A000)', required=True)
        parser.add_argument('--opt-dir', help='opt directory (containing A001, etc.)', required=True)


add_extracter(Chuni)
|
167
scripts/extracters/extracter.py
Normal file
167
scripts/extracters/extracter.py
Normal file
@ -0,0 +1,167 @@
|
||||
from PyCriCodecs import ACB
|
||||
import sys
|
||||
from abc import abstractmethod
|
||||
from pathlib import Path
|
||||
import argparse
|
||||
import subprocess
|
||||
import yaml
|
||||
import shutil
|
||||
import string
|
||||
import random
|
||||
import struct
|
||||
from collections import defaultdict
|
||||
|
||||
|
||||
class Extracter:
    """Base class for game asset extracters.

    Loads the shared YAML asset config and provides helpers to decode
    audio with vgmstream and transcode media with ffmpeg. Subclasses
    implement the ``extract_*`` generators, each yielding
    (callable, *args) work items; ``extract()`` chains the enabled ones.
    """

    def __init__(self, *, config, out_dir, **kwargs):
        # config: path to the assets YAML file; out_dir: output root.
        with open(config, 'r') as f:
            self.config = yaml.safe_load(f)
        self.music_enabled = self.config['music']['enable']
        self.jackets_enabled = self.config['jackets']['enable']
        self.images_enabled = self.config['images']['enable']
        self.audio_enabled = self.config['audio']['enable']
        self.out_dir = Path(out_dir)
        # Scratch space for intermediate files; removed by cleanup().
        self.tmp_dir = self.out_dir / 'tmp'

    def get_tmp(self, ext='.dat'):
        """Create and return a unique, empty temp file path under tmp_dir."""
        self.tmp_dir.mkdir(parents=True, exist_ok=True)
        while True:
            name = ''.join(random.choices(string.ascii_letters + string.digits + '_-+', k=32))
            path = self.tmp_dir / (name + ext)
            if not path.exists():
                try:
                    # exist_ok=False closes the race between the exists()
                    # check and creation when multiple processes share tmp_dir.
                    path.touch(exist_ok=False)
                    return path
                except FileExistsError:
                    pass

    def vgmstream(self, input_file):
        """Decode *input_file* with vgmstream-cli and return the raw output.

        *input_file* may be a path, or raw bytes (written to a temp file
        first, since vgmstream-cli reads from disk).

        Raises:
            RuntimeError: if vgmstream exits non-zero (stderr is forwarded).
        """
        is_tmp = False
        if isinstance(input_file, bytes):
            tmp = self.get_tmp('.hca')
            is_tmp = True
            with open(tmp, 'wb') as f:
                f.write(input_file)
            input_file = tmp

        args = [
            self.config['vgmstream_path'],
            '-p',
            input_file
        ]

        try:
            res = subprocess.run(args, capture_output=True)
        finally:
            # Remove the temp file even when vgmstream fails; previously it
            # leaked on the error path and only cleanup() reclaimed it.
            if is_tmp:
                input_file.unlink()

        if res.returncode:
            sys.stderr.buffer.write(res.stderr)
            raise RuntimeError(f'vgmstream exited with code {res.returncode}')

        return res.stdout

    def ffmpeg(self, input_args, media_type, output_name):
        """Transcode media with ffmpeg using the *media_type* config section.

        At most one element of *input_args* may be raw bytes; it is piped
        to ffmpeg on stdin ('-'). The output extension and extra ffmpeg
        arguments come from ``self.config[media_type]``.

        Raises:
            ValueError: if more than one bytes buffer is supplied.
            RuntimeError: if ffmpeg exits non-zero (stderr is forwarded).
        """
        buffer_input = None
        input_args = list(input_args)

        for i, arg in enumerate(input_args):
            if isinstance(arg, bytes):
                if buffer_input is not None:
                    raise ValueError('more than one buffer passed to ffmpeg input')
                buffer_input = arg
                input_args[i] = '-'
            else:
                input_args[i] = str(arg)

        args = [
            self.config['ffmpeg_path'],
            '-y',
            '-hide_banner',
            '-loglevel',
            'error',
            *input_args,
            *self.config[media_type].get('ffmpeg_args', []),
            Path(output_name).with_suffix(self.config[media_type]['extension'])
        ]

        # Compare against None: an empty bytes buffer is falsy but still a
        # deliberate stdin input.
        if buffer_input is not None:
            res = subprocess.run(args, capture_output=True, input=buffer_input)
        else:
            res = subprocess.run(args, capture_output=True)

        if res.returncode:
            sys.stderr.buffer.write(res.stderr)
            raise RuntimeError(f'ffmpeg exited with code {res.returncode}')

    def cleanup(self):
        """Remove the temp directory; safe to call even if it never existed."""
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    @staticmethod
    def acb_filenames(acb: ACB):
        """Map AWB stream positions back to their cue names.

        Returns a mapping of
        ``{awb_port: {stream_index: (track_order, cue_name)}}`` built from
        the ACB's cue/sequence/synth/waveform tables.
        """
        awb_dict: dict[int, dict[int, tuple[int, str]]] = defaultdict(dict)

        payload = acb.payload[0]
        for name_entry in payload['CueNameTable']:
            name = name_entry['CueName'][1]
            index = name_entry['CueIndex'][1]

            sequence = payload['SequenceTable'][index]
            num_tracks = sequence['NumTracks'][1]
            if not num_tracks:
                continue

            # TrackIndex packs the track ids as big-endian uint16s.
            track_indexes = struct.unpack(f'>{num_tracks}H', sequence['TrackIndex'][1])
            waveforms = []
            for track_index in track_indexes:
                # NOTE(review): assumes the waveform id is stored in the bytes
                # after the first two of ReferenceItems — confirm against the
                # PyCriCodecs ACB layout.
                waveform_index = int.from_bytes(payload['SynthTable'][track_index]['ReferenceItems'][1][2:], 'big')
                waveforms.append(payload['WaveformTable'][waveform_index])

            for i, waveform in enumerate(waveforms):
                awb_index = waveform['StreamAwbPortNo'][1]
                stream_index = waveform['StreamAwbId'][1]
                awb_dict[awb_index][stream_index] = i, name

        return awb_dict

    @abstractmethod
    def extract_jacket(self):
        """Yield work items for music jacket images."""
        raise NotImplementedError

    @abstractmethod
    def extract_images(self):
        """Yield work items for general images."""
        raise NotImplementedError

    @abstractmethod
    def extract_music(self):
        """Yield work items for music tracks."""
        raise NotImplementedError

    @abstractmethod
    def extract_audio(self):
        """Yield work items for other audio (e.g. voices)."""
        raise NotImplementedError

    def extract(self):
        """Yield all work items for the media types enabled in the config."""
        if self.jackets_enabled:
            yield from self.extract_jacket()
        if self.images_enabled:
            yield from self.extract_images()
        if self.music_enabled:
            yield from self.extract_music()
        if self.audio_enabled:
            yield from self.extract_audio()

    @staticmethod
    @abstractmethod
    def register(parser: argparse.ArgumentParser):
        """Add this extracter's command-line arguments to *parser*."""
        raise NotImplementedError
|
||||
|
||||
|
||||
# Registry of available extracter classes, keyed by lower-cased class name.
# Note: the values are the classes themselves (add_extracter is called with
# the class), hence type[Extracter] rather than Extracter.
extracters: dict[str, type[Extracter]] = {}


def add_extracter(extracter):
    """Register an Extracter subclass under its lower-cased class name."""
    extracters[extracter.__name__.lower()] = extracter


def get_extracters():
    """Return the registry of all registered extracter classes."""
    return extracters
|
2
scripts/importers/__init__.py
Normal file
2
scripts/importers/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
from .importer import get_importers
|
||||
from .chuni import Chuni
|
77
scripts/importers/chuni.py
Normal file
77
scripts/importers/chuni.py
Normal file
@ -0,0 +1,77 @@
|
||||
from pathlib import Path
|
||||
from xml.etree import ElementTree as ET
|
||||
from itertools import chain
|
||||
from .importer import Importer, add_importer
|
||||
|
||||
# XPaths common to most Chunithm static-data XML records, as consumed by
# Chuni.get_xml: a (xpath, type) tuple converts the element text with the
# given type; a bare string keeps it as str.
BASE_XPATHS = [
    ('./name/id', int),
    './name/str',
    './sortName',
    './image/path'
]
|
||||
|
||||
class Chuni(Importer):
    """Chunithm static-data importer.

    Parses game XML metadata from the data (``A000``) and opt directories
    and upserts it into the ``actaeon_chuni_static_*`` tables.
    """

    def __init__(self, *, data_dir, opt_dir, **kwargs):
        super().__init__(**kwargs)
        self.data_dir = Path(data_dir)
        self.opt_dir = Path(opt_dir)

    def get_xml(self, folder, name, *xpaths):
        """Collect one row per ``{name}.xml`` found under the game folders.

        Searches ``A000/<folder>/*/`` in data_dir and ``*/<folder>/*/`` in
        opt_dir. Each xpath is either a bare string (value kept as str) or
        an (xpath, type) tuple whose type converts the element text.
        Returns a list of value tuples in xpath order.
        """
        rows = []

        for file in chain(self.data_dir.glob(f'A000/{folder}/*/{name}.xml'),
                          self.opt_dir.glob(f'*/{folder}/*/{name}.xml')):
            print(file)
            tree = ET.parse(file)
            data = []
            for xpath in xpaths:
                if isinstance(xpath, tuple):
                    xpath, datatype = xpath
                else:
                    datatype = str
                data.append(datatype(tree.find(xpath).text))
            rows.append(tuple(data))

        return rows

    # NOTE on the upserts below: the original statements used
    # "ON DUPLICATE KEY UPDATE name=name" — in MySQL/MariaDB, a bare
    # "col=col" assigns the column its OLD value, so re-imports never
    # updated existing rows. VALUES(col) refers to the value the INSERT
    # attempted, which is what an upsert needs.

    def import_map_icon(self):
        """Upsert map icons from mapIcon/MapIcon.xml files."""
        self.cur.executemany(
            '''INSERT INTO actaeon_chuni_static_map_icon(id, name, sortName, imagePath)
            VALUES (%s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE name=VALUES(name), sortName=VALUES(sortName), imagePath=VALUES(imagePath)''',
            self.get_xml('mapIcon', 'MapIcon', *BASE_XPATHS)
        )

    def import_name_plate(self):
        """Upsert name plates from namePlate/NamePlate.xml files."""
        self.cur.executemany(
            '''INSERT INTO actaeon_chuni_static_name_plate(id, name, sortName, imagePath)
            VALUES (%s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE name=VALUES(name), sortName=VALUES(sortName), imagePath=VALUES(imagePath)''',
            self.get_xml('namePlate', 'NamePlate', *BASE_XPATHS)
        )

    def import_system_voice(self):
        """Upsert system voices from systemVoice/SystemVoice.xml files."""
        self.cur.executemany(
            '''INSERT INTO actaeon_chuni_static_system_voice(id, name, sortName, imagePath, cuePath)
            VALUES (%s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE name=VALUES(name), sortName=VALUES(sortName), imagePath=VALUES(imagePath), cuePath=VALUES(cuePath)''',
            self.get_xml('systemVoice', 'SystemVoice', *BASE_XPATHS, './cue/str')
        )

    def import_trophies(self):
        """Upsert trophies (titles) from trophy/Trophy.xml files."""
        self.cur.executemany(
            '''INSERT INTO actaeon_chuni_static_trophies(id, name, rareType, explainText)
            VALUES (%s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE name=VALUES(name), rareType=VALUES(rareType), explainText=VALUES(explainText)''',
            self.get_xml('trophy', 'Trophy', ('./name/id', int), './name/str', ('./rareType', int), './explainText')
        )

    def do_import(self):
        """Run every supported table import."""
        self.import_map_icon()
        self.import_name_plate()
        self.import_system_voice()
        self.import_trophies()

    @staticmethod
    def register(parser):
        """Add the Chunithm-specific command-line arguments."""
        parser.add_argument('--data-dir', help='data directory (containing A000)', required=True)
        parser.add_argument('--opt-dir', help='opt directory (containing A001, etc.)', required=True)


add_importer(Chuni)
|
30
scripts/importers/importer.py
Normal file
30
scripts/importers/importer.py
Normal file
@ -0,0 +1,30 @@
|
||||
from abc import abstractmethod
|
||||
import mariadb
|
||||
import argparse
|
||||
|
||||
|
||||
class Importer:
    """Base class for game data importers backed by a MariaDB connection.

    Subclasses implement ``do_import``; the caller is responsible for
    committing (and closing) the connection after a successful import.
    """

    def __init__(self, *, conn: mariadb.Connection, **kwargs):
        # Disable autocommit so the whole import runs as one transaction;
        # db-import.py commits only after do_import() finishes cleanly.
        conn.autocommit = False
        self.conn = conn
        self.cur = conn.cursor()

    @abstractmethod
    def do_import(self):
        """Import every supported table for this game."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def register(parser: argparse.ArgumentParser):
        """Add this importer's command-line arguments to *parser*."""
        raise NotImplementedError
|
||||
|
||||
|
||||
# Registry of available importer classes, keyed by lower-cased class name.
# Note: the values are the classes themselves (add_importer is called with
# the class), hence type[Importer] rather than Importer.
importers: dict[str, type[Importer]] = {}


def add_importer(importer):
    """Register an Importer subclass under its lower-cased class name."""
    importers[importer.__name__.lower()] = importer


def get_importers():
    """Return the registry of all registered importer classes."""
    return importers
|
BIN
scripts/requirements.txt
Normal file
BIN
scripts/requirements.txt
Normal file
Binary file not shown.
Loading…
Reference in New Issue
Block a user