#!/usr/bin/env python3
"""
Code Map Generator for Godot Projects - JSON Only, Minified Output

Generates machine-readable code intelligence in JSON format for LLM development.

Usage:
    python tools/generate_code_map.py                       # Generate all maps in .llm/
    python tools/generate_code_map.py --verbose             # Show progress
    python tools/generate_code_map.py --output custom.json  # Custom output
    python tools/generate_code_map.py --all                 # Maps + diagrams + docs + metrics

Output:
    .llm/code_map_api.json          - Function signatures, types, parameters
    .llm/code_map_architecture.json - Autoloads, patterns, structure
    .llm/code_map_flows.json        - Signal chains, scene transitions
    .llm/code_map_security.json     - Validation patterns, error handling
    .llm/code_map_assets.json       - Asset dependencies, licenses
    .llm/code_map_metadata.json     - Code quality metrics
"""

import argparse
import json
import re
import shutil
from pathlib import Path
from typing import Any

# Constants
DEFAULT_LLM_DIR = ".llm"
DEFAULT_DOCS_DIR = "docs/generated"


|
def write_json_minified(data: dict, path: Path) -> None:
    """Write minified JSON (no whitespace) for optimal token efficiency"""
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, separators=(",", ":"), ensure_ascii=False)


def flush_directory(directory: Path, verbose: bool = False) -> None:
    """Remove all contents of a directory while preserving the directory itself"""
    if directory.exists() and directory.is_dir():
        for item in directory.iterdir():
            if item.is_file():
                item.unlink()
                if verbose:
                    print(f"  Removed file: {item.name}")
            elif item.is_dir():
                shutil.rmtree(item)
                if verbose:
                    print(f"  Removed directory: {item.name}/")


def ensure_output_dirs(root: Path) -> tuple[Path, Path]:
    """Create output directories if they don't exist"""
    llm_dir = root / DEFAULT_LLM_DIR
    docs_dir = root / DEFAULT_DOCS_DIR
    llm_dir.mkdir(exist_ok=True)
    # docs/generated is nested, so create missing parent directories as well
    docs_dir.mkdir(exist_ok=True, parents=True)
    return llm_dir, docs_dir


class GDScriptParser:
    """Parser for GDScript (.gd) files with flat, simple logic"""

    def __init__(self, file_path: Path, content: str):
        self.file_path = file_path
        self.content = content
        self.lines = content.split("\n")

    def parse(self) -> dict[str, Any]:
        """Parse GDScript file - early return for empty content"""
        if not self.content:
            return {}

        return {
            "path": str(self.file_path),
            "extends": self._get_extends(),
            "class_name": self._get_class_name(),
            "signals": self._parse_signals(),
            "constants": self._parse_constants(),
            "enums": self._parse_enums(),
            "export_vars": self._parse_export_vars(),
            "variables": self._parse_variables(),
            "functions": self._parse_functions(),
            "preloads": self._parse_preloads(),
            "autoload_refs": self._get_autoload_refs(),
            "signal_emissions": self._parse_signal_emissions(),
            "input_actions": self._get_input_actions(),
        }

    def _get_extends(self) -> str | None:
        """Extract extends declaration"""
        for line in self.lines:
            stripped = line.strip()
            if stripped.startswith("extends "):
                return stripped[8:].strip()
        return None

    def _get_class_name(self) -> str | None:
        """Extract class_name declaration"""
        for line in self.lines:
            stripped = line.strip()
            if stripped.startswith("class_name "):
                return stripped[11:].strip()
        return None

    def _parse_signals(self) -> list[dict[str, Any]]:
        """Extract signal definitions with parameters"""
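        # Matches GDScript signal declarations such as (illustrative examples):
        #   signal died
        #   signal health_changed(old_value: int, new_value: int)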
        signals = []
        pattern = re.compile(r"^\s*signal\s+(\w+)(?:\((.*?)\))?\s*$", re.MULTILINE)

        for match in pattern.finditer(self.content):
            signal_name = match.group(1)
            params_str = match.group(2) or ""

            parameters = []
            if params_str:
                for param in params_str.split(","):
                    param = param.strip()
                    if not param:
                        continue

                    if ":" in param:
                        parts = param.split(":", 1)
                        parameters.append(
                            {"name": parts[0].strip(), "type": parts[1].strip()}
                        )
                    else:
                        parameters.append({"name": param, "type": "Variant"})

            signals.append({"name": signal_name, "parameters": parameters})

        return signals

    def _parse_constants(self) -> list[dict[str, Any]]:
        """Extract const declarations"""
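        # Matches SCREAMING_SNAKE const declarations such as (illustrative examples):
        #   const MAX_HEALTH: int = 100
        #   const SAVE_PATH = "user://save.dat"  # trailing comments are stripped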
        constants = []
        pattern = re.compile(
            r"^\s*const\s+([A-Z_][A-Z0-9_]*)\s*(?::\s*([^=]+))?\s*=\s*(.+?)(?:\s*#.*)?$",
            re.MULTILINE,
        )

        for match in pattern.finditer(self.content):
            constants.append(
                {
                    "name": match.group(1),
                    "type": match.group(2).strip() if match.group(2) else None,
                    "value": match.group(3).strip(),
                }
            )

        return constants

    def _parse_enums(self) -> dict[str, list[str]]:
        """Extract enum definitions"""
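        # Matches enum blocks such as (illustrative example):
        #   enum State { IDLE, RUNNING = 2, DEAD }
        # Only member names are kept; assigned values and comments are dropped.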
        enums = {}
        pattern = re.compile(
            r"^\s*enum\s+(\w+)?\s*\{([^}]+)\}", re.MULTILINE | re.DOTALL
        )

        for match in pattern.finditer(self.content):
            enum_name = match.group(1) or "Anonymous"
            enum_body = match.group(2)

            members = []
            for member in enum_body.split(","):
                member = re.sub(r"\s*#.*$", "", member).strip()
                member = re.sub(r"\s*=.*$", "", member).strip()
                if member:
                    members.append(member)

            if members:
                enums[enum_name] = members

        return enums

    def _parse_export_vars(self) -> list[dict[str, Any]]:
        """Extract @export variable declarations"""
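        # Matches exported variables such as (illustrative example):
        #   @export var speed: float = 200.0
        # The optional parenthesized group also catches older @export(...) hint syntax.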
        export_vars = []
        pattern = re.compile(
            r"^\s*@export(?:\(([^)]+)\))?\s+var\s+(\w+)\s*(?::\s*([^=]+))?\s*(?:=\s*(.+?))?(?:\s*#.*)?$",
            re.MULTILINE,
        )

        for match in pattern.finditer(self.content):
            export_vars.append(
                {
                    "name": match.group(2),
                    "type": match.group(3).strip() if match.group(3) else None,
                    "default": match.group(4).strip() if match.group(4) else None,
                    "export_hint": match.group(1),
                }
            )

        return export_vars

    def _parse_variables(self) -> list[dict[str, Any]]:
        """Extract regular variable declarations (excluding @export)"""
        variables = []

        for i, line in enumerate(self.lines):
            stripped = line.strip()

            # Skip empty lines, comments, @export vars
            if (
                not stripped
                or stripped.startswith("#")
                or stripped.startswith("@export")
            ):
                continue

            # Skip if previous line has @export
            if i > 0 and "@export" in self.lines[i - 1]:
                continue

            # Match var declaration
            var_match = re.match(
                r"^\s*var\s+(\w+)\s*(?::\s*([^=]+))?\s*(?:=\s*(.+?))?(?:\s*#.*)?$",
                line,
            )

            if var_match:
                variables.append(
                    {
                        "name": var_match.group(1),
                        "type": var_match.group(2).strip() if var_match.group(2) else None,
                        "default": var_match.group(3).strip() if var_match.group(3) else None,
                    }
                )

        return variables

    def _parse_functions(self) -> list[dict[str, Any]]:
        """Extract function definitions with signatures"""
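        # Matches function definitions such as (illustrative example):
        #   func take_damage(amount: int, source: Node = null) -> bool: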
        functions = []
        pattern = re.compile(
            r"^\s*func\s+(\w+)\s*\(([^)]*)\)\s*(?:->\s*([^:]+))?\s*:",
            re.MULTILINE,
        )

        for match in pattern.finditer(self.content):
            func_data = self._extract_function_data(match)
            if func_data:
                functions.append(func_data)

        return functions

    def _extract_function_data(self, match) -> dict[str, Any] | None:
        """Extract data for a single function - flat logic"""
        try:
            func_name = match.group(1)
            params_str = match.group(2)
            return_type = match.group(3).strip() if match.group(3) else "void"

            parameters = self._parse_function_params(params_str)
            docstring = self._extract_docstring(match.start())

            return {
                "name": func_name,
                "parameters": parameters,
                "return_type": return_type,
                "docstring": docstring,
            }
        except Exception:
            return None

    def _parse_function_params(self, params_str: str) -> list[dict[str, Any]]:
        """Parse function parameters - extracted for clarity"""
        parameters = []

        if not params_str.strip():
            return parameters

        for param in params_str.split(","):
            param = param.strip()
            if not param:
                continue

            param_data = self._parse_single_param(param)
            if param_data:
                parameters.append(param_data)

        return parameters

    def _parse_single_param(self, param: str) -> dict[str, Any] | None:
        """Parse a single parameter - flat logic"""
        if ":" in param:
            parts = param.split(":", 1)
            if len(parts) < 2:
                return None

            param_name = parts[0].strip()
            param_rest = parts[1].strip()

            param_type = param_rest
            param_default = None

            if "=" in param_rest:
                type_default = param_rest.split("=", 1)
                param_type = type_default[0].strip()
                param_default = (
                    type_default[1].strip() if len(type_default) > 1 else None
                )

            return {"name": param_name, "type": param_type, "default": param_default}

        # Parameter without type
        param_parts = param.split("=", 1)
        return {
            "name": param_parts[0].strip(),
            "type": "Variant",
            "default": param_parts[1].strip() if len(param_parts) > 1 else None,
        }

    def _extract_docstring(self, func_start_pos: int) -> str | None:
        """Extract docstring for a function"""
        lines_before = self.content[:func_start_pos].count("\n")
        docstring_lines = []
        in_docstring = False
        quote_style = None

        # Scan up to 9 lines after the declaration for a quoted docstring
        # or GDScript-style "##" doc comments
        for i in range(lines_before + 1, min(lines_before + 10, len(self.lines))):
            line = self.lines[i].strip()

            if not in_docstring:
                if line.startswith('"""') or line.startswith("'''"):
                    quote_style = '"""' if line.startswith('"""') else "'''"
                    in_docstring = True
                    if line.count(quote_style) >= 2:
                        # Single-line docstring: opening and closing quotes together
                        return line.strip(quote_style).strip()
                    docstring_lines.append(line[3:])
                elif line.startswith("##"):
                    docstring_lines.append(line[2:].strip())
                elif not line or line.startswith("#"):
                    continue
                else:
                    break
            else:
                if quote_style in line:
                    docstring_lines.append(line.replace(quote_style, ""))
                    break
                docstring_lines.append(line)

        return "\n".join(docstring_lines).strip() if docstring_lines else None

    def _parse_preloads(self) -> list[dict[str, str]]:
        """Extract preload() calls"""
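        # Matches resource preloads such as (illustrative example):
        #   var PlayerScene = preload("res://scenes/player.tscn")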
        preloads = []
        pattern = re.compile(r'preload\s*\(\s*["\']([^"\']+)["\']\s*\)')

        for match in pattern.finditer(self.content):
            preloads.append({"path": match.group(1)})

        return preloads

    def _get_autoload_refs(self) -> list[str]:
        """Extract references to autoloaded singletons"""
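        # NOTE: this singleton list is hardcoded and must be kept in sync with
        # the [autoload] section of project.godot.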
        pattern = re.compile(
            r"\b(GameManager|AudioManager|DebugManager|SaveManager|SettingsManager|LocalizationManager|UIConstants)\b"
        )

        return sorted(
            {match.group(1) for match in pattern.finditer(self.content)}
        )

    def _parse_signal_emissions(self) -> list[dict[str, str]]:
        """Extract signal emission calls"""
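        # Matches Godot 4 emission style such as (illustrative example):
        #   health_changed.emit(old_value, new_value)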
        emissions = []
        pattern = re.compile(r"(\w+)\.emit\s*\(([^)]*)\)")

        for match in pattern.finditer(self.content):
            emissions.append(
                {"signal": match.group(1), "arguments": match.group(2).strip()}
            )

        return emissions

    def _get_input_actions(self) -> list[str]:
        """Extract input action references"""
        pattern = re.compile(
            r'is_action_(?:pressed|released|just_pressed|just_released)\s*\(\s*["\']([^"\']+)["\']'
        )

        return sorted(
            {match.group(1) for match in pattern.finditer(self.content)}
        )


class SceneParser:
    """Parser for Godot scene (.tscn) files"""

    def __init__(self, file_path: Path, content: str):
        self.file_path = file_path
        self.content = content

    def parse(self) -> dict[str, Any]:
        """Parse scene file"""
        if not self.content:
            return {}

        return {
            "path": str(self.file_path),
            "format_version": self._get_format_version(),
            "uid": self._get_uid(),
            "ext_resources": self._parse_ext_resources(),
            "nodes": self._parse_nodes(),
            "connections": self._parse_connections(),
        }

    def _get_format_version(self) -> int | None:
        """Extract format version from first line"""
        first_line = self.content.split("\n")[0] if self.content else ""
        match = re.search(r"format=(\d+)", first_line)
        return int(match.group(1)) if match else None

    def _get_uid(self) -> str | None:
        """Extract scene UID from first line"""
        first_line = self.content.split("\n")[0] if self.content else ""
        match = re.search(r'uid="([^"]+)"', first_line)
        return match.group(1) if match else None

    def _parse_ext_resources(self) -> list[dict[str, Any]]:
        """Extract external resource references"""
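        # Matches .tscn resource headers such as (illustrative example):
        #   [ext_resource type="Script" path="res://player.gd" id="1_abc12"]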
        resources = []
        pattern = re.compile(
            r'\[ext_resource\s+type="([^"]+)"(?:\s+uid="([^"]+)")?\s+path="([^"]+)"(?:\s+id="([^"]+)")?\]'
        )

        for match in pattern.finditer(self.content):
            resources.append(
                {
                    "type": match.group(1),
                    "uid": match.group(2),
                    "path": match.group(3),
                    "id": match.group(4),
                }
            )

        return resources

    def _parse_nodes(self) -> list[dict[str, Any]]:
        """Extract node hierarchy"""
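        # Matches node headers such as (illustrative examples):
        #   [node name="Player" type="CharacterBody2D" parent="."]
        #   [node name="HUD" parent="." instance=ExtResource("2_xyz34")]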
        nodes = []
        pattern = re.compile(
            r'\[node\s+name="([^"]+)"(?:\s+type="([^"]+)")?(?:\s+parent="([^"]*)")?(?:[^\]]*instance=ExtResource\("([^"]+)"\))?[^\]]*\]'
        )

        for match in pattern.finditer(self.content):
            node_info = {
                "name": match.group(1),
                "type": match.group(2),
                "parent": match.group(3) or None,
                "instance": match.group(4),
            }

            # Look for script attachment
            start_pos = match.end()
            next_section = self.content.find("[", start_pos)
            if next_section == -1:
                next_section = len(self.content)

            node_section = self.content[start_pos:next_section]
            script_match = re.search(
                r'script\s*=\s*ExtResource\("([^"]+)"\)', node_section
            )
            if script_match:
                node_info["script"] = script_match.group(1)

            nodes.append(node_info)

        return nodes

    def _parse_connections(self) -> list[dict[str, str]]:
        """Extract signal connections"""
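        # Matches connection entries such as (illustrative example):
        #   [connection signal="pressed" from="StartButton" to="." method="_on_start_pressed"]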
        connections = []
        pattern = re.compile(
            r'\[connection\s+signal="([^"]+)"\s+from="([^"]*)"\s+to="([^"]*)"\s+method="([^"]+)"\]'
        )

        for match in pattern.finditer(self.content):
            connections.append(
                {
                    "signal": match.group(1),
                    "from": match.group(2),
                    "to": match.group(3),
                    "method": match.group(4),
                }
            )

        return connections


class ProjectConfigParser:
    """Parser for project.godot configuration file"""

    def __init__(self, content: str):
        self.content = content

    def parse(self) -> dict[str, Any]:
        """Parse project configuration"""
        return {
            "autoloads": self._parse_autoloads(),
            "input_actions": self._parse_input_actions(),
            "project_name": self._get_project_name(),
        }

    def _parse_autoloads(self) -> list[dict[str, str]]:
        """Extract autoload configuration"""
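        # Matches [autoload] entries such as (illustrative example):
        #   GameManager="*res://autoloads/game_manager.gd"
        # The leading "*" marks the autoload as enabled in Godot.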
        autoloads = []
        pattern = re.compile(r'([^/]+)="\*(.+)"')

        in_autoload_section = False
        for line in self.content.split("\n"):
            line = line.strip()

            if line.startswith("[autoload]"):
                in_autoload_section = True
                continue

            if in_autoload_section:
                if line.startswith("["):
                    break

                match = pattern.match(line)
                if match:
                    autoloads.append({"name": match.group(1), "path": match.group(2)})

        return autoloads

    def _parse_input_actions(self) -> dict[str, list[str]]:
        """Extract input action mappings"""
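        # [input] entries span multiple lines: the action name appears on the
        # "name={...}" line, and bound keys show up later as "physical_keycode": <int>.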
        actions = {}
        current_action = None

        in_input_section = False
        for line in self.content.split("\n"):
            line = line.strip()

            if line.startswith("[input]"):
                in_input_section = True
                continue

            if in_input_section:
                if line.startswith("["):
                    break

                if "=" in line:
                    action_name = line.split("=")[0].strip()
                    current_action = action_name
                    actions[action_name] = []

                if current_action and '"physical_keycode":' in line:
                    key_match = re.search(r'"physical_keycode":\s*(\d+)', line)
                    if key_match:
                        actions[current_action].append(f"Key({key_match.group(1)})")

        return actions

    def _get_project_name(self) -> str | None:
        """Extract project name"""
        match = re.search(r'config/name="([^"]+)"', self.content)
        return match.group(1) if match else None


class CodeMapGenerator:
    """Main code map generator"""

    def __init__(self, project_root: Path, verbose: bool = False):
        self.project_root = project_root
        self.verbose = verbose
        self.gitignore_patterns = self._read_gitignore()

    def _read_gitignore(self) -> list[str]:
        """Read .gitignore patterns"""
        gitignore_path = self.project_root / ".gitignore"
        if not gitignore_path.exists():
            return []

        patterns = []
        with open(gitignore_path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#"):
                    patterns.append(line)

        return patterns

    def _should_ignore(self, path: Path) -> bool:
        """Check if path should be ignored"""
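        # NOTE: this is a simplified .gitignore check (substring and exact-name
        # matching), not full gitignore glob semantics.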
        relative = path.relative_to(self.project_root)
        path_str = str(relative).replace("\\", "/")

        for pattern in self.gitignore_patterns:
            if pattern.endswith("/"):
                if any(part == pattern[:-1] for part in relative.parts):
                    return True
            elif pattern in path_str or relative.name == pattern:
                return True

        return False

    def generate(self) -> dict[str, Any]:
        """Generate complete code map"""
        if self.verbose:
            print("🔍 Scanning project directory...")

        gd_files = [
            f for f in self.project_root.rglob("*.gd") if not self._should_ignore(f)
        ]
        tscn_files = [
            f for f in self.project_root.rglob("*.tscn") if not self._should_ignore(f)
        ]

        if self.verbose:
            print(f"  Found {len(gd_files)} GDScript files")
            print(f"  Found {len(tscn_files)} scene files")

        # Parse project config
        project_config = self._parse_project_config()

        # Parse scripts
        scripts = self._parse_scripts(gd_files)

        # Parse scenes
        scenes = self._parse_scenes(tscn_files)

        # Build dependencies
        dependencies = self._analyze_dependencies(scripts, scenes)

        # Compile final map
        return {
            "project": {
                # Fall back to "Unknown" when project.godot is missing or has no name
                "name": project_config.get("project_name") or "Unknown",
                "root": str(self.project_root),
                "godot_version": "4.4",  # hardcoded; update when the engine version changes
            },
            "autoloads": project_config.get("autoloads", []),
            "input_actions": project_config.get("input_actions", {}),
            "scripts": scripts,
            "scenes": scenes,
            "dependencies": dependencies,
            "statistics": {
                "total_scripts": len(scripts),
                "total_scenes": len(scenes),
                "total_functions": sum(
                    len(s.get("functions", [])) for s in scripts.values()
                ),
                "total_signals": sum(
                    len(s.get("signals", [])) for s in scripts.values()
                ),
                "total_connections": sum(
                    len(s.get("connections", [])) for s in scenes.values()
                ),
            },
        }

    def _parse_project_config(self) -> dict[str, Any]:
        """Parse project.godot configuration"""
        project_godot_path = self.project_root / "project.godot"

        if not project_godot_path.exists():
            return {}

        if self.verbose:
            print("\n📋 Parsing project configuration...")

        with open(project_godot_path, encoding="utf-8") as f:
            parser = ProjectConfigParser(f.read())
            return parser.parse()

    def _parse_scripts(self, gd_files: list[Path]) -> dict[str, Any]:
        """Parse all GDScript files"""
        if self.verbose:
            print(f"\n📝 Parsing {len(gd_files)} GDScript files...")

        scripts = {}
        for i, gd_file in enumerate(gd_files):
            if self.verbose and (i + 1) % 10 == 0:
                print(f"  Progress: {i + 1}/{len(gd_files)}")

            try:
                with open(gd_file, encoding="utf-8") as f:
                    parser = GDScriptParser(gd_file, f.read())
                    res_path = self._to_res_path(gd_file)
                    scripts[res_path] = parser.parse()
            except Exception as e:
                if self.verbose:
                    print(f"  ⚠️ Error parsing {gd_file}: {e}")

        return scripts

    def _parse_scenes(self, tscn_files: list[Path]) -> dict[str, Any]:
        """Parse all scene files"""
        if self.verbose:
            print(f"\n🎬 Parsing {len(tscn_files)} scene files...")

        scenes = {}
        for i, tscn_file in enumerate(tscn_files):
            if self.verbose and (i + 1) % 10 == 0:
                print(f"  Progress: {i + 1}/{len(tscn_files)}")

            try:
                with open(tscn_file, encoding="utf-8") as f:
                    parser = SceneParser(tscn_file, f.read())
                    res_path = self._to_res_path(tscn_file)
                    scenes[res_path] = parser.parse()
            except Exception as e:
                if self.verbose:
                    print(f"  ⚠️ Error parsing {tscn_file}: {e}")

        return scenes

    def _to_res_path(self, file_path: Path) -> str:
        """Convert absolute path to res:// path"""
        relative = file_path.relative_to(self.project_root)
        return "res://" + str(relative).replace("\\", "/")

    def _analyze_dependencies(
        self, scripts: dict[str, Any], scenes: dict[str, Any]
    ) -> dict[str, Any]:
        """Analyze dependencies between scripts and scenes"""
        deps = {
            "script_to_script": [],
            "scene_to_script": [],
            "scene_to_scene": [],
            "signal_connections": [],
        }

        # Script to script dependencies (via preloads)
        for script_path, script_data in scripts.items():
            for preload in script_data.get("preloads", []):
                target = preload.get("path")
                if target and target.endswith(".gd"):
                    deps["script_to_script"].append(
                        {"from": script_path, "to": target, "type": "preload"}
                    )

        # Scene to script dependencies
        for scene_path, scene_data in scenes.items():
            for node in scene_data.get("nodes", []):
                if "script" in node:
                    script_id = node["script"]
                    for resource in scene_data.get("ext_resources", []):
                        if resource.get("id") == script_id:
                            deps["scene_to_script"].append(
                                {
                                    "scene": scene_path,
                                    "script": resource.get("path"),
                                    "node": node.get("name"),
                                }
                            )

        # Scene to scene dependencies (via instances)
        for scene_path, scene_data in scenes.items():
            for resource in scene_data.get("ext_resources", []):
                if resource.get("type") == "PackedScene":
                    deps["scene_to_scene"].append(
                        {
                            "parent": scene_path,
                            "instanced": resource.get("path"),
                            "type": "instance",
                        }
                    )

        # Signal connections
        for scene_path, scene_data in scenes.items():
            for connection in scene_data.get("connections", []):
                deps["signal_connections"].append(
                    {
                        "scene": scene_path,
                        "signal": connection.get("signal"),
                        "from_node": connection.get("from"),
                        "to_node": connection.get("to"),
                        "method": connection.get("method"),
                    }
                )

        return deps


def _extract_api_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract API-focused map: functions, signals, parameters, types"""
    scripts = {}
    for path, script_data in code_map.get("scripts", {}).items():
        scripts[path] = {
            "class_name": script_data.get("class_name"),
            "extends": script_data.get("extends"),
            "functions": script_data.get("functions", []),
            "signals": script_data.get("signals", []),
            "constants": script_data.get("constants", []),
            # Enums are stored as a dict of {enum_name: [members]}
            "enums": script_data.get("enums", {}),
        }

    return {
        "project": code_map["project"],
        "scripts": scripts,
        "statistics": code_map["statistics"],
    }


def _extract_architecture_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract architecture-focused map: autoloads, patterns, structure"""
    scripts = {}
    for path, script_data in code_map.get("scripts", {}).items():
        scripts[path] = {
            "class_name": script_data.get("class_name"),
            "extends": script_data.get("extends"),
            "autoload_refs": script_data.get("autoload_refs", []),
            "preloads": script_data.get("preloads", []),
        }

    return {
        "project": code_map["project"],
        "autoloads": code_map.get("autoloads", []),
        "scripts": scripts,
        "scenes": code_map.get("scenes", {}),
        "dependencies": code_map.get("dependencies", {}),
    }


def _extract_flows_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract flow-focused map: signals, connections, scene transitions"""
    scripts = {}
    for path, script_data in code_map.get("scripts", {}).items():
        if script_data.get("signals") or script_data.get("signal_emissions"):
            scripts[path] = {
                "signals": script_data.get("signals", []),
                "signal_emissions": script_data.get("signal_emissions", []),
            }

    scenes = {}
    for path, scene_data in code_map.get("scenes", {}).items():
        if scene_data.get("connections"):
            scenes[path] = {
                "connections": scene_data.get("connections", []),
            }

    return {
        "project": code_map["project"],
        "scripts": scripts,
        "scenes": scenes,
        "signal_connections": code_map.get("dependencies", {}).get(
            "signal_connections", []
        ),
    }


def _extract_security_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract security-focused map: validation patterns, error handling.

    Heuristic: a function is considered security-relevant when its name
    contains one of the keywords below.
    """
    scripts = {}
    security_keywords = ["validate", "check", "verify", "sanitize", "load", "save"]

    for path, script_data in code_map.get("scripts", {}).items():
        security_functions = [
            func
            for func in script_data.get("functions", [])
            if any(
                keyword in func.get("name", "").lower() for keyword in security_keywords
            )
        ]

        if security_functions or script_data.get("input_actions"):
            scripts[path] = {
                "functions": security_functions,
                "input_actions": script_data.get("input_actions", []),
            }

    return {
        "project": code_map["project"],
        "scripts": scripts,
        "input_actions": code_map.get("input_actions", {}),
    }


def _extract_assets_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract assets-focused map: dependencies, resources, licensing"""
    scripts = {}
    for path, script_data in code_map.get("scripts", {}).items():
        if script_data.get("preloads"):
            scripts[path] = {
                "preloads": script_data.get("preloads", []),
            }

    scenes = {}
    for path, scene_data in code_map.get("scenes", {}).items():
        if scene_data.get("ext_resources"):
            scenes[path] = {
                "resources": scene_data.get("ext_resources", []),
            }

    return {
        "project": code_map["project"],
        "scripts": scripts,
        "scenes": scenes,
        "dependencies": code_map.get("dependencies", {}),
    }


def _extract_metadata_map(code_map: dict[str, Any]) -> dict[str, Any]:
    """Extract metadata: statistics, quality metrics, TODOs"""
    # Note: TODOs would require storing file content in the main map.
    # For now, just extract statistics.
    return {
        "project": code_map["project"],
        "statistics": code_map["statistics"],
        "scripts_count": len(code_map.get("scripts", {})),
        "scenes_count": len(code_map.get("scenes", {})),
        "autoloads_count": len(code_map.get("autoloads", [])),
    }


def _generate_mermaid_diagrams(
    code_map: dict[str, Any], diagrams_dir: Path, verbose: bool = False
) -> list[Path]:
    """Generate Mermaid diagram source files"""
    diagrams_dir.mkdir(exist_ok=True, parents=True)
    generated_files = []

    if verbose:
        print("\n📊 Generating Mermaid diagrams...")

    # (filename, builder) pairs, in generation order
    builders = [
        ("architecture.mmd", _create_architecture_diagram),
        ("signal_flows.mmd", _create_signal_flows_diagram),
        ("scene_hierarchy.mmd", _create_scene_hierarchy_diagram),
        ("dependency_graph.mmd", _create_dependency_graph),
    ]

    for filename, builder in builders:
        mmd_file = diagrams_dir / filename
        mmd_file.write_text(builder(code_map), encoding="utf-8")
        generated_files.append(mmd_file)
        if verbose:
            print(f"  ✅ {mmd_file.name}")

    return generated_files


def _create_architecture_diagram(code_map: dict[str, Any]) -> str:
    """Create Mermaid architecture diagram showing autoloads and dependencies"""
    lines = ["graph TB"]

    # Add autoloads as nodes
    for autoload in code_map.get("autoloads", []):
        name = autoload.get("name", "Unknown")
        lines.append(f'    {name}["{name}"]')

    # Add dependencies between autoloads (based on autoload_refs)
    for script_path, script_data in code_map.get("scripts", {}).items():
        if "autoloads" in script_path:
            script_name = Path(script_path).stem
            for ref in script_data.get("autoload_refs", []):
                if ref != script_name:
                    lines.append(f"    {script_name} --> {ref}")

    return "\n".join(lines)


def _create_signal_flows_diagram(code_map: dict[str, Any]) -> str:
    """Create Mermaid diagram showing signal connections"""
    lines = ["graph LR"]

    connections = code_map.get("dependencies", {}).get("signal_connections", [])
    for conn in connections[:20]:  # Limit to first 20 to avoid clutter
        signal = conn.get("signal", "unknown")
        from_node = conn.get("from_node", "")
        to_node = conn.get("to_node", "")

        if from_node and to_node:
            lines.append(f"    {from_node} -->|{signal}| {to_node}")

    return (
        "\n".join(lines)
        if len(lines) > 1
        else "graph LR\n    Note[No signal connections found]"
    )


def _create_scene_hierarchy_diagram(code_map: dict[str, Any]) -> str:
    """Create Mermaid diagram showing scene hierarchy"""
    lines = ["graph TD"]

    for scene_path, scene_data in code_map.get("scenes", {}).items():
        scene_name = Path(scene_path).stem
        for node in scene_data.get("nodes", [])[:10]:  # Limit nodes per scene
            node_name = node.get("name", "Unknown")
            node_type = node.get("type", "")
            parent = node.get("parent")

            if parent == "." or not parent:
                lines.append(
                    f'    {scene_name} --> {node_name}["{node_name}\\n{node_type}"]'
                )

    return "\n".join(lines) if len(lines) > 1 else "graph TD\n    Note[No scenes found]"


def _create_dependency_graph(code_map: dict[str, Any]) -> str:
    """Create Mermaid diagram showing module dependencies"""
    lines = ["graph LR"]

    # Script to script dependencies
    deps = code_map.get("dependencies", {}).get("script_to_script", [])
    for dep in deps[:30]:  # Limit to avoid clutter
        from_script = Path(dep.get("from", "")).stem
        to_script = Path(dep.get("to", "")).stem
        lines.append(f"    {from_script} --> {to_script}")

    return (
        "\n".join(lines)
        if len(lines) > 1
        else "graph LR\n    Note[No dependencies found]"
    )


def _render_diagrams_with_matplotlib(
    mmd_files: list[Path], verbose: bool = False
) -> list[Path]:
    """Draw simplified PNG previews (labeled node boxes) from Mermaid sources.

    Note: this is not a true Mermaid renderer; it only extracts node labels
    from the .mmd source and lays them out in a grid with matplotlib.
    """
    try:
        import matplotlib

        matplotlib.use("Agg")  # Non-GUI backend
        import matplotlib.patches as patches
        import matplotlib.pyplot as plt
    except ImportError:
        if verbose:
            print("  ⚠️ matplotlib not available, cannot generate diagrams")
        return []

    if verbose:
        print("\n🎨 Rendering diagrams to PNG...")

    rendered_files = []

    for mmd_file in mmd_files:
        try:
            content = mmd_file.read_text(encoding="utf-8")
            diagram_name = mmd_file.stem

            # Create figure
            fig, ax = plt.subplots(figsize=(12, 8))
            ax.set_xlim(0, 10)
            ax.set_ylim(0, 10)
            ax.axis("off")

            # Add title
            title_map = {
                "architecture": "Autoload System Architecture",
                "signal_flows": "Signal Flow Connections",
                "scene_hierarchy": "Scene Hierarchy",
                "dependency_graph": "Module Dependencies",
            }
            title = title_map.get(diagram_name, diagram_name.replace("_", " ").title())
            ax.text(5, 9.5, title, ha="center", va="top", fontsize=16, weight="bold")

            # Parse simple nodes from Mermaid (basic extraction)
            nodes = []
            for line in content.split("\n"):
                line = line.strip()
                if "[" in line and "]" in line:
                    # Extract node name
                    parts = line.split("[")
                    if len(parts) > 1:
                        node_name = parts[1].split("]")[0]
                        if node_name and not node_name.startswith("_"):
                            nodes.append(node_name)

            # Remove duplicates while preserving order
            seen = set()
            unique_nodes = []
            for node in nodes:
                if node not in seen:
                    seen.add(node)
                    unique_nodes.append(node)

            # Layout nodes in a grid
            if unique_nodes:
                cols = min(3, len(unique_nodes))
                rows = (len(unique_nodes) + cols - 1) // cols

                y_start = 8
                y_spacing = 6 / max(rows, 1)
                x_spacing = 9 / max(cols, 1)

                for i, node in enumerate(unique_nodes[:15]):  # Limit to 15 nodes
                    row = i // cols
                    col = i % cols
                    x = 0.5 + col * x_spacing + x_spacing / 2
                    y = y_start - row * y_spacing

                    # Draw box
                    rect = patches.FancyBboxPatch(
                        (x - 1, y - 0.3),
                        2,
                        0.6,
                        boxstyle="round,pad=0.1",
                        edgecolor="#3498db",
                        facecolor="#ecf0f1",
                        linewidth=2,
                    )
                    ax.add_patch(rect)

                    # Add text
                    ax.text(x, y, node, ha="center", va="center", fontsize=9, weight="bold")
            else:
                # No nodes found
                ax.text(
                    5, 5, "No diagram data available",
                    ha="center", va="center", fontsize=12, style="italic",
                )

            # Add note
            ax.text(
                5, 0.3, "Auto-generated diagram",
                ha="center", va="bottom", fontsize=8, style="italic", color="gray",
            )

            plt.tight_layout()

            png_file = mmd_file.with_suffix(".png")
            plt.savefig(png_file, dpi=150, bbox_inches="tight", facecolor="white")
            plt.close()

            rendered_files.append(png_file)
            if verbose:
                print(f"  ✅ {png_file.name}")

        except Exception as e:
            if verbose:
                print(f"  ⚠️ Error rendering {mmd_file.name}: {e}")

    return rendered_files


def _render_mermaid_to_png(mmd_files: list[Path], verbose: bool = False) -> list[Path]:
    """Render Mermaid files to PNG using matplotlib"""
    return _render_diagrams_with_matplotlib(mmd_files, verbose)


def _generate_markdown_docs(
    code_map: dict[str, Any], docs_dir: Path, diagrams_dir: Path, verbose: bool = False
):
    """Generate human-readable markdown documentation"""
    docs_dir.mkdir(exist_ok=True, parents=True)

    if verbose:
        print("\n📝 Generating markdown documentation...")

    # Copy diagrams to docs directory for standalone viewing
    docs_diagrams_dir = docs_dir / "diagrams"
    docs_diagrams_dir.mkdir(exist_ok=True, parents=True)

    if diagrams_dir.exists():
        if verbose:
            print("  📋 Copying diagrams to docs directory...")
        for diagram_file in diagrams_dir.glob("*.png"):
            dest_file = docs_diagrams_dir / diagram_file.name
            shutil.copy2(diagram_file, dest_file)
            if verbose:
                print(f"    ✅ {diagram_file.name}")

        # Also copy Mermaid source files for reference
        for mmd_file in diagrams_dir.glob("*.mmd"):
            dest_file = docs_diagrams_dir / mmd_file.name
            shutil.copy2(mmd_file, dest_file)

    # Autoloads API reference
    _generate_autoloads_api_doc(code_map, docs_dir, docs_diagrams_dir, verbose)

    # Signals catalog
    _generate_signals_catalog(code_map, docs_dir, docs_diagrams_dir, verbose)

    # Function index
    _generate_function_index(code_map, docs_dir, verbose)

    # Scene reference
    _generate_scene_reference(code_map, docs_dir, docs_diagrams_dir, verbose)

    # TODO list
    _generate_todo_list(code_map, docs_dir, verbose)


def _generate_autoloads_api_doc(
    code_map: dict[str, Any], docs_dir: Path, diagrams_dir: Path, verbose: bool
):
    """Generate autoloads API reference"""
    output = ["# Autoloads API Reference", "", "*Auto-generated documentation*", ""]

    # Embed architecture diagram if it exists
    arch_diagram = diagrams_dir / "architecture.png"
    if arch_diagram.exists():
        output.append(f"![Architecture Diagram](diagrams/{arch_diagram.name})")
        output.append("")

    for autoload in code_map.get("autoloads", []):
        name = autoload.get("name", "Unknown")
        path = autoload.get("path", "")
        output.append(f"## {name}")
        output.append(f"**Path**: `{path}`")
        output.append("")

        # Find script data
        script_data = code_map.get("scripts", {}).get(path, {})

        # Add functions
        functions = script_data.get("functions", [])
        if functions:
            output.append("### Functions")
            output.append("")
            for func in functions:
                params = ", ".join(
                    f"{p['name']}: {p.get('type', 'Variant')}"
                    for p in func.get("parameters", [])
                )
                return_type = func.get("return_type", "void")
                output.append(f"- **`{func['name']}({params}) -> {return_type}`**")
                if func.get("docstring"):
                    output.append(f"  - {func['docstring']}")
            output.append("")

        # Add signals
        signals = script_data.get("signals", [])
        if signals:
            output.append("### Signals")
            output.append("")
            for sig in signals:
                params = ", ".join(
                    f"{p['name']}: {p.get('type', 'Variant')}"
                    for p in sig.get("parameters", [])
                )
                output.append(f"- **`{sig['name']}({params})`**")
            output.append("")

    doc_file = docs_dir / "AUTOLOADS_API.md"
    doc_file.write_text("\n".join(output), encoding="utf-8")
    if verbose:
        print(f"  ✅ {doc_file.name}")


def _generate_signals_catalog(
    code_map: dict[str, Any], docs_dir: Path, diagrams_dir: Path, verbose: bool
):
    """Generate signals catalog"""
    output = ["# Signals Catalog", "", "*Auto-generated documentation*", ""]

    # Embed signal flows diagram if it exists
    signals_diagram = diagrams_dir / "signal_flows.png"
    if signals_diagram.exists():
        output.append(f"![Signal Flows](diagrams/{signals_diagram.name})")
        output.append("")

output.append("## Signal Definitions")
|
|
output.append("")
|
|
|
|
for script_path, script_data in code_map.get("scripts", {}).items():
|
|
signals = script_data.get("signals", [])
|
|
if signals:
|
|
script_name = Path(script_path).stem
|
|
output.append(f"### {script_name}")
|
|
output.append(f"*Source: `{script_path}`*")
|
|
output.append("")
|
|
for sig in signals:
|
|
params = ", ".join(
|
|
[
|
|
f"{p['name']}: {p.get('type', 'Variant')}"
|
|
for p in sig.get("parameters", [])
|
|
]
|
|
)
|
|
output.append(f"- **`{sig['name']}({params})`**")
|
|
output.append("")
|
|
|
|
doc_file = docs_dir / "SIGNALS_CATALOG.md"
|
|
doc_file.write_text("\n".join(output), encoding="utf-8")
|
|
if verbose:
|
|
print(f" ✅ {doc_file.name}")
|
|
|
|
|
|
def _generate_function_index(code_map: dict[str, Any], docs_dir: Path, verbose: bool):
    """Generate searchable function index"""
    output = ["# Function Index", "", "*Auto-generated documentation*", ""]

    for script_path, script_data in code_map.get("scripts", {}).items():
        functions = script_data.get("functions", [])
        if functions:
            script_name = Path(script_path).stem
            output.append(f"## {script_name}")
            output.append(f"*Source: `{script_path}`*")
            output.append("")
            for func in functions:
                params = ", ".join(
                    f"{p['name']}: {p.get('type', 'Variant')}"
                    for p in func.get("parameters", [])
                )
                return_type = func.get("return_type", "void")
                output.append(f"### `{func['name']}({params}) -> {return_type}`")
                if func.get("docstring"):
                    output.append(f"{func['docstring']}")
                output.append("")

    doc_file = docs_dir / "FUNCTION_INDEX.md"
    doc_file.write_text("\n".join(output), encoding="utf-8")
    if verbose:
        print(f"  ✅ {doc_file.name}")


def _generate_scene_reference(
    code_map: dict[str, Any], docs_dir: Path, diagrams_dir: Path, verbose: bool
):
    """Generate scene reference documentation"""
    output = ["# Scene Reference", "", "*Auto-generated documentation*", ""]

    # Embed scene hierarchy diagram if it exists
    scene_diagram = diagrams_dir / "scene_hierarchy.png"
    if scene_diagram.exists():
        output.append(f"![Scene Hierarchy](diagrams/{scene_diagram.name})")
        output.append("")

    for scene_path, scene_data in code_map.get("scenes", {}).items():
        scene_name = Path(scene_path).stem
        output.append(f"## {scene_name}")
        output.append(f"*Path: `{scene_path}`*")
        output.append("")

        nodes = scene_data.get("nodes", [])
        if nodes:
            output.append("### Nodes")
            for node in nodes[:15]:  # Limit to avoid huge docs
                output.append(f"- **{node.get('name')}** ({node.get('type', 'Node')})")
                if node.get("script"):
                    output.append(f"  - Script: `{node.get('script')}`")
            output.append("")

    doc_file = docs_dir / "SCENE_REFERENCE.md"
    doc_file.write_text("\n".join(output), encoding="utf-8")
    if verbose:
        print(f"  ✅ {doc_file.name}")


def _generate_todo_list(code_map: dict[str, Any], docs_dir: Path, verbose: bool):
    """Generate TODO list from code comments (placeholder for now)"""
    output = ["# TODO List", "", "*Auto-generated from code comments*", ""]
    output.append("_Note: TODO extraction not yet implemented_")
    output.append("")
    output.append("To add TODO tracking:")
    output.append("1. Parse file content during code map generation")
    output.append("2. Extract TODO, FIXME, NOTE, HACK comments")
    output.append("3. Group by file/category")

    doc_file = docs_dir / "TODO_LIST.md"
    doc_file.write_text("\n".join(output), encoding="utf-8")
    if verbose:
        print(f"  ✅ {doc_file.name}")


def _generate_metrics_dashboard(
    code_map: dict[str, Any], docs_dir: Path, verbose: bool
):
    """Generate metrics dashboard markdown with charts"""
    if verbose:
        print("\n📈 Generating metrics dashboard...")

    try:
        import matplotlib

        matplotlib.use("Agg")  # Non-GUI backend
        import matplotlib.pyplot as plt  # noqa: F401 -- verifies pyplot is importable
    except ImportError:
        if verbose:
            print("  ⚠️ matplotlib not available, skipping metrics")
        return

    # Create charts directory
    charts_dir = docs_dir / "charts"
    charts_dir.mkdir(exist_ok=True)

    # Generate charts
    _create_stats_chart(code_map, charts_dir, verbose)
    _create_complexity_chart(code_map, charts_dir, verbose)

    # Generate Markdown dashboard
    markdown_content = _create_dashboard_markdown(code_map)

    dashboard_file = docs_dir / "METRICS.md"
    dashboard_file.write_text(markdown_content, encoding="utf-8")
    if verbose:
        print(f"  ✅ {dashboard_file.name}")


def _create_stats_chart(code_map: dict[str, Any], charts_dir: Path, verbose: bool):
    """Create statistics overview chart"""
    import matplotlib.pyplot as plt

    stats = code_map.get("statistics", {})
    labels = ["Scripts", "Scenes", "Functions", "Signals", "Connections"]
    values = [
        stats.get("total_scripts", 0),
        stats.get("total_scenes", 0),
        stats.get("total_functions", 0),
        stats.get("total_signals", 0),
        stats.get("total_connections", 0),
    ]

    plt.figure(figsize=(10, 6))
    plt.bar(
        labels, values, color=["#3498db", "#2ecc71", "#f39c12", "#e74c3c", "#9b59b6"]
    )
    plt.title("Project Statistics Overview")
    plt.ylabel("Count")
    plt.tight_layout()

    chart_file = charts_dir / "stats_overview.png"
    plt.savefig(chart_file, dpi=150)
    plt.close()

    if verbose:
        print(f"  ✅ {chart_file.name}")


def _create_complexity_chart(code_map: dict[str, Any], charts_dir: Path, verbose: bool):
    """Create complexity metrics chart"""
    import matplotlib.pyplot as plt

    # Calculate functions per script
    script_complexity = []
    for script_path, script_data in code_map.get("scripts", {}).items():
        func_count = len(script_data.get("functions", []))
        if func_count > 0:
            script_complexity.append((Path(script_path).stem, func_count))

    # Sort and get top 10
    script_complexity.sort(key=lambda x: x[1], reverse=True)
    top_scripts = script_complexity[:10]

    if top_scripts:
        labels = [s[0] for s in top_scripts]
        values = [s[1] for s in top_scripts]

        plt.figure(figsize=(12, 6))
        plt.barh(labels, values, color="#3498db")
        plt.xlabel("Function Count")
        plt.title("Top 10 Scripts by Function Count")
        plt.tight_layout()

        chart_file = charts_dir / "complexity_graph.png"
        plt.savefig(chart_file, dpi=150)
        plt.close()

        if verbose:
            print(f"  ✅ {chart_file.name}")


def _create_dashboard_markdown(code_map: dict[str, Any]) -> str:
    """Create Markdown dashboard with embedded charts"""
    stats = code_map.get("statistics", {})
    project_name = code_map.get("project", {}).get("name", "Unknown")

    total_scripts = stats.get("total_scripts", 0)
    total_scenes = stats.get("total_scenes", 0)
    total_functions = stats.get("total_functions", 0)
    total_signals = stats.get("total_signals", 0)
    total_connections = stats.get("total_connections", 0)

    output = [
        "# Code Metrics Dashboard",
        "",
        f"*Auto-generated: {project_name}*",
        "",
        "## Project Statistics",
        "",
        "| Metric | Count |",
        "|--------|-------|",
        f"| Scripts | {total_scripts} |",
        f"| Scenes | {total_scenes} |",
        f"| Functions | {total_functions} |",
        f"| Signals | {total_signals} |",
        f"| Connections | {total_connections} |",
        "",
        "## Statistics Overview",
        "",
        "![Statistics Overview](charts/stats_overview.png)",
        "",
        "## Complexity Analysis",
        "",
        "![Complexity Graph](charts/complexity_graph.png)",
        "",
    ]

    return "\n".join(output)


def main():
    """Main entry point"""
    parser = argparse.ArgumentParser(
        description="Generate code maps, diagrams, and documentation"
    )
    parser.add_argument("--output", "-o", help="Custom output file (overrides default)")
    parser.add_argument(
        "--verbose", "-v", action="store_true", help="Enable verbose output"
    )
    parser.add_argument(
        "--diagrams", action="store_true", help="Generate diagrams (Mermaid -> PNG)"
    )
    parser.add_argument(
        "--docs", action="store_true", help="Generate human-readable markdown docs"
    )
    parser.add_argument(
        "--metrics", action="store_true", help="Generate metrics dashboard (Markdown)"
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help="Generate everything (maps + diagrams + docs + metrics)",
    )
    parser.add_argument(
        "--no-render",
        action="store_true",
        help="Skip PNG rendering (Mermaid source only)",
    )

    args = parser.parse_args()

    # Determine project root (this script lives in tools/)
    project_root = Path(__file__).parent.parent

    print("🗺️ Code Map Generator")
    print("=" * 48)
    print(f"Project root: {project_root}")
    print()

    # Determine what to generate.
    # If no specific flags are provided, generate everything.
    no_flags = not any([args.diagrams, args.docs, args.metrics, args.all])
    generate_maps = no_flags or args.all
    generate_diagrams = no_flags or args.diagrams or args.all
    generate_docs = no_flags or args.docs or args.all
    generate_metrics = no_flags or args.metrics or args.all

    # Generate map
    generator = CodeMapGenerator(project_root, verbose=args.verbose)
    code_map = generator.generate()

    # Write output
    if args.output:
        # Custom output path (single JSON file)
        output_path = project_root / args.output
        if args.verbose:
            print(f"\n💾 Writing to {output_path}...")
        write_json_minified(code_map, output_path)
        print(f"\n✅ Code map generated: {output_path}")
    else:
        # Default: generate based on flags
        llm_dir, docs_dir = ensure_output_dirs(project_root)
        diagrams_dir = llm_dir / "diagrams"

        # Flush target directories before generating
        if args.verbose:
            print("\n🧹 Flushing target directories...")

        if generate_maps:
            if args.verbose:
                print(f"  Flushing {llm_dir}...")
            # Flush only JSON map files, preserve diagrams subdirectory
            for item in llm_dir.iterdir():
                if item.is_file() and item.suffix == ".json":
                    item.unlink()
                    if args.verbose:
                        print(f"    Removed: {item.name}")

        if generate_diagrams:
            if args.verbose:
                print(f"  Flushing {diagrams_dir}...")
            flush_directory(diagrams_dir, args.verbose)

        if generate_docs or generate_metrics:
            if args.verbose:
                print(f"  Flushing {docs_dir}...")
            flush_directory(docs_dir, args.verbose)

        if args.verbose:
            print("✅ Directories flushed\n")

        # Generate JSON maps
        if generate_maps:
            if args.verbose:
                print(f"\n💾 Writing JSON maps to {llm_dir}...")

            # Extract and write specialized maps
            api_map = _extract_api_map(code_map)
            architecture_map = _extract_architecture_map(code_map)
            flows_map = _extract_flows_map(code_map)
            security_map = _extract_security_map(code_map)
            assets_map = _extract_assets_map(code_map)
            metadata_map = _extract_metadata_map(code_map)

            write_json_minified(api_map, llm_dir / "code_map_api.json")
            write_json_minified(
                architecture_map, llm_dir / "code_map_architecture.json"
            )
            write_json_minified(flows_map, llm_dir / "code_map_flows.json")
            write_json_minified(security_map, llm_dir / "code_map_security.json")
            write_json_minified(assets_map, llm_dir / "code_map_assets.json")
            write_json_minified(metadata_map, llm_dir / "code_map_metadata.json")

            print("✅ JSON code maps generated")

        # Generate diagrams FIRST (so they can be embedded in docs)
        if generate_diagrams:
            mmd_files = _generate_mermaid_diagrams(code_map, diagrams_dir, args.verbose)

            if not args.no_render:
                _render_mermaid_to_png(mmd_files, args.verbose)
            else:
                print("  ⏭️ Skipping PNG rendering (--no-render flag)")

        # Generate markdown docs AFTER diagrams (so PNGs are available)
        if generate_docs:
            _generate_markdown_docs(code_map, docs_dir, diagrams_dir, args.verbose)

        # Generate metrics dashboard AFTER diagrams (uses charts)
        if generate_metrics:
            _generate_metrics_dashboard(code_map, docs_dir, args.verbose)

    print("\n📊 Statistics:")
    print(f"  - Scripts: {code_map['statistics']['total_scripts']}")
    print(f"  - Scenes: {code_map['statistics']['total_scenes']}")
    print(f"  - Functions: {code_map['statistics']['total_functions']}")
    print(f"  - Signals: {code_map['statistics']['total_signals']}")
    print(f"  - Connections: {code_map['statistics']['total_connections']}")

    # Output locations only exist in the default (non --output) mode;
    # guarding here avoids a NameError on llm_dir/docs_dir.
    if not args.output:
        print("\n📁 Output locations:")
        if generate_maps:
            print(f"  - JSON maps: {llm_dir}")
        if generate_diagrams:
            print(f"  - Diagrams: {diagrams_dir}")
        if generate_docs or generate_metrics:
            print(f"  - Documentation: {docs_dir}")


if __name__ == "__main__":
    main()