2460 lines
85 KiB
Python
2460 lines
85 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Development workflow runner for the Skelly Godot project.
|
|
|
|
Runs code quality checks (linting, formatting, testing, validation) individually or together.
|
|
Provides colored output and error reporting.
|
|
|
|
Usage examples:
|
|
python tools/run_development.py # Run all steps (auto-async)
|
|
python tools/run_development.py --lint # Only GDScript linting
|
|
python tools/run_development.py --ruff # Only Python format & lint
|
|
python tools/run_development.py --validate # Only file validation
|
|
python tools/run_development.py --validate --silent # Silent validation (only errors)
|
|
python tools/run_development.py --naming # Only naming convention check
|
|
python tools/run_development.py --test --yaml # Test results in YAML format
|
|
python tools/run_development.py --steps lint ruff test # Custom workflow
|
|
python tools/run_development.py --async-mode # Force async mode
|
|
|
|
Features:
|
|
- GDScript formatting and linting
|
|
- Python formatting & linting with Ruff (fast formatter + linter with auto-fix)
|
|
- Test execution
|
|
- YAML, TOML, and JSON file validation (respects .gitignore)
|
|
- PascalCase naming convention checks for .tscn and .gd files
|
|
- Colored output and comprehensive error reporting
|
|
- Machine-readable YAML output for CI/CD integration
|
|
- **Async processing for significantly faster execution with multiple files**
|
|
|
|
Performance:
|
|
- Async mode automatically enabled for multiple steps when aiofiles is available
|
|
- Processes multiple files concurrently (3-5x speed improvement on larger codebases)
|
|
- Uses semaphores to prevent system overload
|
|
- Falls back to synchronous mode if async fails
|
|
|
|
NOTE: Handles "successful but noisy" linter output such as
|
|
"Success: no problems found" - treats these as clean instead of warnings.
|
|
"""
|
|
|
|
import argparse
|
|
import asyncio
|
|
import json
|
|
import os
|
|
import re
|
|
import subprocess
|
|
import sys
|
|
import time
|
|
import warnings
|
|
from io import BytesIO
|
|
from pathlib import Path
|
|
from typing import Dict, List, Tuple
|
|
|
|
try:
|
|
import aiofiles
|
|
except ImportError:
|
|
aiofiles = None
|
|
|
|
try:
|
|
import yaml
|
|
except ImportError:
|
|
yaml = None
|
|
|
|
try:
|
|
import tomllib
|
|
except ImportError:
|
|
try:
|
|
import tomli as tomllib
|
|
except ImportError:
|
|
tomllib = None
|
|
|
|
# Suppress pkg_resources deprecation warning from gdtoolkit
|
|
warnings.filterwarnings(
|
|
"ignore", message="pkg_resources is deprecated", category=UserWarning
|
|
)
|
|
|
|
|
|
class Colors:
|
|
"""ANSI color codes for terminal output."""
|
|
|
|
# Basic colors
|
|
RED = "\033[91m"
|
|
GREEN = "\033[92m"
|
|
YELLOW = "\033[93m"
|
|
BLUE = "\033[94m"
|
|
MAGENTA = "\033[95m"
|
|
CYAN = "\033[96m"
|
|
WHITE = "\033[97m"
|
|
|
|
# Styles
|
|
BOLD = "\033[1m"
|
|
UNDERLINE = "\033[4m"
|
|
|
|
# Reset
|
|
RESET = "\033[0m"
|
|
|
|
@staticmethod
|
|
def colorize(text: str, color: str) -> str:
|
|
"""Add color to text if terminal supports it."""
|
|
if os.getenv("NO_COLOR") or not sys.stdout.isatty():
|
|
return text
|
|
return f"{color}{text}{Colors.RESET}"
|
|
|
|
|
|
def print_header(title: str, silent: bool = False) -> None:
|
|
"""Print a formatted header."""
|
|
if not silent:
|
|
separator = Colors.colorize("=" * 48, Colors.CYAN)
|
|
colored_title = Colors.colorize(title, Colors.BOLD + Colors.WHITE)
|
|
print(separator)
|
|
print(colored_title)
|
|
print(separator)
|
|
print()
|
|
|
|
|
|
def print_summary(title: str, stats: Dict[str, int], silent: bool = False) -> None:
|
|
"""Print a formatted summary."""
|
|
if not silent:
|
|
separator = Colors.colorize("=" * 48, Colors.CYAN)
|
|
colored_title = Colors.colorize(title, Colors.BOLD + Colors.WHITE)
|
|
print(separator)
|
|
print(colored_title)
|
|
print(separator)
|
|
for key, value in stats.items():
|
|
colored_key = Colors.colorize(key, Colors.BLUE)
|
|
colored_value = Colors.colorize(str(value), Colors.BOLD + Colors.WHITE)
|
|
print(f"{colored_key}: {colored_value}")
|
|
|
|
|
|
def output_yaml_results(step_name: str, results: Dict, success: bool) -> None:
|
|
"""Output results in YAML format."""
|
|
if yaml is None:
|
|
print("# YAML output requires PyYAML. Install with: pip install PyYAML")
|
|
return
|
|
|
|
# Convert results to YAML-friendly format
|
|
yaml_data = {
|
|
"step": step_name,
|
|
"success": success,
|
|
"timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
|
|
"statistics": {},
|
|
"failed_items": [],
|
|
}
|
|
|
|
# Extract statistics
|
|
for key, value in results.items():
|
|
if key != "failed_paths" and key != "results":
|
|
yaml_data["statistics"][key.lower().replace(" ", "_")] = value
|
|
|
|
# Extract failed items
|
|
if "failed_paths" in results:
|
|
yaml_data["failed_items"] = results["failed_paths"]
|
|
elif "results" in results:
|
|
# For test results, extract failed tests
|
|
failed_tests = [result[0] for result in results["results"] if not result[1]]
|
|
yaml_data["failed_items"] = failed_tests
|
|
|
|
print(yaml.dump(yaml_data, default_flow_style=False, sort_keys=False))
|
|
|
|
|
|
def run_command(
|
|
cmd: List[str], cwd: Path, timeout: int = 30
|
|
) -> subprocess.CompletedProcess:
|
|
"""
|
|
Execute a shell command with error handling and output filtering.
|
|
|
|
Filters out gdtoolkit's pkg_resources deprecation warnings.
|
|
|
|
Args:
|
|
cmd: Command and arguments to execute
|
|
cwd: Working directory for command execution
|
|
timeout: Maximum execution time in seconds (default: 30s)
|
|
|
|
Returns:
|
|
CompletedProcess with filtered stdout/stderr
|
|
"""
|
|
# Suppress pkg_resources deprecation warnings in subprocesses
|
|
env = os.environ.copy()
|
|
env["PYTHONWARNINGS"] = "ignore::UserWarning:pkg_resources"
|
|
result = subprocess.run(
|
|
cmd, capture_output=True, text=True, cwd=cwd, timeout=timeout, env=env
|
|
)
|
|
|
|
# Filter out pkg_resources deprecation warnings from the output
|
|
def filter_warnings(text: str) -> str:
|
|
if not text:
|
|
return text
|
|
lines = text.split("\n")
|
|
filtered_lines = []
|
|
skip_next = False
|
|
|
|
for line in lines:
|
|
if skip_next:
|
|
skip_next = False
|
|
continue
|
|
if "pkg_resources is deprecated" in line:
|
|
skip_next = (
|
|
True # Skip the next line which contains "import pkg_resources"
|
|
)
|
|
continue
|
|
if "import pkg_resources" in line:
|
|
continue
|
|
filtered_lines.append(line)
|
|
|
|
return "\n".join(filtered_lines)
|
|
|
|
# Create a new result with filtered output
|
|
result.stdout = filter_warnings(result.stdout)
|
|
result.stderr = filter_warnings(result.stderr)
|
|
|
|
return result
|
|
|
|
|
|
async def run_command_async(
|
|
cmd: List[str], cwd: Path, timeout: int = 30
|
|
) -> Tuple[int, str, str]:
|
|
"""
|
|
Execute a shell command asynchronously with error handling and output filtering.
|
|
|
|
Filters out gdtoolkit's pkg_resources deprecation warnings.
|
|
|
|
Args:
|
|
cmd: Command and arguments to execute
|
|
cwd: Working directory for command execution
|
|
timeout: Maximum execution time in seconds (default: 30s)
|
|
|
|
Returns:
|
|
Tuple of (returncode, stdout, stderr)
|
|
"""
|
|
# Suppress pkg_resources deprecation warnings in subprocesses
|
|
env = os.environ.copy()
|
|
env["PYTHONWARNINGS"] = "ignore::UserWarning:pkg_resources"
|
|
|
|
try:
|
|
process = await asyncio.create_subprocess_exec(
|
|
*cmd,
|
|
stdout=asyncio.subprocess.PIPE,
|
|
stderr=asyncio.subprocess.PIPE,
|
|
cwd=cwd,
|
|
env=env,
|
|
)
|
|
|
|
stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
|
|
|
|
stdout_text = stdout.decode("utf-8", errors="replace")
|
|
stderr_text = stderr.decode("utf-8", errors="replace")
|
|
|
|
# Filter out pkg_resources deprecation warnings from the output
|
|
def filter_warnings(text: str) -> str:
|
|
if not text:
|
|
return text
|
|
lines = text.split("\n")
|
|
filtered_lines = []
|
|
skip_next = False
|
|
|
|
for line in lines:
|
|
if skip_next:
|
|
skip_next = False
|
|
continue
|
|
if "pkg_resources is deprecated" in line:
|
|
skip_next = (
|
|
True # Skip the next line which contains "import pkg_resources"
|
|
)
|
|
continue
|
|
if "import pkg_resources" in line:
|
|
continue
|
|
filtered_lines.append(line)
|
|
|
|
return "\n".join(filtered_lines)
|
|
|
|
return (
|
|
process.returncode,
|
|
filter_warnings(stdout_text),
|
|
filter_warnings(stderr_text),
|
|
)
|
|
|
|
except asyncio.TimeoutError:
|
|
if process.returncode is None:
|
|
process.kill()
|
|
await process.wait()
|
|
raise subprocess.TimeoutExpired(cmd, timeout)
|
|
|
|
|
|
def should_skip_file(file_path: Path) -> bool:
|
|
"""Check if file should be skipped."""
|
|
return file_path.name == "TestHelper.gd"
|
|
|
|
|
|
def print_skip_message(tool: str) -> None:
|
|
"""Print skip message for TestHelper.gd."""
|
|
message = f"⚠️ Skipped (static var syntax not supported by {tool})"
|
|
colored_message = Colors.colorize(message, Colors.YELLOW)
|
|
print(f" {colored_message}")
|
|
|
|
|
|
def print_result(success: bool, output: str = "", silent: bool = False) -> None:
|
|
"""Print command result."""
|
|
if success:
|
|
if not output:
|
|
if not silent:
|
|
message = "✅ Clean"
|
|
colored_message = Colors.colorize(message, Colors.GREEN)
|
|
print(f" {colored_message}")
|
|
else:
|
|
# Check if output contains success indicators
|
|
if (
|
|
"✅" in output
|
|
or "All checks passed" in output
|
|
or "Formatted and" in output
|
|
):
|
|
# This is a success message, display it directly
|
|
if not silent:
|
|
colored_output = Colors.colorize(output, Colors.GREEN)
|
|
print(f" {colored_output}")
|
|
else:
|
|
# This is a warning message
|
|
message = "⚠️ WARNINGS found:"
|
|
colored_message = Colors.colorize(message, Colors.YELLOW)
|
|
print(f" {colored_message}")
|
|
# Indent and color the output
|
|
for line in output.split("\n"):
|
|
if line.strip():
|
|
colored_line = Colors.colorize(line, Colors.YELLOW)
|
|
print(f" {colored_line}")
|
|
else:
|
|
message = "❌ ERRORS found:"
|
|
colored_message = Colors.colorize(message, Colors.RED)
|
|
print(f" {colored_message}")
|
|
if output:
|
|
# Indent and color the output
|
|
for line in output.split("\n"):
|
|
if line.strip():
|
|
colored_line = Colors.colorize(line, Colors.RED)
|
|
print(f" {colored_line}")
|
|
|
|
|
|
def get_gd_files(project_root: Path) -> List[Path]:
|
|
"""Get all .gd files in the project, respecting gitignore."""
|
|
gitignore_patterns = read_gitignore(project_root)
|
|
gd_files = []
|
|
|
|
for gd_file in project_root.rglob("*.gd"):
|
|
if gd_file.is_file() and not is_ignored_by_gitignore(
|
|
gd_file, project_root, gitignore_patterns
|
|
):
|
|
gd_files.append(gd_file)
|
|
|
|
return gd_files
|
|
|
|
|
|
def get_py_files(project_root: Path) -> List[Path]:
|
|
"""Get all .py files in the project, respecting gitignore."""
|
|
gitignore_patterns = read_gitignore(project_root)
|
|
py_files = []
|
|
|
|
for py_file in project_root.rglob("*.py"):
|
|
if py_file.is_file() and not is_ignored_by_gitignore(
|
|
py_file, project_root, gitignore_patterns
|
|
):
|
|
py_files.append(py_file)
|
|
|
|
return py_files
|
|
|
|
|
|
def read_gitignore(project_root: Path) -> List[str]:
|
|
"""Read gitignore patterns from .gitignore file."""
|
|
gitignore_path = project_root / ".gitignore"
|
|
patterns = []
|
|
|
|
if gitignore_path.exists():
|
|
try:
|
|
with open(gitignore_path, "r", encoding="utf-8") as f:
|
|
for line in f:
|
|
line = line.strip()
|
|
if line and not line.startswith("#"):
|
|
patterns.append(line)
|
|
except Exception as e:
|
|
print(f"Warning: Could not read .gitignore: {e}")
|
|
|
|
return patterns
|
|
|
|
|
|
def is_ignored_by_gitignore(
|
|
file_path: Path, project_root: Path, patterns: List[str]
|
|
) -> bool:
|
|
"""Check if a file should be ignored based on gitignore patterns."""
|
|
relative_path = file_path.relative_to(project_root)
|
|
path_str = str(relative_path).replace("\\", "/")
|
|
|
|
for pattern in patterns:
|
|
# Handle directory patterns
|
|
if pattern.endswith("/"):
|
|
if any(part == pattern[:-1] for part in relative_path.parts):
|
|
return True
|
|
# Handle file patterns
|
|
elif pattern in path_str or relative_path.name == pattern:
|
|
return True
|
|
# Handle glob patterns (simple implementation)
|
|
elif "*" in pattern:
|
|
import fnmatch
|
|
|
|
if fnmatch.fnmatch(path_str, pattern) or fnmatch.fnmatch(
|
|
relative_path.name, pattern
|
|
):
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
def get_validation_files(project_root: Path) -> Dict[str, List[Path]]:
|
|
"""Get all YAML, TOML, and JSON files in the project, respecting gitignore."""
|
|
gitignore_patterns = read_gitignore(project_root)
|
|
|
|
file_types = {"yaml": ["*.yaml", "*.yml"], "toml": ["*.toml"], "json": ["*.json"]}
|
|
|
|
files = {file_type: [] for file_type in file_types}
|
|
|
|
for file_type, patterns in file_types.items():
|
|
for pattern in patterns:
|
|
for file_path in project_root.rglob(pattern):
|
|
if file_path.is_file() and not is_ignored_by_gitignore(
|
|
file_path, project_root, gitignore_patterns
|
|
):
|
|
files[file_type].append(file_path)
|
|
|
|
return files
|
|
|
|
|
|
def format_test_name(filename: str) -> str:
|
|
"""Convert test_filename to readable test name."""
|
|
return filename.replace("test_", "").replace("_", " ")
|
|
|
|
|
|
def _is_successful_linter_output(output: str) -> bool:
|
|
"""Interpret noisy but-successful linter output as clean.
|
|
|
|
Some linters print friendly messages even when they exit with 0. Treat
|
|
those as clean rather than "warnings". This function centralizes the
|
|
heuristics.
|
|
"""
|
|
if not output or not output.strip():
|
|
return True
|
|
|
|
# Common success patterns to treat as clean. Case-insensitive.
|
|
success_patterns = [
|
|
r"no problems found",
|
|
r"0 problems",
|
|
r"no issues found",
|
|
r"success: no problems found",
|
|
r"all good",
|
|
r"ok$",
|
|
]
|
|
|
|
text = output.lower()
|
|
for pat in success_patterns:
|
|
if re.search(pat, text):
|
|
return True
|
|
return False
|
|
|
|
|
|
def run_lint(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run gdlint on all GDScript files."""
|
|
if not yaml_output:
|
|
print_header("🔍 GDScript Linter", silent)
|
|
|
|
gd_files = get_gd_files(project_root)
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Found {len(gd_files)} GDScript files to lint."
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(f"{colored_count}\n")
|
|
|
|
clean_files = warning_files = error_files = 0
|
|
failed_paths = []
|
|
|
|
for gd_file in gd_files:
|
|
relative_path = gd_file.relative_to(project_root)
|
|
if not silent and not yaml_output:
|
|
file_msg = f"📄 Linting: {relative_path.name}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
print(colored_file)
|
|
|
|
if should_skip_file(gd_file):
|
|
if not silent and not yaml_output:
|
|
print_skip_message("gdlint")
|
|
clean_files += 1
|
|
if not silent and not yaml_output:
|
|
print()
|
|
continue
|
|
|
|
try:
|
|
result = run_command(["gdlint", str(gd_file)], project_root)
|
|
output = (result.stdout + result.stderr).strip()
|
|
|
|
if result.returncode == 0:
|
|
# If output is "no problems" (or similar), treat as clean.
|
|
if _is_successful_linter_output(output):
|
|
clean_files += 1
|
|
if not yaml_output:
|
|
print_result(True, "", silent)
|
|
else:
|
|
warning_files += 1
|
|
if not yaml_output:
|
|
print_result(True, output, silent)
|
|
else:
|
|
error_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
if not yaml_output:
|
|
print_result(False, output, silent)
|
|
|
|
except FileNotFoundError:
|
|
if not silent and not yaml_output:
|
|
print(" ❌ ERROR: gdlint not found")
|
|
return False, {}
|
|
except Exception as e:
|
|
if not silent and not yaml_output:
|
|
print(f" ❌ ERROR: {e}")
|
|
error_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
|
|
# Summary
|
|
stats = {
|
|
"Total files": len(gd_files),
|
|
"Clean files": clean_files,
|
|
"Files with warnings": warning_files,
|
|
"Files with errors": error_files,
|
|
}
|
|
|
|
success = error_files == 0
|
|
|
|
if yaml_output:
|
|
output_yaml_results("lint", {**stats, "failed_paths": failed_paths}, success)
|
|
else:
|
|
print_summary("Linting Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if not success:
|
|
msg = "❌ Linting FAILED - Please fix the errors above"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif warning_files > 0:
|
|
msg = "⚠️ Linting PASSED with warnings - Consider fixing them"
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "✅ All GDScript files passed linting!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif not success:
|
|
# In silent mode, still show failed files
|
|
for failed_path in failed_paths:
|
|
print(f"❌ {failed_path}")
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
def validate_yaml_file(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a YAML file."""
|
|
if yaml is None:
|
|
return False, "PyYAML not installed. Install with: pip install PyYAML"
|
|
|
|
try:
|
|
with open(file_path, "r", encoding="utf-8") as f:
|
|
yaml.safe_load(f)
|
|
return True, ""
|
|
except yaml.YAMLError as e:
|
|
return False, f"YAML syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
async def validate_yaml_file_async(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a YAML file asynchronously."""
|
|
if yaml is None:
|
|
return False, "PyYAML not installed. Install with: pip install PyYAML"
|
|
|
|
if aiofiles is None:
|
|
return validate_yaml_file(file_path)
|
|
|
|
try:
|
|
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
|
|
content = await f.read()
|
|
yaml.safe_load(content)
|
|
return True, ""
|
|
except yaml.YAMLError as e:
|
|
return False, f"YAML syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
def validate_toml_file(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a TOML file."""
|
|
if tomllib is None:
|
|
return (
|
|
False,
|
|
"tomllib/tomli not available. For Python 3.11+, it's built-in. For older versions: pip install tomli",
|
|
)
|
|
|
|
try:
|
|
with open(file_path, "rb") as f:
|
|
tomllib.load(f)
|
|
return True, ""
|
|
except tomllib.TOMLDecodeError as e:
|
|
return False, f"TOML syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
async def validate_toml_file_async(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a TOML file asynchronously."""
|
|
if tomllib is None:
|
|
return (
|
|
False,
|
|
"tomllib/tomli not available. For Python 3.11+, it's built-in. For older versions: pip install tomli",
|
|
)
|
|
|
|
if aiofiles is None:
|
|
return validate_toml_file(file_path)
|
|
|
|
try:
|
|
async with aiofiles.open(file_path, "rb") as f:
|
|
content = await f.read()
|
|
tomllib.load(BytesIO(content))
|
|
return True, ""
|
|
except tomllib.TOMLDecodeError as e:
|
|
return False, f"TOML syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
def validate_json_file(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a JSON file."""
|
|
try:
|
|
with open(file_path, "r", encoding="utf-8") as f:
|
|
json.load(f)
|
|
return True, ""
|
|
except json.JSONDecodeError as e:
|
|
return False, f"JSON syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
def check_naming_convention(file_path: Path) -> Tuple[bool, str]:
|
|
"""Check if file follows PascalCase naming convention for .tscn and .gd files."""
|
|
file_name = file_path.name
|
|
|
|
# Skip files that shouldn't follow PascalCase
|
|
if file_path.suffix not in [".tscn", ".gd"]:
|
|
return True, ""
|
|
|
|
# Skip certain directories and files
|
|
skip_dirs = {"autoloads", "helpers"}
|
|
if any(part in skip_dirs for part in file_path.parts):
|
|
return True, ""
|
|
|
|
# Skip project-specific files that are exempt
|
|
exempt_files = {
|
|
"project.godot",
|
|
"icon.svg",
|
|
"export_presets.cfg",
|
|
"default_bus_layout.tres",
|
|
}
|
|
if file_name in exempt_files:
|
|
return True, ""
|
|
|
|
# Check PascalCase pattern
|
|
name_without_ext = file_path.stem
|
|
|
|
# PascalCase: starts with capital letter, can have more capitals, no underscores or hyphens
|
|
if not re.match(r"^[A-Z][a-zA-Z0-9]*$", name_without_ext):
|
|
return (
|
|
False,
|
|
f"File name '{file_name}' should use PascalCase (e.g., 'MainMenu.gd', 'Match3Gameplay.tscn')",
|
|
)
|
|
|
|
return True, ""
|
|
|
|
|
|
def get_naming_files(project_root: Path) -> List[Path]:
|
|
"""Get all .tscn and .gd files that should follow naming conventions, respecting gitignore."""
|
|
gitignore_patterns = read_gitignore(project_root)
|
|
files = []
|
|
|
|
for pattern in ["**/*.tscn", "**/*.gd"]:
|
|
for file_path in project_root.glob(pattern):
|
|
if file_path.is_file():
|
|
files.append(file_path)
|
|
|
|
# Filter out files that should be ignored (gitignore + TestHelper)
|
|
filtered_files = []
|
|
for file_path in files:
|
|
if not should_skip_file(file_path) and not is_ignored_by_gitignore(
|
|
file_path, project_root, gitignore_patterns
|
|
):
|
|
filtered_files.append(file_path)
|
|
|
|
return filtered_files
|
|
|
|
|
|
async def validate_json_file_async(file_path: Path) -> Tuple[bool, str]:
|
|
"""Validate a JSON file asynchronously."""
|
|
if aiofiles is None:
|
|
return validate_json_file(file_path)
|
|
|
|
try:
|
|
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
|
|
content = await f.read()
|
|
json.loads(content)
|
|
return True, ""
|
|
except json.JSONDecodeError as e:
|
|
return False, f"JSON syntax error: {e}"
|
|
except Exception as e:
|
|
return False, f"Error reading file: {e}"
|
|
|
|
|
|
async def process_file_async(
|
|
file_path: Path,
|
|
command: list,
|
|
project_root: Path,
|
|
semaphore: asyncio.Semaphore,
|
|
tool_name: str,
|
|
icon: str = "📄",
|
|
skip_check: bool = True,
|
|
silent: bool = False,
|
|
) -> dict:
|
|
"""Generic async file processor - DRY principle."""
|
|
async with semaphore:
|
|
relative_path = file_path.relative_to(project_root)
|
|
result = {
|
|
"success": False,
|
|
"output": "",
|
|
"path": str(relative_path),
|
|
"display": "",
|
|
}
|
|
|
|
# Check if file should be skipped
|
|
if skip_check and should_skip_file(file_path):
|
|
result["success"] = True
|
|
if not silent: # Only show skip message if not silent
|
|
result["display"] = f"{icon} {tool_name}: {relative_path.name}\n"
|
|
result["display"] += (
|
|
f" ⚠️ Skipped (static var syntax not supported by {tool_name.lower()})"
|
|
)
|
|
return result
|
|
|
|
try:
|
|
returncode, stdout, stderr = await run_command_async(command, project_root)
|
|
command_output = (stdout + stderr).strip()
|
|
|
|
if returncode == 0:
|
|
result["success"] = True
|
|
# Special handling for linting success detection
|
|
if tool_name == "Linting" and not _is_successful_linter_output(
|
|
command_output
|
|
):
|
|
result["output"] = command_output
|
|
result["display"] = f"{icon} {tool_name}: {relative_path.name}\n"
|
|
result["display"] += " ⚠️ WARNINGS found:\n"
|
|
result["display"] += _format_output_lines(
|
|
command_output, Colors.YELLOW
|
|
)
|
|
else:
|
|
# Success - only show if not silent
|
|
if not silent:
|
|
result["display"] = (
|
|
f"{icon} {tool_name}: {relative_path.name}\n"
|
|
)
|
|
result["display"] += " ✅ Success"
|
|
else:
|
|
# Always show errors, even in silent mode
|
|
result["output"] = command_output
|
|
result["display"] = f"{icon} {tool_name}: {relative_path.name}\n"
|
|
result["display"] += " ❌ ERRORS found:\n"
|
|
result["display"] += _format_output_lines(command_output, Colors.RED)
|
|
|
|
except FileNotFoundError:
|
|
# Always show errors, even in silent mode
|
|
result["display"] = f"{icon} {tool_name}: {relative_path.name}\n"
|
|
result["display"] += f" ❌ ERROR: {tool_name.lower()} not found"
|
|
except Exception as e:
|
|
# Always show errors, even in silent mode
|
|
result["display"] = f"{icon} {tool_name}: {relative_path.name}\n"
|
|
result["display"] += f" ❌ ERROR: {e}"
|
|
|
|
return result
|
|
|
|
|
|
def _format_output_lines(text: str, color: str) -> str:
|
|
"""Format command output with indentation and color."""
|
|
if not text:
|
|
return ""
|
|
lines = []
|
|
for line in text.split("\n"):
|
|
if line.strip():
|
|
colored_line = Colors.colorize(f" {line}", color)
|
|
lines.append(colored_line)
|
|
return "\n".join(lines)
|
|
|
|
|
|
async def run_tool_async(
|
|
project_root: Path,
|
|
tool_config: dict,
|
|
silent: bool = False,
|
|
yaml_output: bool = False,
|
|
) -> Tuple[bool, Dict]:
|
|
"""Generic async tool runner - DRY principle."""
|
|
files = tool_config["get_files"](project_root)
|
|
|
|
# Calculate total files
|
|
if tool_config["tool"] == "validate":
|
|
total_files = sum(len(type_files) for type_files in files.values())
|
|
else:
|
|
total_files = len(files)
|
|
|
|
if not yaml_output:
|
|
print_header(f"{tool_config['icon']} {tool_config['name']} (Async)", silent)
|
|
if not silent:
|
|
count_msg = f"Found {total_files} {tool_config['file_type']} files to {tool_config['action']}."
|
|
print(f"{Colors.colorize(count_msg, Colors.BLUE)}\n")
|
|
|
|
semaphore = asyncio.Semaphore(
|
|
min(tool_config.get("max_concurrent", 10), total_files)
|
|
)
|
|
|
|
# Process files concurrently
|
|
tasks = []
|
|
if tool_config["tool"] == "validate":
|
|
# Validation needs file type info
|
|
for file_type, type_files in files.items():
|
|
for f in type_files:
|
|
tasks.append(
|
|
process_validation_file_async(
|
|
f, file_type, project_root, semaphore, silent, yaml_output
|
|
)
|
|
)
|
|
else:
|
|
for file_path in files:
|
|
command = tool_config["command"](file_path)
|
|
tasks.append(
|
|
process_file_async(
|
|
file_path,
|
|
command,
|
|
project_root,
|
|
semaphore,
|
|
tool_config["name"],
|
|
tool_config["icon"],
|
|
True,
|
|
silent,
|
|
)
|
|
)
|
|
|
|
if not tasks:
|
|
return True, {
|
|
"Total files": 0,
|
|
"Clean files": 0,
|
|
"Files with warnings": 0,
|
|
"Files with errors": 0,
|
|
}
|
|
|
|
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
|
|
# Process results
|
|
stats = {
|
|
"Total files": len(results),
|
|
"Clean files": 0,
|
|
"Files with warnings": 0,
|
|
"Files with errors": 0,
|
|
}
|
|
failed_paths = []
|
|
|
|
for result in results:
|
|
if isinstance(result, Exception):
|
|
stats["Files with errors"] += 1
|
|
failed_paths.append("Unknown file - exception occurred")
|
|
else:
|
|
if tool_config["tool"] == "validate":
|
|
success, path, display = result
|
|
if display and not yaml_output:
|
|
print(display)
|
|
print()
|
|
else:
|
|
success = result["success"]
|
|
path = result["path"]
|
|
if result["display"] and not yaml_output:
|
|
print(result["display"])
|
|
print()
|
|
|
|
if success:
|
|
if tool_config["tool"] == "lint" and result.get("output"):
|
|
stats["Files with warnings"] += 1
|
|
else:
|
|
stats["Clean files"] += 1
|
|
else:
|
|
stats["Files with errors"] += 1
|
|
failed_paths.append(path)
|
|
|
|
success = stats["Files with errors"] == 0
|
|
|
|
# Output results
|
|
if yaml_output:
|
|
output_yaml_results(
|
|
tool_config["tool"], {**stats, "failed_paths": failed_paths}, success
|
|
)
|
|
else:
|
|
_print_tool_summary(tool_config, stats, failed_paths, success, silent)
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
def _print_tool_summary(
|
|
tool_config: dict, stats: dict, failed_paths: list, success: bool, silent: bool
|
|
):
|
|
"""Print tool execution summary."""
|
|
print_summary(f"{tool_config['name']} Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if not success:
|
|
msg = f"❌ {tool_config['name']} FAILED - Please fix the errors above"
|
|
print(Colors.colorize(msg, Colors.RED + Colors.BOLD))
|
|
elif stats.get("Files with warnings", 0) > 0:
|
|
msg = (
|
|
f"⚠️ {tool_config['name']} PASSED with warnings - Consider fixing them"
|
|
)
|
|
print(Colors.colorize(msg, Colors.YELLOW + Colors.BOLD))
|
|
else:
|
|
msg = f"✅ All files passed {tool_config['name'].lower()}!"
|
|
print(Colors.colorize(msg, Colors.GREEN + Colors.BOLD))
|
|
elif not success:
|
|
for path in failed_paths:
|
|
print(f"❌ {path}")
|
|
|
|
|
|
# Simplified async functions
|
|
async def run_lint_async(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
return await run_tool_async(
|
|
project_root,
|
|
{
|
|
"name": "GDScript Linter",
|
|
"icon": "🔍",
|
|
"tool": "lint",
|
|
"action": "lint",
|
|
"file_type": "GDScript",
|
|
"get_files": get_gd_files,
|
|
"command": lambda f: ["gdlint", str(f)],
|
|
},
|
|
silent,
|
|
yaml_output,
|
|
)
|
|
|
|
|
|
async def run_format_async(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
return await run_tool_async(
|
|
project_root,
|
|
{
|
|
"name": "GDScript Formatter",
|
|
"icon": "🎨",
|
|
"tool": "format",
|
|
"action": "format",
|
|
"file_type": "GDScript",
|
|
"get_files": get_gd_files,
|
|
"command": lambda f: ["gdformat", str(f)],
|
|
},
|
|
silent,
|
|
yaml_output,
|
|
)
|
|
|
|
|
|
async def process_ruff_file_async(
|
|
py_file: Path,
|
|
project_root: Path,
|
|
semaphore: asyncio.Semaphore,
|
|
silent: bool = False,
|
|
yaml_output: bool = False,
|
|
fix: bool = True,
|
|
) -> dict:
|
|
"""Process a single Python file with ruff format + check + fix."""
|
|
async with semaphore:
|
|
relative_path = py_file.relative_to(project_root)
|
|
result = {
|
|
"success": False,
|
|
"output": "",
|
|
"path": str(relative_path),
|
|
"display": "",
|
|
}
|
|
|
|
try:
|
|
# Step 1: Format the file
|
|
format_returncode, format_stdout, format_stderr = await run_command_async(
|
|
["ruff", "format", str(py_file)], project_root
|
|
)
|
|
|
|
# Step 2: Run linting with --fix if enabled
|
|
lint_cmd = ["ruff", "check", str(py_file)]
|
|
if fix:
|
|
lint_cmd.append("--fix")
|
|
|
|
lint_returncode, lint_stdout, lint_stderr = await run_command_async(
|
|
lint_cmd, project_root
|
|
)
|
|
|
|
lint_output = (lint_stdout + lint_stderr).strip()
|
|
|
|
# Build display output
|
|
if not silent:
|
|
result["display"] = (
|
|
f"🐍 Python Formatter & Linter: {relative_path.name}\n"
|
|
)
|
|
|
|
# Check if formatting failed
|
|
if format_returncode != 0:
|
|
format_error = (format_stdout + format_stderr).strip()
|
|
result["output"] = format_error
|
|
if not silent:
|
|
result["display"] += f" ❌ Format error: {format_error}"
|
|
return result
|
|
|
|
# Check linting results
|
|
elif lint_returncode == 0:
|
|
result["success"] = True
|
|
if not silent:
|
|
if lint_output:
|
|
result["display"] += f" ✅ Formatted and fixed: {lint_output}"
|
|
else:
|
|
result["display"] += " ✅ Formatted and clean"
|
|
else:
|
|
# Linting issues that couldn't be auto-fixed
|
|
result["output"] = lint_output
|
|
if fix:
|
|
result["success"] = True # Still success, but with warnings
|
|
if not silent:
|
|
result["display"] += " ⚠️ Formatted, remaining issues:\n"
|
|
result["display"] += _format_output_lines(
|
|
lint_output, Colors.YELLOW
|
|
)
|
|
else:
|
|
if not silent:
|
|
result["display"] += " ❌ Lint errors:\n"
|
|
result["display"] += _format_output_lines(
|
|
lint_output, Colors.RED
|
|
)
|
|
|
|
except FileNotFoundError:
|
|
if not silent:
|
|
result["display"] = (
|
|
f"🐍 Python Formatter & Linter: {relative_path.name}\n"
|
|
)
|
|
result["display"] += " ❌ ERROR: ruff not found"
|
|
except Exception as e:
|
|
if not silent:
|
|
result["display"] = (
|
|
f"🐍 Python Formatter & Linter: {relative_path.name}\n"
|
|
)
|
|
result["display"] += f" ❌ ERROR: {e}"
|
|
|
|
return result
|
|
|
|
|
|
async def run_ruff_async(
|
|
project_root: Path,
|
|
silent: bool = False,
|
|
yaml_output: bool = False,
|
|
fix: bool = True,
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run ruff format + check with --fix on all Python files asynchronously."""
|
|
if not yaml_output:
|
|
print_header("🐍 Python Formatter & Linter (Ruff Async)", silent)
|
|
|
|
py_files = get_py_files(project_root)
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Found {len(py_files)} Python files to format and lint."
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(f"{colored_count}\n")
|
|
|
|
clean_files = warning_files = error_files = 0
|
|
failed_paths = []
|
|
|
|
# Use semaphore to limit concurrent operations
|
|
semaphore = asyncio.Semaphore(min(10, len(py_files)))
|
|
|
|
# Process files concurrently
|
|
tasks = [
|
|
process_ruff_file_async(
|
|
py_file, project_root, semaphore, silent, yaml_output, fix
|
|
)
|
|
for py_file in py_files
|
|
]
|
|
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
|
|
for result in results:
|
|
if isinstance(result, Exception):
|
|
error_files += 1
|
|
failed_paths.append("Unknown file - exception occurred")
|
|
else:
|
|
# Print the output from this file processing
|
|
if result["display"] and not yaml_output:
|
|
print(result["display"])
|
|
print() # Add spacing between files
|
|
|
|
if result["success"]:
|
|
if result["output"]:
|
|
warning_files += 1 # Has warnings but succeeded
|
|
else:
|
|
clean_files += 1
|
|
else:
|
|
error_files += 1
|
|
failed_paths.append(result["path"])
|
|
|
|
# Summary
|
|
stats = {
|
|
"Total files": len(py_files),
|
|
"Clean files": clean_files,
|
|
"Files with warnings": warning_files,
|
|
"Files with errors": error_files,
|
|
}
|
|
|
|
success = error_files == 0
|
|
|
|
if yaml_output:
|
|
output_yaml_results("ruff", {**stats, "failed_paths": failed_paths}, success)
|
|
else:
|
|
action_name = (
|
|
"Python Format & Lint (with auto-fix)" if fix else "Python Format & Lint"
|
|
)
|
|
print_summary(f"{action_name} Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if not success:
|
|
msg = "❌ Python processing FAILED - Please fix the errors above"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif warning_files > 0:
|
|
msg = "⚠️ Python files processed with remaining lint warnings"
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "✅ All Python files formatted and linted successfully!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif not success:
|
|
for failed_path in failed_paths:
|
|
print(f"❌ {failed_path}")
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
def run_validate(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run validation on YAML, TOML, and JSON files."""
|
|
if not silent and not yaml_output:
|
|
print_header("📋 File Format Validation")
|
|
|
|
# Get all validation files
|
|
validation_files = get_validation_files(project_root)
|
|
total_files = sum(len(files) for files in validation_files.values())
|
|
|
|
if total_files == 0:
|
|
if not silent:
|
|
msg = "No YAML, TOML, or JSON files found to validate."
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW)
|
|
print(colored_msg)
|
|
return True, {"Total files": 0, "Valid files": 0, "Invalid files": 0}
|
|
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Found {total_files} files to validate:"
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(colored_count)
|
|
|
|
for file_type, files in validation_files.items():
|
|
if files:
|
|
type_msg = f" {file_type.upper()}: {len(files)} files"
|
|
colored_type = Colors.colorize(type_msg, Colors.CYAN)
|
|
print(colored_type)
|
|
|
|
print()
|
|
|
|
valid_files = invalid_files = 0
|
|
failed_paths = []
|
|
|
|
# Validation functions mapping
|
|
validators = {
|
|
"yaml": validate_yaml_file,
|
|
"toml": validate_toml_file,
|
|
"json": validate_json_file,
|
|
}
|
|
|
|
# Validate each file type
|
|
for file_type, files in validation_files.items():
|
|
if not files:
|
|
continue
|
|
|
|
validator = validators[file_type]
|
|
if not silent and not yaml_output:
|
|
type_header = f"🔍 Validating {file_type.upper()} files"
|
|
colored_header = Colors.colorize(type_header, Colors.MAGENTA + Colors.BOLD)
|
|
print(colored_header)
|
|
|
|
for file_path in files:
|
|
relative_path = file_path.relative_to(project_root)
|
|
if not silent and not yaml_output:
|
|
file_msg = f"📄 Validating: {relative_path}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
print(colored_file)
|
|
|
|
is_valid, error_msg = validator(file_path)
|
|
|
|
if is_valid:
|
|
valid_files += 1
|
|
if not yaml_output:
|
|
print_result(True, "", silent)
|
|
else:
|
|
invalid_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
if not yaml_output:
|
|
print_result(False, error_msg, silent)
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
|
|
# Summary
|
|
stats = {
|
|
"Total files": total_files,
|
|
"Valid files": valid_files,
|
|
"Invalid files": invalid_files,
|
|
}
|
|
|
|
success = invalid_files == 0
|
|
|
|
if yaml_output:
|
|
output_yaml_results(
|
|
"validate", {**stats, "failed_paths": failed_paths}, success
|
|
)
|
|
else:
|
|
if not silent:
|
|
print_summary("Validation Summary", stats)
|
|
print()
|
|
if not success:
|
|
msg = "❌ File validation FAILED - Please fix the syntax errors above"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "✅ All files passed validation!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif not success:
|
|
# In silent mode, still show errors
|
|
for failed_path in failed_paths:
|
|
print(f"❌ {failed_path}")
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
async def process_validation_file_async(
|
|
file_path: Path,
|
|
file_type: str,
|
|
project_root: Path,
|
|
semaphore: asyncio.Semaphore,
|
|
silent: bool = False,
|
|
yaml_output: bool = False,
|
|
) -> Tuple[bool, str, str]:
|
|
"""Process a single file for validation asynchronously."""
|
|
async with semaphore:
|
|
relative_path = file_path.relative_to(project_root)
|
|
output_lines = []
|
|
|
|
# Validation functions mapping
|
|
validators = {
|
|
"yaml": validate_yaml_file_async,
|
|
"toml": validate_toml_file_async,
|
|
"json": validate_json_file_async,
|
|
}
|
|
|
|
validator = validators[file_type]
|
|
is_valid, error_msg = await validator(file_path)
|
|
|
|
if is_valid:
|
|
# Success - only show if not silent
|
|
if not silent and not yaml_output:
|
|
file_msg = f"📄 Validating: {relative_path}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
output_lines.append(colored_file)
|
|
success_msg = " ✅ Clean"
|
|
colored_success = Colors.colorize(success_msg, Colors.GREEN)
|
|
output_lines.append(colored_success)
|
|
return True, str(relative_path), "\n".join(output_lines)
|
|
else:
|
|
# Always show errors, even in silent mode
|
|
if not yaml_output:
|
|
file_msg = f"📄 Validating: {relative_path}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
output_lines.append(colored_file)
|
|
error_msg_display = " ❌ ERRORS found:"
|
|
colored_error = Colors.colorize(error_msg_display, Colors.RED)
|
|
output_lines.append(colored_error)
|
|
if error_msg:
|
|
# Indent and color the output
|
|
for line in error_msg.split("\n"):
|
|
if line.strip():
|
|
colored_line = Colors.colorize(f" {line}", Colors.RED)
|
|
output_lines.append(colored_line)
|
|
return False, str(relative_path), "\n".join(output_lines)
|
|
|
|
|
|
async def run_validate_async(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
return await run_tool_async(
|
|
project_root,
|
|
{
|
|
"name": "File Format Validation",
|
|
"icon": "📋",
|
|
"tool": "validate",
|
|
"action": "validate",
|
|
"file_type": "config",
|
|
"get_files": get_validation_files,
|
|
"command": None, # Validation uses different processing
|
|
},
|
|
silent,
|
|
yaml_output,
|
|
)
|
|
|
|
|
|
def run_codemap(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Generate code map - JSON format only"""
|
|
if not silent and not yaml_output:
|
|
print_header("🗺️ Code Map Generator")
|
|
|
|
try:
|
|
result = run_command(
|
|
["python", "tools/generate_code_map.py"],
|
|
project_root,
|
|
timeout=120,
|
|
)
|
|
|
|
success = result.returncode == 0
|
|
|
|
if not yaml_output:
|
|
if success:
|
|
if not silent:
|
|
print(result.stdout)
|
|
msg = "✅ Code maps generated in .llm/ directory"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "❌ Code map generation failed"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
if result.stderr:
|
|
print(Colors.colorize(result.stderr, Colors.RED))
|
|
|
|
stats = {"Success": success}
|
|
|
|
if yaml_output:
|
|
output_yaml_results("codemap", stats, success)
|
|
|
|
return success, stats
|
|
|
|
except Exception as e:
|
|
if not silent and not yaml_output:
|
|
print(f"❌ ERROR: {e}")
|
|
return False, {}
|
|
|
|
|
|
def run_naming(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Check naming conventions for .tscn and .gd files."""
|
|
if not silent and not yaml_output:
|
|
print_header("📝 Naming Convention Check")
|
|
|
|
# Get all files that should follow naming conventions
|
|
naming_files = get_naming_files(project_root)
|
|
total_files = len(naming_files)
|
|
|
|
if total_files == 0:
|
|
if not silent:
|
|
msg = "No .tscn or .gd files found to check naming conventions."
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW)
|
|
print(colored_msg)
|
|
return True, {"Total files": 0, "Valid files": 0, "Invalid files": 0}
|
|
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Checking naming conventions for {total_files} files..."
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(colored_count)
|
|
print()
|
|
|
|
# Process files
|
|
valid_files = 0
|
|
invalid_files = 0
|
|
all_output = []
|
|
|
|
for file_path in naming_files:
|
|
is_valid, error_msg = check_naming_convention(file_path)
|
|
relative_path = file_path.relative_to(project_root)
|
|
|
|
if is_valid:
|
|
valid_files += 1
|
|
if not silent and not yaml_output:
|
|
file_msg = f"📄 Checking: {relative_path}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
success_msg = " ✅ Follows PascalCase convention"
|
|
colored_success = Colors.colorize(success_msg, Colors.GREEN)
|
|
all_output.extend([file_msg, success_msg])
|
|
print(colored_file)
|
|
print(colored_success)
|
|
else:
|
|
invalid_files += 1
|
|
# Always show naming errors, even in silent mode
|
|
if not yaml_output:
|
|
file_msg = f"📄 Checking: {relative_path}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
error_msg_display = f" ❌ {error_msg}"
|
|
colored_error = Colors.colorize(error_msg_display, Colors.RED)
|
|
all_output.extend([file_msg, error_msg_display])
|
|
print(colored_file)
|
|
print(colored_error)
|
|
|
|
# Results summary
|
|
overall_success = invalid_files == 0
|
|
stats = {
|
|
"Total files": total_files,
|
|
"Valid files": valid_files,
|
|
"Invalid files": invalid_files,
|
|
}
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
if overall_success:
|
|
success_msg = "✅ All files follow PascalCase naming convention!"
|
|
colored_success = Colors.colorize(success_msg, Colors.GREEN)
|
|
print(colored_success)
|
|
else:
|
|
error_msg = f"❌ {invalid_files} file(s) don't follow PascalCase convention"
|
|
colored_error = Colors.colorize(error_msg, Colors.RED)
|
|
print(colored_error)
|
|
|
|
if yaml_output:
|
|
# Collect only failed files for YAML output (don't include all files)
|
|
failed_paths = []
|
|
|
|
for file_path in naming_files:
|
|
relative_path_str = str(file_path.relative_to(project_root))
|
|
is_valid, error_msg = check_naming_convention(file_path)
|
|
|
|
if not is_valid:
|
|
failed_paths.append(relative_path_str)
|
|
|
|
results = {
|
|
**stats, # Include stats at top level for consistency
|
|
"failed_paths": failed_paths, # For failed_items extraction
|
|
}
|
|
output_yaml_results("naming", results, overall_success)
|
|
|
|
return overall_success, stats
|
|
|
|
|
|
def run_format(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run gdformat on all GDScript files."""
|
|
if not yaml_output:
|
|
print_header("🎨 GDScript Formatter", silent)
|
|
|
|
gd_files = get_gd_files(project_root)
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Found {len(gd_files)} GDScript files to format."
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(f"{colored_count}\n")
|
|
|
|
formatted_files = failed_files = 0
|
|
failed_paths = []
|
|
|
|
for gd_file in gd_files:
|
|
relative_path = gd_file.relative_to(project_root)
|
|
if not silent and not yaml_output:
|
|
file_msg = f"🎯 Formatting: {relative_path.name}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
print(colored_file)
|
|
|
|
if should_skip_file(gd_file):
|
|
if not silent and not yaml_output:
|
|
print_skip_message("gdformat")
|
|
formatted_files += 1
|
|
if not silent and not yaml_output:
|
|
print()
|
|
continue
|
|
|
|
try:
|
|
result = run_command(["gdformat", str(gd_file)], project_root)
|
|
|
|
if result.returncode == 0:
|
|
if not silent and not yaml_output:
|
|
success_msg = "✅ Success"
|
|
colored_success = Colors.colorize(success_msg, Colors.GREEN)
|
|
print(f" {colored_success}")
|
|
formatted_files += 1
|
|
else:
|
|
if not silent and not yaml_output:
|
|
fail_msg = f"❌ FAILED: {relative_path}"
|
|
colored_fail = Colors.colorize(fail_msg, Colors.RED)
|
|
print(f" {colored_fail}")
|
|
output = (result.stdout + result.stderr).strip()
|
|
if output:
|
|
colored_output = Colors.colorize(output, Colors.RED)
|
|
print(f" {colored_output}")
|
|
failed_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
|
|
except FileNotFoundError:
|
|
if not silent and not yaml_output:
|
|
print(" ❌ ERROR: gdformat not found")
|
|
return False, {}
|
|
except Exception as e:
|
|
if not silent and not yaml_output:
|
|
print(f" ❌ ERROR: {e}")
|
|
failed_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
|
|
# Summary
|
|
stats = {
|
|
"Total files": len(gd_files),
|
|
"Successfully formatted": formatted_files,
|
|
"Failed": failed_files,
|
|
}
|
|
|
|
success = failed_files == 0
|
|
|
|
if yaml_output:
|
|
output_yaml_results("format", {**stats, "failed_paths": failed_paths}, success)
|
|
else:
|
|
print_summary("Formatting Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if not success:
|
|
msg = "⚠️ WARNING: Some files failed to format"
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "✅ All GDScript files formatted successfully!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif not success:
|
|
# In silent mode, still show failed files
|
|
for failed_path in failed_paths:
|
|
print(f"❌ {failed_path}")
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
def run_ruff(
|
|
project_root: Path,
|
|
silent: bool = False,
|
|
yaml_output: bool = False,
|
|
fix: bool = True,
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run ruff format + check with --fix on all Python files."""
|
|
if not yaml_output:
|
|
print_header("🐍 Python Formatter & Linter (Ruff)", silent)
|
|
|
|
py_files = get_py_files(project_root)
|
|
if not silent and not yaml_output:
|
|
count_msg = f"Found {len(py_files)} Python files to format and lint."
|
|
colored_count = Colors.colorize(count_msg, Colors.BLUE)
|
|
print(f"{colored_count}\n")
|
|
|
|
clean_files = warning_files = error_files = 0
|
|
failed_paths = []
|
|
|
|
for py_file in py_files:
|
|
relative_path = py_file.relative_to(project_root)
|
|
if not silent and not yaml_output:
|
|
file_msg = f"🐍 Processing: {relative_path.name}"
|
|
colored_file = Colors.colorize(file_msg, Colors.CYAN)
|
|
print(colored_file)
|
|
|
|
try:
|
|
# Step 1: Format the file
|
|
format_result = run_command(["ruff", "format", str(py_file)], project_root)
|
|
|
|
# Step 2: Run linting with --fix if enabled
|
|
lint_cmd = ["ruff", "check", str(py_file)]
|
|
if fix:
|
|
lint_cmd.append("--fix")
|
|
|
|
lint_result = run_command(lint_cmd, project_root)
|
|
output = (lint_result.stdout + lint_result.stderr).strip()
|
|
|
|
# Check if formatting failed
|
|
if format_result.returncode != 0:
|
|
error_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
if not yaml_output:
|
|
format_error = (format_result.stdout + format_result.stderr).strip()
|
|
print_result(False, f"Format error: {format_error}", silent)
|
|
# Check linting results
|
|
elif lint_result.returncode == 0:
|
|
clean_files += 1
|
|
if not yaml_output:
|
|
if output:
|
|
# Fixed issues automatically
|
|
print_result(True, f"✅ Formatted and fixed: {output}", silent)
|
|
else:
|
|
print_result(True, "✅ Formatted and clean", silent)
|
|
else:
|
|
# Linting issues that couldn't be auto-fixed
|
|
if fix:
|
|
warning_files += 1
|
|
if not yaml_output:
|
|
print_result(
|
|
True, f"⚠️ Formatted, remaining issues: {output}", silent
|
|
)
|
|
else:
|
|
error_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
if not yaml_output:
|
|
print_result(False, f"Lint errors: {output}", silent)
|
|
|
|
except FileNotFoundError:
|
|
if not silent and not yaml_output:
|
|
print(" ❌ ERROR: ruff not found")
|
|
return False, {}
|
|
except Exception as e:
|
|
if not silent and not yaml_output:
|
|
print(f" ❌ ERROR: {e}")
|
|
error_files += 1
|
|
failed_paths.append(str(relative_path))
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
|
|
# Summary
|
|
stats = {
|
|
"Total files": len(py_files),
|
|
"Clean files": clean_files,
|
|
"Files with warnings": warning_files,
|
|
"Files with errors": error_files,
|
|
}
|
|
|
|
success = error_files == 0
|
|
|
|
if yaml_output:
|
|
output_yaml_results("ruff", {**stats, "failed_paths": failed_paths}, success)
|
|
else:
|
|
action_name = (
|
|
"Python Format & Lint (with auto-fix)" if fix else "Python Format & Lint"
|
|
)
|
|
print_summary(f"{action_name} Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if not success:
|
|
msg = "❌ Python processing FAILED - Please fix the errors above"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif warning_files > 0:
|
|
msg = "⚠️ Python files processed with remaining lint warnings"
|
|
colored_msg = Colors.colorize(msg, Colors.YELLOW + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = "✅ All Python files formatted and linted successfully!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
elif not success:
|
|
for failed_path in failed_paths:
|
|
print(f"❌ {failed_path}")
|
|
|
|
return success, {**stats, "failed_paths": failed_paths}
|
|
|
|
|
|
def discover_test_files(project_root: Path) -> List[Tuple[Path, str]]:
|
|
"""Discover all test files with their prefixes."""
|
|
test_dirs = [
|
|
("tests", ""),
|
|
("tests/unit", "Unit: "),
|
|
("tests/integration", "Integration: "),
|
|
]
|
|
|
|
test_files = []
|
|
for test_dir, prefix in test_dirs:
|
|
test_path = project_root / test_dir
|
|
if test_path.exists():
|
|
for test_file in test_path.glob("Test*.gd"):
|
|
test_files.append((test_file, prefix))
|
|
|
|
return test_files
|
|
|
|
|
|
def run_tests(
|
|
project_root: Path, silent: bool = False, yaml_output: bool = False
|
|
) -> Tuple[bool, Dict]:
|
|
"""Run Godot tests."""
|
|
if not yaml_output:
|
|
print_header("🧪 GDScript Test Runner", silent)
|
|
|
|
test_files = discover_test_files(project_root)
|
|
|
|
if not silent and not yaml_output:
|
|
scan_msg = "🔍 Scanning for test files in tests\\ directory..."
|
|
colored_scan = Colors.colorize(scan_msg, Colors.BLUE)
|
|
print(colored_scan)
|
|
|
|
discover_msg = "\n📋 Discovered test files:"
|
|
colored_discover = Colors.colorize(discover_msg, Colors.CYAN)
|
|
print(colored_discover)
|
|
|
|
for test_file, prefix in test_files:
|
|
test_name = format_test_name(test_file.stem)
|
|
file_info = f" {prefix}{test_name}: {test_file}"
|
|
colored_file_info = Colors.colorize(file_info, Colors.MAGENTA)
|
|
print(colored_file_info)
|
|
|
|
start_msg = "\n🚀 Starting test execution...\n"
|
|
colored_start = Colors.colorize(start_msg, Colors.BLUE + Colors.BOLD)
|
|
print(colored_start)
|
|
|
|
total_tests = failed_tests = 0
|
|
test_results = []
|
|
|
|
for test_file, prefix in test_files:
|
|
test_name = format_test_name(test_file.stem)
|
|
full_test_name = f"{prefix}{test_name}"
|
|
|
|
if not silent and not yaml_output:
|
|
header_msg = f"=== {full_test_name} ==="
|
|
colored_header = Colors.colorize(header_msg, Colors.CYAN + Colors.BOLD)
|
|
print(f"\n{colored_header}")
|
|
|
|
running_msg = f"🎯 Running: {test_file}"
|
|
colored_running = Colors.colorize(running_msg, Colors.BLUE)
|
|
print(colored_running)
|
|
|
|
try:
|
|
result = run_command(
|
|
["godot", "--headless", "--script", str(test_file)],
|
|
project_root,
|
|
timeout=60,
|
|
)
|
|
|
|
if result.returncode == 0:
|
|
if not silent and not yaml_output:
|
|
pass_msg = f"✅ PASSED: {full_test_name}"
|
|
colored_pass = Colors.colorize(pass_msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_pass)
|
|
test_results.append((full_test_name, True, ""))
|
|
else:
|
|
if not silent and not yaml_output:
|
|
fail_msg = f"❌ FAILED: {full_test_name}"
|
|
colored_fail = Colors.colorize(fail_msg, Colors.RED + Colors.BOLD)
|
|
print(colored_fail)
|
|
elif silent and not yaml_output:
|
|
print(f"❌ {full_test_name}")
|
|
failed_tests += 1
|
|
error_msg = (result.stderr + result.stdout).strip() or "Unknown error"
|
|
test_results.append((full_test_name, False, error_msg))
|
|
|
|
total_tests += 1
|
|
|
|
except subprocess.TimeoutExpired:
|
|
if not silent and not yaml_output:
|
|
timeout_msg = f"⏰ FAILED: {full_test_name} (TIMEOUT)"
|
|
colored_timeout = Colors.colorize(timeout_msg, Colors.RED + Colors.BOLD)
|
|
print(colored_timeout)
|
|
elif silent and not yaml_output:
|
|
print(f"⏰ {full_test_name} (TIMEOUT)")
|
|
failed_tests += 1
|
|
test_results.append((full_test_name, False, "Test timed out"))
|
|
total_tests += 1
|
|
except FileNotFoundError:
|
|
if not yaml_output and not silent:
|
|
error_msg = "❌ ERROR: Godot not found"
|
|
colored_error = Colors.colorize(error_msg, Colors.RED + Colors.BOLD)
|
|
print(colored_error)
|
|
return False, {}
|
|
except Exception as e:
|
|
if not silent and not yaml_output:
|
|
exc_msg = f"💥 FAILED: {full_test_name} (ERROR: {e})"
|
|
colored_exc = Colors.colorize(exc_msg, Colors.RED + Colors.BOLD)
|
|
print(colored_exc)
|
|
elif silent and not yaml_output:
|
|
print(f"💥 {full_test_name} (ERROR: {e})")
|
|
failed_tests += 1
|
|
test_results.append((full_test_name, False, str(e)))
|
|
total_tests += 1
|
|
|
|
if not silent and not yaml_output:
|
|
print()
|
|
|
|
# Summary
|
|
passed_tests = total_tests - failed_tests
|
|
stats = {
|
|
"Total Tests Run": total_tests,
|
|
"Tests Passed": passed_tests,
|
|
"Tests Failed": failed_tests,
|
|
}
|
|
|
|
success = failed_tests == 0
|
|
|
|
if yaml_output:
|
|
yaml_results = {**stats, "results": test_results}
|
|
if failed_tests > 0:
|
|
yaml_results["failed_test_details"] = [
|
|
{"test": name, "error": error}
|
|
for name, passed, error in test_results
|
|
if not passed and error
|
|
]
|
|
|
|
output_yaml_results("test", yaml_results, success)
|
|
else:
|
|
print_summary("Test Execution Summary", stats, silent)
|
|
if not silent:
|
|
print()
|
|
if success:
|
|
msg = "🎉 ALL TESTS PASSED!"
|
|
colored_msg = Colors.colorize(msg, Colors.GREEN + Colors.BOLD)
|
|
print(colored_msg)
|
|
else:
|
|
msg = f"💥 {failed_tests} TEST(S) FAILED"
|
|
colored_msg = Colors.colorize(msg, Colors.RED + Colors.BOLD)
|
|
print(colored_msg)
|
|
|
|
return success, {**stats, "results": test_results}
|
|
|
|
|
|
async def run_tests_async(
    project_root: Path, silent: bool = False, yaml_output: bool = False
) -> Tuple[bool, Dict]:
    """Simplified async test runner."""
    if not yaml_output:
        print_header("🧪 GDScript Test Runner (Async)", silent)

    test_files = discover_test_files(project_root)
    if not test_files:
        return True, {
            "Total Tests Run": 0,
            "Tests Passed": 0,
            "Tests Failed": 0,
            "results": [],
        }

    if not silent and not yaml_output:
        print(
            f"{Colors.colorize(f'Found {len(test_files)} test files', Colors.BLUE)}\n"
        )

    semaphore = asyncio.Semaphore(3)  # Limit concurrent tests
    test_results = []

    async def run_single_test(test_file: Path, prefix: str) -> dict:
        async with semaphore:
            test_name = f"{prefix}{format_test_name(test_file.stem)}"
            display_output = []

            try:
                returncode, stdout, stderr = await run_command_async(
                    ["godot", "--headless", "--script", str(test_file)],
                    project_root,
                    timeout=60,
                )

                success = returncode == 0
                error_msg = (stderr + stdout).strip() if not success else ""

                # Build synchronized output
                display_output.append(f"🧪 Running: {test_name}")
                if success:
                    display_output.append(" ✅ PASSED")
                else:
                    display_output.append(" ❌ FAILED")
                    if error_msg:
                        for line in error_msg.split("\n"):
                            if line.strip():
                                display_output.append(
                                    f" {Colors.colorize(line, Colors.RED)}"
                                )

                return {
                    "name": test_name,
                    "success": success,
                    "error": error_msg,
                    "display": "\n".join(display_output),
                }
            except Exception as e:
                display_output.append(f"🧪 Running: {test_name}")
                display_output.append(f" 💥 ERROR: {e}")
                return {
                    "name": test_name,
                    "success": False,
                    "error": str(e),
                    "display": "\n".join(display_output),
                }

    # Run tests concurrently
    tasks = [run_single_test(test_file, prefix) for test_file, prefix in test_files]
    results = await asyncio.gather(*tasks)

    # Print synchronized output for each test
    if not yaml_output and not silent:
        for result in results:
            print(result["display"])
            print()  # Add spacing between tests

    # Collect stats
    passed = sum(1 for r in results if r["success"])
    failed = len(results) - passed
    test_results = [(r["name"], r["success"], r["error"]) for r in results]

    stats = {
        "Total Tests Run": len(results),
        "Tests Passed": passed,
        "Tests Failed": failed,
    }

    if yaml_output:
        yaml_results = {**stats, "results": test_results}
        if failed > 0:
            yaml_results["failed_test_details"] = [
                {"test": name, "error": error}
                for name, passed, error in test_results
                if not passed and error
            ]

        output_yaml_results("test", yaml_results, failed == 0)
    elif not silent:
        print_summary("Test Summary", stats)
        print()
        status_msg = (
            "🎉 ALL TESTS PASSED!" if failed == 0 else f"💥 {failed} TEST(S) FAILED"
        )
        print(Colors.colorize(status_msg, Colors.GREEN if failed == 0 else Colors.RED))

    return failed == 0, {**stats, "results": test_results}


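# NOTE: run_tests_async() bounds concurrency with an asyncio.Semaphore so only a
# handful of headless Godot processes run at once, then gathers all results.
# The underlying pattern, as a small standalone sketch (the limit of 3 mirrors
# the value used above; "jobs" is a hypothetical list of coroutine factories):
#
#     sem = asyncio.Semaphore(3)
#
#     async def bounded(job):
#         async with sem:
#             return await job()
#
#     results = await asyncio.gather(*(bounded(job) for job in jobs))

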
def run_workflow(
    project_root: Path,
    steps: List[str],
    silent: bool = False,
    yaml_output: bool = False,
) -> bool:
    """
    Execute development workflow steps in sequence.

    Runs the requested steps in order and continues executing all steps even if
    some fail.

    Args:
        project_root: Path to the project root directory
        steps: List of workflow steps to execute
            ('format', 'lint', 'ruff', 'test', 'validate', 'naming', 'codemap')

    Returns:
        bool: True if all steps completed successfully, False if any failed
    """
    if not silent:
        print_header("🔄 Development Workflow Runner")

    workflow_steps = {
        "lint": (
            "🔍 GDScript linting (gdlint)",
            lambda root: run_lint(root, silent, yaml_output),
        ),
        "format": (
            "🎨 GDScript formatting (gdformat)",
            lambda root: run_format(root, silent, yaml_output),
        ),
        "ruff": (
            "🐍 Python formatting & linting (ruff)",
            lambda root: run_ruff(root, silent, yaml_output),
        ),
        "test": (
            "🧪 Test execution (godot tests)",
            lambda root: run_tests(root, silent, yaml_output),
        ),
        "validate": (
            "📋 File format validation (yaml/toml/json)",
            lambda root: run_validate(root, silent, yaml_output),
        ),
        "naming": (
            "📝 Naming convention check (PascalCase)",
            lambda root: run_naming(root, silent, yaml_output),
        ),
        "codemap": (
            "🗺️ Code map generation (yaml)",
            lambda root: run_codemap(root, silent, yaml_output),
        ),
    }

    if not silent:
        intro_msg = "🚀 This script will run the development workflow:"
        colored_intro = Colors.colorize(intro_msg, Colors.BLUE + Colors.BOLD)
        print(colored_intro)

        for i, step in enumerate(steps, 1):
            step_name = workflow_steps[step][0]
            step_msg = f"{i}. {step_name}"
            colored_step = Colors.colorize(step_msg, Colors.CYAN)
            print(colored_step)
        print()

    start_time = time.time()
    results = {}

    for step in steps:
        step_name, step_func = workflow_steps[step]
        if not silent:
            separator = Colors.colorize("-" * 48, Colors.MAGENTA)
            print(separator)

            running_msg = f"⚡ Running {step_name}"
            colored_running = Colors.colorize(running_msg, Colors.BLUE + Colors.BOLD)
            print(colored_running)
            print(separator)

        success, step_results = step_func(project_root)
        results[step] = step_results

        if not silent:
            if not success:
                fail_msg = f"❌ {step.upper()} FAILED - Continuing with remaining steps"
                colored_fail = Colors.colorize(fail_msg, Colors.RED + Colors.BOLD)
                print(f"\n{colored_fail}")

                warning_msg = (
                    "⚠️ Issues found, but continuing to provide complete feedback"
                )
                colored_warning = Colors.colorize(warning_msg, Colors.YELLOW)
                print(colored_warning)

            status_msg = f"✅ {step_name} completed {'successfully' if success else 'with issues'}"
            colored_status = Colors.colorize(
                status_msg, Colors.GREEN if success else Colors.YELLOW
            )
            print(colored_status)
            print()

    # Final summary
    elapsed_time = time.time() - start_time

    if not silent:
        print_header("📊 Workflow Summary")

    all_success = True
    for step in steps:
        # A step passed if it reported zero failures; the test runners use a
        # "Tests Failed" counter, the other runners report "Failed".
        step_success = (
            results[step].get("Tests Failed", results[step].get("Failed", 0)) == 0
        )
        if not silent:
            status_emoji = "✅" if step_success else "❌"
            status_text = "PASSED" if step_success else "FAILED"
            status_color = Colors.GREEN if step_success else Colors.RED

            step_emoji = {
                "lint": "🔍",
                "format": "🎨",
                "test": "🧪",
                "validate": "📋",
            }.get(step, "📋")
            colored_status = Colors.colorize(
                f"{status_text}", status_color + Colors.BOLD
            )
            print(f"{step_emoji} {step.capitalize()}: {status_emoji} {colored_status}")

        if not step_success:
            all_success = False

    if not silent:
        print()
        if all_success:
            success_msg = "🎉 ALL WORKFLOW STEPS COMPLETED SUCCESSFULLY!"
            colored_success = Colors.colorize(success_msg, Colors.GREEN + Colors.BOLD)
            print(colored_success)

            commit_msg = "🚀 Your code is ready for commit."
            colored_commit = Colors.colorize(commit_msg, Colors.CYAN)
            print(colored_commit)
        else:
            fail_msg = "❌ WORKFLOW COMPLETED WITH FAILURES"
            colored_fail = Colors.colorize(fail_msg, Colors.RED + Colors.BOLD)
            print(colored_fail)

            review_msg = "🔧 Please fix the issues above before committing."
            colored_review = Colors.colorize(review_msg, Colors.RED)
            print(colored_review)

        time_msg = f"⏱️ Elapsed time: {elapsed_time:.1f} seconds"
        colored_time = Colors.colorize(time_msg, Colors.MAGENTA)
        print(f"\n{colored_time}")

    return all_success


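# NOTE: run_workflow() is the synchronous driver that main() falls back to when
# async execution is unavailable. A minimal illustrative invocation (the step
# list here is just an example):
#
#     project_root = Path(__file__).parent.parent
#     ok = run_workflow(project_root, ["format", "lint", "test"])
#     sys.exit(0 if ok else 1)

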
async def run_workflow_async(
    project_root: Path,
    steps: List[str],
    silent: bool = False,
    yaml_output: bool = False,
) -> bool:
    """
    Execute development workflow steps asynchronously.

    Runs the requested steps in order and continues executing all steps even if
    some fail. Uses async functions for improved performance through concurrent
    execution.

    Args:
        project_root: Path to the project root directory
        steps: List of workflow steps to execute
            ('format', 'lint', 'ruff', 'test', 'validate', 'naming', 'codemap')

    Returns:
        bool: True if all steps completed successfully, False if any failed
    """
    if not silent:
        print_header("🔄 Development Workflow Runner (Async)")

    workflow_steps = {
        "lint": (
            "🔍 GDScript linting (gdlint)",
            lambda root: run_lint_async(root, silent, yaml_output),
        ),
        "format": (
            "🎨 GDScript formatting (gdformat)",
            lambda root: run_format_async(root, silent, yaml_output),
        ),
        "ruff": (
            "🐍 Python formatting & linting (ruff)",
            lambda root: run_ruff_async(root, silent, yaml_output),
        ),
        "test": (
            "🧪 Test execution (godot tests)",
            lambda root: run_tests_async(root, silent, yaml_output),
        ),
        "validate": (
            "📋 File format validation (yaml/toml/json)",
            lambda root: run_validate_async(root, silent, yaml_output),
        ),
        "naming": (
            "📝 Naming convention check (PascalCase)",
            lambda root: run_naming(
                root, silent, yaml_output
            ),  # Using sync version - lightweight
        ),
        "codemap": (
            "🗺️ Code map generation (yaml)",
            lambda root: run_codemap(
                root, silent, yaml_output
            ),  # Using sync version - subprocess call
        ),
    }

    if not silent:
        intro_msg = "🚀 This script will run the development workflow (with async optimization):"
        colored_intro = Colors.colorize(intro_msg, Colors.BLUE + Colors.BOLD)
        print(colored_intro)

        for i, step in enumerate(steps, 1):
            step_name = workflow_steps[step][0]
            step_msg = f"{i}. {step_name}"
            colored_step = Colors.colorize(step_msg, Colors.CYAN)
            print(colored_step)
        print()

    start_time = time.time()
    results = {}

    for step in steps:
        step_name, step_func = workflow_steps[step]
        if not silent:
            separator = Colors.colorize("-" * 48, Colors.MAGENTA)
            print(separator)

            running_msg = f"⚡ Running {step_name}"
            colored_running = Colors.colorize(running_msg, Colors.BLUE + Colors.BOLD)
            print(colored_running)
            print(separator)

        # "naming" and "codemap" reuse their synchronous runners, so only await
        # the call when it actually produced a coroutine.
        step_result = step_func(project_root)
        if asyncio.iscoroutine(step_result):
            step_result = await step_result
        success, step_results = step_result
        results[step] = step_results

        if not silent:
            if not success:
                fail_msg = f"❌ {step.upper()} FAILED - Continuing with remaining steps"
                colored_fail = Colors.colorize(fail_msg, Colors.RED + Colors.BOLD)
                print(f"\n{colored_fail}")

                warning_msg = (
                    "⚠️ Issues found, but continuing to provide complete feedback"
                )
                colored_warning = Colors.colorize(warning_msg, Colors.YELLOW)
                print(colored_warning)

            status_msg = f"✅ {step_name} completed {'successfully' if success else 'with issues'}"
            colored_status = Colors.colorize(
                status_msg, Colors.GREEN if success else Colors.YELLOW
            )
            print(colored_status)
            print()

    # Final summary
    elapsed_time = time.time() - start_time

    if not silent:
        print_header("📊 Workflow Summary")

    all_success = True
    for step in steps:
        # A step passed if it reported zero failures; the test runners use a
        # "Tests Failed" counter, the other runners report "Failed".
        step_success = (
            results[step].get("Tests Failed", results[step].get("Failed", 0)) == 0
        )
        if not silent:
            status_emoji = "✅" if step_success else "❌"
            status_text = "PASSED" if step_success else "FAILED"
            status_color = Colors.GREEN if step_success else Colors.RED

            step_emoji = {
                "lint": "🔍",
                "format": "🎨",
                "test": "🧪",
                "validate": "📋",
            }.get(step, "📋")
            colored_status = Colors.colorize(
                f"{status_text}", status_color + Colors.BOLD
            )
            print(f"{step_emoji} {step.capitalize()}: {status_emoji} {colored_status}")

        if not step_success:
            all_success = False

    if not silent:
        print()
        if all_success:
            success_msg = "🎉 ALL WORKFLOW STEPS COMPLETED SUCCESSFULLY! (Async Mode)"
            colored_success = Colors.colorize(success_msg, Colors.GREEN + Colors.BOLD)
            print(colored_success)

            commit_msg = "🚀 Your code is ready for commit."
            colored_commit = Colors.colorize(commit_msg, Colors.CYAN)
            print(colored_commit)
        else:
            fail_msg = "❌ WORKFLOW COMPLETED WITH FAILURES"
            colored_fail = Colors.colorize(fail_msg, Colors.RED + Colors.BOLD)
            print(colored_fail)

            review_msg = "🔧 Please fix the issues above before committing."
            colored_review = Colors.colorize(review_msg, Colors.RED)
            print(colored_review)

        time_msg = f"⏱️ Elapsed time: {elapsed_time:.1f} seconds (Async Mode)"
        colored_time = Colors.colorize(time_msg, Colors.MAGENTA)
        print(f"\n{colored_time}")

    return all_success


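# NOTE: run_workflow_async() is normally driven through main_async() below, but
# it can also be invoked directly from synchronous code, e.g. (illustrative):
#
#     ok = asyncio.run(run_workflow_async(project_root, ["lint", "test"]))

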
async def main_async():
    """Async main entry point."""
    parser = argparse.ArgumentParser(
        description="Run development workflow for Skelly Godot project"
    )
    parser.add_argument(
        "--steps",
        nargs="+",
        choices=["lint", "format", "test", "validate", "ruff", "naming", "codemap"],
        default=["format", "lint", "ruff", "test", "validate", "naming"],
        help="Workflow steps to run",
    )
    parser.add_argument("--lint", action="store_true", help="Run GDScript linting")
    parser.add_argument("--format", action="store_true", help="Run GDScript formatting")
    parser.add_argument("--test", action="store_true", help="Run tests")
    parser.add_argument(
        "--validate", action="store_true", help="Run file format validation"
    )
    parser.add_argument(
        "--ruff", action="store_true", help="Run Python formatting & linting with ruff"
    )
    parser.add_argument(
        "--naming", action="store_true", help="Check PascalCase naming conventions"
    )
    parser.add_argument("--codemap", action="store_true", help="Generate code map YAML")
    parser.add_argument(
        "--silent",
        "-s",
        action="store_true",
        help="Silent mode - hide success messages, only show errors",
    )
    parser.add_argument(
        "--yaml",
        action="store_true",
        help="Output results in machine-readable YAML format",
    )
    parser.add_argument(
        "--async-mode",
        action="store_true",
        help="Use async mode for faster execution (default: auto-detect)",
    )

    args = parser.parse_args()
    project_root = Path(__file__).parent.parent

    # Determine steps to run
    if args.lint:
        steps = ["lint"]
    elif args.format:
        steps = ["format"]
    elif args.test:
        steps = ["test"]
    elif args.validate:
        steps = ["validate"]
    elif args.ruff:
        steps = ["ruff"]
    elif args.naming:
        steps = ["naming"]
    elif args.codemap:
        steps = ["codemap"]
    else:
        steps = args.steps

    # Use async mode if available and requested (or default to async for multiple steps)
    use_async = args.async_mode or (len(steps) > 1 and aiofiles is not None)

    # Run workflow or individual step
    if len(steps) == 1:
        if use_async:
            step_funcs = {
                "lint": lambda root: run_lint_async(root, args.silent, args.yaml),
                "format": lambda root: run_format_async(root, args.silent, args.yaml),
                "test": lambda root: run_tests_async(root, args.silent, args.yaml),
                "validate": lambda root: run_validate_async(
                    root, args.silent, args.yaml
                ),
                "ruff": lambda root: run_ruff_async(root, args.silent, args.yaml),
                "naming": lambda root: run_naming(root, args.silent, args.yaml),
                "codemap": lambda root: run_codemap(root, args.silent, args.yaml),
            }
            # "naming" and "codemap" only have synchronous runners, so only
            # await the call when it actually produced a coroutine.
            result = step_funcs[steps[0]](project_root)
            if asyncio.iscoroutine(result):
                result = await result
            success, _ = result
        else:
            step_funcs = {
                "lint": lambda root: run_lint(root, args.silent, args.yaml),
                "format": lambda root: run_format(root, args.silent, args.yaml),
                "test": lambda root: run_tests(root, args.silent, args.yaml),
                "validate": lambda root: run_validate(root, args.silent, args.yaml),
                "ruff": lambda root: run_ruff(root, args.silent, args.yaml),
                "naming": lambda root: run_naming(root, args.silent, args.yaml),
                "codemap": lambda root: run_codemap(root, args.silent, args.yaml),
            }
            success, _ = step_funcs[steps[0]](project_root)
    else:
        if use_async:
            success = await run_workflow_async(
                project_root, steps, args.silent, args.yaml
            )
        else:
            success = run_workflow(project_root, steps, args.silent, args.yaml)

    return 0 if success else 1


def main():
    """Main entry point that automatically uses async mode when beneficial."""
    try:
        # Try to run in async mode for better performance
        exit_code = asyncio.run(main_async())
        sys.exit(exit_code)
    except Exception:
        # Fall back to synchronous mode if async fails
        parser = argparse.ArgumentParser(
            description="Run development workflow for Skelly Godot project"
        )
        parser.add_argument(
            "--steps",
            nargs="+",
            choices=["lint", "format", "test", "validate", "ruff", "naming", "codemap"],
            default=["format", "lint", "ruff", "test", "validate", "naming"],
            help="Workflow steps to run",
        )
        parser.add_argument("--lint", action="store_true", help="Run GDScript linting")
        parser.add_argument(
            "--format", action="store_true", help="Run GDScript formatting"
        )
        parser.add_argument("--test", action="store_true", help="Run tests")
        parser.add_argument(
            "--validate", action="store_true", help="Run file format validation"
        )
        parser.add_argument(
            "--ruff",
            action="store_true",
            help="Run Python formatting & linting with ruff",
        )
        parser.add_argument(
            "--naming", action="store_true", help="Check PascalCase naming conventions"
        )
        parser.add_argument(
            "--codemap", action="store_true", help="Generate code map YAML"
        )
        parser.add_argument(
            "--silent",
            "-s",
            action="store_true",
            help="Silent mode - hide success messages, only show errors",
        )
        parser.add_argument(
            "--yaml",
            action="store_true",
            help="Output results in machine-readable YAML format",
        )
        parser.add_argument(
            "--async-mode",
            action="store_true",
            help="Use async mode for faster execution (default: auto-detect)",
        )

        args = parser.parse_args()
        project_root = Path(__file__).parent.parent

        # Determine steps to run
        if args.lint:
            steps = ["lint"]
        elif args.format:
            steps = ["format"]
        elif args.test:
            steps = ["test"]
        elif args.validate:
            steps = ["validate"]
        elif args.ruff:
            steps = ["ruff"]
        elif args.naming:
            steps = ["naming"]
        elif args.codemap:
            steps = ["codemap"]
        else:
            steps = args.steps

        # Run workflow or individual step (synchronous fallback)
        if len(steps) == 1:
            step_funcs = {
                "lint": lambda root: run_lint(root, args.silent, args.yaml),
                "format": lambda root: run_format(root, args.silent, args.yaml),
                "test": lambda root: run_tests(root, args.silent, args.yaml),
                "validate": lambda root: run_validate(root, args.silent, args.yaml),
                "ruff": lambda root: run_ruff(root, args.silent, args.yaml),
                "naming": lambda root: run_naming(root, args.silent, args.yaml),
                "codemap": lambda root: run_codemap(root, args.silent, args.yaml),
            }
            success, _ = step_funcs[steps[0]](project_root)
        else:
            success = run_workflow(project_root, steps, args.silent, args.yaml)

        sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()