fix(codex): write model_catalog_json in the correct Codex internal format
Previous attempt used OpenAI API format (bare array of {id, object}).
Codex expects ModelsResponse format: {"models": [{slug, display_name,
visibility, shell_type, supported_reasoning_levels, ...}]}.
Format reverse-engineered from codex-rs/core/models.json in official repo.
All 4 models (gpt-5.4, gpt-5.3-codex-spark, gpt-5.3-codex, gpt-5.2-codex)
now appear in interactive model picker.
Cleanup logic detects the old bare-array format and replaces it automatically.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -44,11 +44,56 @@ RESET = "\033[0m"
|
||||
# Config keys owned by this tool: we rewrite these on every run and leave
# all other keys in the user's config.toml untouched.
MANAGED_TOP_KEYS = {
    "model",
    "model_reasoning_effort",
    "model_provider",
    "model_catalog_json",
    "approval_policy",
    "sandbox_mode",
    "check_for_update_on_startup",
    "forced_login_method",
}

# TOML table sections that are likewise fully managed (regenerated) by us.
MANAGED_SECTIONS = {"analytics", "model_providers"}
|
||||
|
||||
# Model catalog template (Codex internal format from codex-rs/core/models.json).
# Per-model fields (slug, display_name, description, priority) are filled in by
# generate_model_catalog(); everything below is the shared default contract.
MODEL_TEMPLATE = {
    "prefer_websockets": False,
    "support_verbosity": True,
    "default_verbosity": "low",
    "apply_patch_tool_type": "freeform",
    "input_modalities": ["text", "image"],
    "supports_image_detail_original": True,
    "truncation_policy": {"mode": "tokens", "limit": 10000},
    "supports_parallel_tool_calls": True,
    "context_window": 272000,
    "default_reasoning_summary": "none",
    "shell_type": "shell_command",
    "visibility": "list",  # "list" makes the model appear in the interactive picker
    "supported_in_api": True,
    "availability_nux": None,
    "upgrade": None,
    "priority": 0,
    "base_instructions": "",
    "model_messages": None,
    "experimental_supported_tools": [],
    "supports_reasoning_summaries": True,
    "supported_reasoning_levels": [
        {"effort": "low", "description": "Fast responses with lighter reasoning"},
        {"effort": "medium", "description": "Balances speed and reasoning depth"},
        {"effort": "high", "description": "Greater reasoning depth for complex problems"},
        {"effort": "xhigh", "description": "Extra high reasoning depth"},
    ],
    "default_reasoning_level": "medium",
}


def generate_model_catalog(models):
    """Generate a model catalog in Codex's internal ModelsResponse format.

    Args:
        models: Iterable of model slug strings, in the order they should
            appear in the picker (index becomes the entry's priority).

    Returns:
        A JSON-serializable dict shaped like ``{"models": [entry, ...]}``
        with one entry per slug; an empty iterable yields ``{"models": []}``.
    """
    # Local import keeps the module's import surface unchanged for callers
    # that don't use this helper.
    from copy import deepcopy

    entries = []
    for priority, slug in enumerate(models):
        # deepcopy, not dict(): the template holds nested mutable values
        # (truncation_policy dict, supported_reasoning_levels list of dicts,
        # input_modalities / experimental_supported_tools lists). A shallow
        # copy would share those objects across all entries, so mutating one
        # entry later would silently corrupt every other entry.
        entry = deepcopy(MODEL_TEMPLATE)
        entry["slug"] = slug
        entry["display_name"] = slug
        entry["description"] = f"Model {slug}"
        entry["priority"] = priority
        entries.append(entry)
    return {"models": entries}
|
||||
|
||||
|
||||
# ─── Config Loading ─────────────────────────────────────────────────────
|
||||
|
||||
@@ -163,6 +208,13 @@ def generate_config_toml(existing, config):
|
||||
lines.append(f'model = "{config["model"]}"')
|
||||
lines.append(f'model_reasoning_effort = "{config.get("model_reasoning_effort", "high")}"')
|
||||
lines.append('model_provider = "custom"')
|
||||
|
||||
# Model catalog path (for model picker)
|
||||
codex_dir_path = os.path.join(os.path.expanduser("~"), ".codex")
|
||||
catalog_path = os.path.join(codex_dir_path, "model_catalog.json")
|
||||
catalog_path_toml = catalog_path.replace("\\", "/")
|
||||
lines.append(f'model_catalog_json = "{catalog_path_toml}"')
|
||||
|
||||
lines.append(f'approval_policy = "{config.get("approval_policy", "never")}"')
|
||||
lines.append(f'sandbox_mode = "{config.get("sandbox_mode", "danger-full-access")}"')
|
||||
lines.append(f'check_for_update_on_startup = {toml_value(config.get("check_for_update", False))}')
|
||||
@@ -458,20 +510,17 @@ def apply_all_patches(config, home_dir=None):
|
||||
print(f" Proxy: {config['base_url']}")
|
||||
print()
|
||||
|
||||
# Clean up stale model_catalog.json from previous broken installs
|
||||
stale_catalog = os.path.join(codex_dir, "model_catalog.json")
|
||||
if os.path.isfile(stale_catalog):
|
||||
os.remove(stale_catalog)
|
||||
print(f" {YELLOW}Removed stale model_catalog.json{RESET}")
|
||||
# Generate model catalog JSON for model picker (Codex internal format)
|
||||
catalog_path = os.path.join(codex_dir, "model_catalog.json")
|
||||
models = config.get("models", [config["model"]])
|
||||
catalog = generate_model_catalog(models)
|
||||
with open(catalog_path, "w", encoding="utf-8") as f:
|
||||
json.dump(catalog, f, indent=2)
|
||||
print(f" {'[OK]':>8} Catalog: {catalog_path} ({len(models)} models)")
|
||||
|
||||
# Read existing config
|
||||
existing = read_toml(config_path)
|
||||
|
||||
# Remove model_catalog_json if present (wrong format crashes Codex)
|
||||
if "model_catalog_json" in existing:
|
||||
del existing["model_catalog_json"]
|
||||
print(f" {YELLOW}Removed model_catalog_json from config (unsupported format){RESET}")
|
||||
|
||||
# Backup before any changes
|
||||
backup_file(config_path)
|
||||
|
||||
@@ -581,6 +630,12 @@ def patch_user(user_home, config):
|
||||
codex_dir = os.path.join(user_home, ".codex")
|
||||
os.makedirs(codex_dir, exist_ok=True)
|
||||
|
||||
# Generate model catalog
|
||||
catalog_path = os.path.join(codex_dir, "model_catalog.json")
|
||||
models = config.get("models", [config["model"]])
|
||||
with open(catalog_path, "w", encoding="utf-8") as f:
|
||||
json.dump(generate_model_catalog(models), f, indent=2)
|
||||
|
||||
config_path = os.path.join(codex_dir, "config.toml")
|
||||
existing = read_toml(config_path)
|
||||
backup_file(config_path)
|
||||
|
||||
Reference in New Issue
Block a user