release: Claude Code v2.1.72 (25 patches)
This commit is contained in:
@@ -259,9 +259,9 @@ cd unlimitedcoding
|
||||
<!-- MANUAL_VERSION:START -->
|
||||
| Platform | Command |
|
||||
|----------|---------|
|
||||
| Linux / macOS | `sudo bash claude/releases/v2.1.71/install.sh` |
|
||||
| Windows CMD | `claude\releases\v2.1.71\install.bat` |
|
||||
| Windows PowerShell | `powershell -ExecutionPolicy Bypass -File claude\releases\v2.1.71\install.ps1` |
|
||||
| Linux / macOS | `sudo bash claude/releases/v2.1.72/install.sh` |
|
||||
| Windows CMD | `claude\releases\v2.1.72\install.bat` |
|
||||
| Windows PowerShell | `powershell -ExecutionPolicy Bypass -File claude\releases\v2.1.72\install.ps1` |
|
||||
<!-- MANUAL_VERSION:END -->
|
||||
|
||||
## Update
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
🌐 [English](README.md) | [Русский](README_ru.md) | [中文](README_zh.md) | [Español](README_es.md)
|
||||
|
||||
<!-- VERSION_BADGE:START -->
|
||||
Patched Claude Code CLI for use with custom API endpoints. Latest: **v2.1.71** (25 patches).
|
||||
Patched Claude Code CLI for use with custom API endpoints. Latest: **v2.1.72** (25 patches).
|
||||
<!-- VERSION_BADGE:END -->
|
||||
|
||||
## Install
|
||||
|
||||
@@ -22,5 +22,6 @@
|
||||
"timeout_ms": 3000000,
|
||||
"theme": "dark",
|
||||
"complete_onboarding": true,
|
||||
"target_version": "2.1.71"
|
||||
"target_version": "2.1.72",
|
||||
"effort_level": "high"
|
||||
}
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
{
|
||||
"latest": "2.1.71",
|
||||
"latest": "2.1.72",
|
||||
"releases": [
|
||||
{
|
||||
"version": "2.1.72",
|
||||
"date": "2026-03-10",
|
||||
"patches": 25,
|
||||
"status": "stable"
|
||||
},
|
||||
{
|
||||
"version": "2.1.71",
|
||||
"date": "2026-03-07",
|
||||
|
||||
53
claude/releases/v2.1.72/CHANGELOG_UPSTREAM.md
Normal file
53
claude/releases/v2.1.72/CHANGELOG_UPSTREAM.md
Normal file
@@ -0,0 +1,53 @@
|
||||
## 2.1.72
|
||||
|
||||
- Fixed tool search to activate even with `ANTHROPIC_BASE_URL` as long as `ENABLE_TOOL_SEARCH` is set.
|
||||
- Added `w` key in `/copy` to write the focused selection directly to a file, bypassing the clipboard (useful over SSH)
|
||||
- Added optional description argument to `/plan` (e.g., `/plan fix the auth bug`) that enters plan mode and immediately starts
|
||||
- Added `ExitWorktree` tool to leave an `EnterWorktree` session
|
||||
- Added `CLAUDE_CODE_DISABLE_CRON` environment variable to immediately stop scheduled cron jobs mid-session
|
||||
- Added `lsof`, `pgrep`, `tput`, `ss`, `fd`, and `fdfind` to the bash auto-approval allowlist, reducing permission prompts for common read-only operations
|
||||
- Restored the `model` parameter on the Agent tool for per-invocation model overrides
|
||||
- Simplified effort levels to low/medium/high (removed max) with new symbols (○ ◐ ●) and a brief notification instead of a persistent icon. Use `/effort auto` to reset to default
|
||||
- Improved `/config` — Escape now cancels changes, Enter saves and closes, Space toggles settings
|
||||
- Improved up-arrow history to show current session's messages first when running multiple concurrent sessions
|
||||
- Improved voice input transcription accuracy for repo names and common dev terms (regex, OAuth, JSON)
|
||||
- Improved bash command parsing by switching to a native module — faster initialization and no memory leak
|
||||
- Reduced bundle size by ~510 KB
|
||||
- Changed CLAUDE.md HTML comments (`<!-- ... -->`) to be hidden from Claude when auto-injected. Comments remain visible when read with the Read tool
|
||||
- Fixed slow exits when background tasks or hooks were slow to respond
|
||||
- Fixed agent task progress stuck on "Initializing…"
|
||||
- Fixed skill hooks firing twice per event when a hooks-enabled skill is invoked by the model
|
||||
- Fixed several voice mode issues: occasional input lag, false "No speech detected" errors after releasing push-to-talk, and stale transcripts re-filling the prompt after submission
|
||||
- Fixed `--continue` not resuming from the most recent point after `--compact`
|
||||
- Fixed bash security parsing edge cases
|
||||
- Added support for marketplace git URLs without `.git` suffix (Azure DevOps, AWS CodeCommit)
|
||||
- Improved marketplace clone failure messages to show diagnostic info even when git produces no stderr
|
||||
- Fixed several plugin issues: installation failing on Windows with `EEXIST` error in OneDrive folders, marketplace blocking user-scope installs when a project-scope install exists, `CLAUDE_CODE_PLUGIN_CACHE_DIR` creating literal `~` directories, and `plugin.json` with marketplace-only fields failing to load
|
||||
- Fixed feedback survey appearing too frequently in long sessions
|
||||
- Fixed `--effort` CLI flag being reset by unrelated settings writes on startup
|
||||
- Fixed backgrounded Ctrl+B queries losing their transcript or corrupting the new conversation after `/clear`
|
||||
- Fixed `/clear` killing background agent/bash tasks — only foreground tasks are now cleared
|
||||
- Fixed worktree isolation issues: Task tool resume not restoring cwd, and background task notifications missing `worktreePath` and `worktreeBranch`
|
||||
- Fixed `/model` not displaying results when run while Claude is working
|
||||
- Fixed digit keys selecting menu options instead of typing in plan mode permission prompt's text input
|
||||
- Fixed sandbox permission issues: certain file write operations incorrectly allowed without prompting, and output redirections to allowlisted directories (like `/tmp/claude/`) prompting unnecessarily
|
||||
- Improved CPU utilization in long sessions
|
||||
- Fixed prompt cache invalidation in SDK `query()` calls, reducing input token costs up to 12x
|
||||
- Fixed Escape key becoming unresponsive after cancelling a query
|
||||
- Fixed double Ctrl+C not exiting when background agents or tasks are running
|
||||
- Fixed team agents to inherit the leader's model
|
||||
- Fixed "Always Allow" saving permission rules that never match again
|
||||
- Fixed several hooks issues: `transcript_path` pointing to the wrong directory for resumed/forked sessions, agent `prompt` being silently deleted from settings.json on every settings write, PostToolUse block reason displaying twice, async hooks not receiving stdin with bash `read -r`, and validation error message showing an example that fails validation
|
||||
- Fixed session crashes in Desktop/SDK when Read returned files containing U+2028/U+2029 characters
|
||||
- Fixed terminal title being cleared on exit even when `CLAUDE_CODE_DISABLE_TERMINAL_TITLE` was set
|
||||
- Fixed several permission rule matching issues: wildcard rules not matching commands with heredocs, embedded newlines, or no arguments; `sandbox.excludedCommands` failing with env var prefixes; "always allow" suggesting overly broad prefixes for nested CLI tools; and deny rules not applying to all command forms
|
||||
- Fixed oversized and truncated images from Bash data-URL output
|
||||
- Fixed a crash when resuming sessions that contained Bedrock API errors
|
||||
- Fixed intermittent "expected boolean, received string" validation errors on Edit, Bash, and Grep tool inputs
|
||||
- Fixed multi-line session titles when forking from a conversation whose first message contained newlines
|
||||
- Fixed queued messages not showing attached images, and images being lost when pressing ↑ to edit a queued message
|
||||
- Fixed parallel tool calls where a failed Read/WebFetch/Glob would cancel its siblings — only Bash errors now cascade
|
||||
- VSCode: Fixed scroll speed in integrated terminals not matching native terminals
|
||||
- VSCode: Fixed Shift+Enter submitting input instead of inserting a newline for users with older keybindings
|
||||
- VSCode: Added effort level indicator on the input border
|
||||
- VSCode: Added `vscode://anthropic.claude-code/open` URI handler to open a new Claude Code tab programmatically, with optional `prompt` and `session` query parameters
|
||||
13797
claude/releases/v2.1.72/cli.js
Executable file
13797
claude/releases/v2.1.72/cli.js
Executable file
File diff suppressed because one or more lines are too long
36
claude/releases/v2.1.72/install.sh
Executable file
36
claude/releases/v2.1.72/install.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
# Claude Code Patcher — standalone installer for v2.1.72
|
||||
# Usage: sudo bash install.sh [--all] [--skip-cli]
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
CLI_JS="$SCRIPT_DIR/cli.js"
|
||||
PATCHER="$SCRIPT_DIR/claude_code_patcher.py"
|
||||
CONFIG="$SCRIPT_DIR/patcher.config.json"
|
||||
|
||||
TARGET="/usr/lib/node_modules/@anthropic-ai/claude-code/cli.js"
|
||||
[ ! -f "$TARGET" ] && TARGET="/usr/local/lib/node_modules/@anthropic-ai/claude-code/cli.js"
|
||||
[ ! -f "$TARGET" ] && TARGET="/opt/homebrew/lib/node_modules/@anthropic-ai/claude-code/cli.js"
|
||||
|
||||
if [ ! -f "$TARGET" ]; then
|
||||
echo "ERROR: Claude Code cli.js not found. Install Claude Code first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install patched cli.js
|
||||
if [ "$1" != "--skip-cli" ]; then
|
||||
echo "Installing patched cli.js..."
|
||||
BACKUP="$TARGET.bak.$(date +%Y%m%d%H%M%S)"
|
||||
cp "$TARGET" "$BACKUP"
|
||||
cp "$CLI_JS" "$TARGET"
|
||||
node --check "$TARGET" || { cp "$BACKUP" "$TARGET"; echo "SYNTAX ERROR, rolled back"; exit 1; }
|
||||
echo "Installed. Backup: $BACKUP"
|
||||
fi
|
||||
|
||||
# Apply user settings
|
||||
if [ -f "$PATCHER" ] && [ -f "$CONFIG" ]; then
|
||||
echo "Applying user settings..."
|
||||
python3 "$PATCHER" --config "$CONFIG" --all --skip-cli-patch --skip-update
|
||||
fi
|
||||
|
||||
echo "Done. Run 'claude --version' to verify."
|
||||
211
codex_old/README.md
Executable file
211
codex_old/README.md
Executable file
@@ -0,0 +1,211 @@
|
||||
# Codex CLI — Patched
|
||||
|
||||
Patched OpenAI Codex CLI for use with custom API endpoints.
|
||||
Latest: **v0.111.0** (6 config patches).
|
||||
|
||||
> Codex CLI — это compiled Rust binary. В отличие от Claude Code и Gemini CLI (JavaScript),
|
||||
> патчинг выполняется через `config.toml` + переменные окружения.
|
||||
|
||||
## Установка
|
||||
|
||||
### Требования
|
||||
|
||||
- **Python 3.11+** (для `tomllib`)
|
||||
- **curl** (для скачивания бинарника)
|
||||
- **Linux x86_64 или aarch64** (macOS/Windows — вручную)
|
||||
|
||||
### Быстрая установка (Linux)
|
||||
|
||||
```bash
|
||||
# 1. Клонировать репо (или скачать файлы)
|
||||
git clone https://git.sensey24.ru/aibot777/unlimitedcoding.git
|
||||
cd unlimitedcoding/codex
|
||||
|
||||
# 2. Настроить конфиг — указать свой API endpoint и ключ
|
||||
cp codex_config.example.json codex_config.json
|
||||
nano codex_config.json # Изменить base_url и api_key
|
||||
|
||||
# 3. Установить бинарник + применить патчи
|
||||
sudo bash ucodex_install.sh
|
||||
```
|
||||
|
||||
### Ручная установка (шаг за шагом)
|
||||
|
||||
**Шаг 1 — Установить бинарник Codex CLI:**
|
||||
|
||||
```bash
|
||||
# Скачать последнюю версию с GitHub
|
||||
sudo bash update-codex.sh
|
||||
codex --version # Должно показать: codex-cli 0.111.0
|
||||
```
|
||||
|
||||
**Шаг 2 — Настроить конфиг:**
|
||||
|
||||
```bash
|
||||
cp codex_config.example.json codex_config.json
|
||||
```
|
||||
|
||||
Отредактировать `codex_config.json`:
|
||||
```json
|
||||
{
|
||||
"base_url": "https://your-api-endpoint.example.com",
|
||||
"api_key": "YOUR_API_KEY",
|
||||
"model": "gpt-5.2-codex"
|
||||
}
|
||||
```
|
||||
|
||||
**Шаг 3 — Применить патчи:**
|
||||
|
||||
```bash
|
||||
sudo python3 codex_patcher.py --apply --config codex_config.json
|
||||
```
|
||||
|
||||
Вывод:
|
||||
```
|
||||
Codex CLI Patcher
|
||||
Version: 0.111.0
|
||||
Binary: /usr/local/bin/codex
|
||||
Proxy: https://your-api-endpoint.example.com
|
||||
|
||||
[OK] Target 1: base_url configured
|
||||
[OK] Target 2: codex login: ok
|
||||
[OK] Target 3: analytics disabled
|
||||
[OK] Target 4: approval_policy=never, sandbox=danger-full-access
|
||||
[OK] Target 5: model=gpt-5.2-codex, effort=high
|
||||
[OK] Target 6: Set 2 env var(s) in /etc/environment
|
||||
|
||||
All patches applied successfully!
|
||||
```
|
||||
|
||||
**Шаг 4 — Проверить:**
|
||||
|
||||
```bash
|
||||
# Validate
|
||||
python3 update_codex_patcher.py --validate
|
||||
|
||||
# Test
|
||||
codex exec "What is 2+2? Reply with just the number"
|
||||
```
|
||||
|
||||
## Обновление
|
||||
|
||||
### Обновить бинарник Codex CLI
|
||||
|
||||
```bash
|
||||
cd unlimitedcoding/codex
|
||||
sudo bash update-codex.sh
|
||||
```
|
||||
|
||||
Скрипт автоматически:
|
||||
- Проверяет последнюю версию на GitHub
|
||||
- Скачивает musl-совместимый бинарник
|
||||
- Заменяет `/usr/local/bin/codex`
|
||||
|
||||
### Обновить конфиг-патчи (после обновления бинарника)
|
||||
|
||||
```bash
|
||||
sudo python3 codex_patcher.py --apply
|
||||
```
|
||||
|
||||
### Полный автоматический цикл
|
||||
|
||||
```bash
|
||||
sudo python3 update_codex_patcher.py --auto
|
||||
```
|
||||
|
||||
Выполняет: check → update binary → apply patches → validate → test.
|
||||
|
||||
### Обновить сам патчер (новая версия скриптов)
|
||||
|
||||
```bash
|
||||
cd unlimitedcoding
|
||||
git pull
|
||||
cd codex
|
||||
sudo python3 codex_patcher.py --apply
|
||||
```
|
||||
|
||||
## Что патчится (6 targets)
|
||||
|
||||
| # | Target | Что делает |
|
||||
|---|--------|------------|
|
||||
| 1 | api_endpoint | Redirect API через `[model_providers.custom]` в config.toml |
|
||||
| 2 | authentication | API key auth вместо ChatGPT OAuth (`codex login --with-api-key`) |
|
||||
| 3 | telemetry | Отключить analytics: `[analytics] enabled = false` |
|
||||
| 4 | permissions | `approval_policy = "never"`, `sandbox_mode = "danger-full-access"` |
|
||||
| 5 | model_config | Модель, reasoning effort, отключить auto-update |
|
||||
| 6 | system_env | `/etc/environment`: `OPENAI_BASE_URL`, `OPENAI_API_KEY` |
|
||||
|
||||
## Конфигурация
|
||||
|
||||
Файл `~/.codex/config.toml` (генерируется патчером):
|
||||
|
||||
```toml
|
||||
model = "gpt-5.2-codex"
|
||||
model_reasoning_effort = "high"
|
||||
model_provider = "custom"
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "danger-full-access"
|
||||
check_for_update_on_startup = false
|
||||
forced_login_method = "api"
|
||||
|
||||
[analytics]
|
||||
enabled = false
|
||||
|
||||
[model_providers.custom]
|
||||
name = "custom"
|
||||
base_url = "https://your-api-endpoint.example.com/v1"
|
||||
env_key = "OPENAI_API_KEY"
|
||||
wire_api = "responses"
|
||||
```
|
||||
|
||||
## Rollback
|
||||
|
||||
Восстановить оригинальный конфиг:
|
||||
|
||||
```bash
|
||||
python3 codex_patcher.py --rollback
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Error loading configuration: missing field `name`"
|
||||
Обновите патчер (`git pull`) и перезапустите: `python3 codex_patcher.py --apply`
|
||||
|
||||
### codex не запускается (segfault / GLIBC)
|
||||
Скрипт `update-codex.sh` скачивает musl-версию для совместимости. Если проблема остаётся:
|
||||
```bash
|
||||
# Проверить бинарник
|
||||
file /usr/local/bin/codex
|
||||
ldd /usr/local/bin/codex # Должно быть "not a dynamic executable" (static)
|
||||
```
|
||||
|
||||
### "OPENAI_API_KEY not set"
|
||||
```bash
|
||||
# Вариант 1: Перезайти в shell (env vars из /etc/environment)
|
||||
source /etc/environment
|
||||
|
||||
# Вариант 2: Установить вручную
|
||||
export OPENAI_BASE_URL="https://your-endpoint/v1"
|
||||
export OPENAI_API_KEY="your-key"
|
||||
```
|
||||
|
||||
### Permissions промпты всё ещё появляются
|
||||
Убедитесь что config.toml содержит `approval_policy = "never"`:
|
||||
```bash
|
||||
cat ~/.codex/config.toml | grep approval
|
||||
```
|
||||
|
||||
## Структура файлов
|
||||
|
||||
```
|
||||
codex/
|
||||
├── codex_patcher.py # Главный патчер (6 targets)
|
||||
├── codex_config.json # Ваш конфиг (не коммитить!)
|
||||
├── codex_config.example.json # Пример конфига
|
||||
├── update_codex_patcher.py # Pipeline CLI
|
||||
├── update-codex.sh # Бинарный updater с GitHub
|
||||
├── ucodex_install.sh # One-liner installer
|
||||
└── updater/
|
||||
├── __init__.py
|
||||
└── config_validator.py # Валидация 6 targets
|
||||
```
|
||||
19
codex_old/codex_config.example.json
Executable file
19
codex_old/codex_config.example.json
Executable file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"base_url": "https://your-api-endpoint.example.com",
|
||||
"api_key": "YOUR_API_KEY",
|
||||
"model": "gpt-5.2-codex",
|
||||
"models": [
|
||||
"gpt-5.4",
|
||||
"gpt-5.3-codex-spark",
|
||||
"gpt-5.3-codex",
|
||||
"gpt-5.2-codex"
|
||||
],
|
||||
"model_reasoning_effort": "high",
|
||||
"approval_policy": "never",
|
||||
"sandbox_mode": "danger-full-access",
|
||||
"wire_api": "responses",
|
||||
"telemetry_enabled": false,
|
||||
"check_for_update": false,
|
||||
"trust_paths": ["/home", "/root", "/tmp"],
|
||||
"target_version": "0.111.0"
|
||||
}
|
||||
19
codex_old/codex_config.json
Executable file
19
codex_old/codex_config.json
Executable file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"base_url": "https://ai.37-187-136-86.sslip.io",
|
||||
"api_key": "ClauderAPI",
|
||||
"model": "gpt-5.2-codex",
|
||||
"models": [
|
||||
"gpt-5.3-codex",
|
||||
"gpt-5.2-codex",
|
||||
"o3",
|
||||
"o4-mini"
|
||||
],
|
||||
"model_reasoning_effort": "high",
|
||||
"approval_policy": "never",
|
||||
"sandbox_mode": "danger-full-access",
|
||||
"wire_api": "responses",
|
||||
"telemetry_enabled": false,
|
||||
"check_for_update": false,
|
||||
"trust_paths": ["/home", "/root", "/tmp"],
|
||||
"target_version": "0.111.0"
|
||||
}
|
||||
619
codex_old/codex_patcher.py
Executable file
619
codex_old/codex_patcher.py
Executable file
@@ -0,0 +1,619 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Codex CLI Patcher — config+env patching for Codex CLI (Rust binary).
|
||||
|
||||
Unlike Claude Code and Gemini CLI (JavaScript), Codex is a compiled Rust binary.
|
||||
Patching is done via config.toml manipulation and environment variable injection.
|
||||
|
||||
Targets:
|
||||
1. api_endpoint — redirect API to custom proxy via model_providers
|
||||
2. authentication — API key auth via codex login
|
||||
3. telemetry — disable analytics
|
||||
4. permissions — bypass approvals + sandbox
|
||||
5. model_config — model, reasoning, disable auto-update
|
||||
6. system_env — /etc/environment vars
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import platform
|
||||
import subprocess
|
||||
import argparse
|
||||
import tomllib
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
# ─── Constants ──────────────────────────────────────────────────────────
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||
CONFIG_PATH = SCRIPT_DIR / "codex_config.json"
|
||||
|
||||
IS_WINDOWS = platform.system() == "Windows"
|
||||
IS_MACOS = platform.system() == "Darwin"
|
||||
|
||||
# ANSI colors
|
||||
GREEN = "\033[92m"
|
||||
YELLOW = "\033[93m"
|
||||
RED = "\033[91m"
|
||||
CYAN = "\033[96m"
|
||||
BOLD = "\033[1m"
|
||||
RESET = "\033[0m"
|
||||
|
||||
# Managed config keys (we update these, preserve everything else)
|
||||
MANAGED_TOP_KEYS = {
|
||||
"model", "model_reasoning_effort", "model_provider",
|
||||
"approval_policy", "sandbox_mode",
|
||||
"check_for_update_on_startup", "forced_login_method",
|
||||
}
|
||||
MANAGED_SECTIONS = {"analytics", "model_providers"}
|
||||
|
||||
|
||||
# ─── Config Loading ─────────────────────────────────────────────────────
|
||||
|
||||
def load_config(config_path=None):
|
||||
"""Load codex_config.json."""
|
||||
path = Path(config_path) if config_path else CONFIG_PATH
|
||||
if not path.is_file():
|
||||
print(f"{RED}Config not found: {path}{RESET}")
|
||||
sys.exit(1)
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
# ─── Detection ──────────────────────────────────────────────────────────
|
||||
|
||||
def detect_codex():
|
||||
"""Find codex binary. Returns (binary_path, version) or exits."""
|
||||
# Try which/where
|
||||
cmd = "where" if IS_WINDOWS else "which"
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[cmd, "codex"], capture_output=True, text=True, timeout=5
|
||||
)
|
||||
binary_path = result.stdout.strip().split("\n")[0] if result.returncode == 0 else None
|
||||
except Exception:
|
||||
binary_path = None
|
||||
|
||||
if not binary_path:
|
||||
# Common fallback paths
|
||||
for p in ["/usr/local/bin/codex", "/usr/bin/codex"]:
|
||||
if os.path.isfile(p):
|
||||
binary_path = p
|
||||
break
|
||||
|
||||
if not binary_path:
|
||||
print(f"{RED}Codex CLI not found. Install: https://github.com/openai/codex{RESET}")
|
||||
sys.exit(1)
|
||||
|
||||
# Get version
|
||||
version = "unknown"
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[binary_path, "--version"], capture_output=True, text=True, timeout=10
|
||||
)
|
||||
# Output: "codex-cli 0.111.0"
|
||||
if result.returncode == 0:
|
||||
parts = result.stdout.strip().split()
|
||||
if len(parts) >= 2:
|
||||
version = parts[-1]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return binary_path, version
|
||||
|
||||
|
||||
# ─── TOML Read/Write ────────────────────────────────────────────────────
|
||||
|
||||
def read_toml(path):
|
||||
"""Read TOML file. Returns dict or empty dict if not found."""
|
||||
if not os.path.isfile(path):
|
||||
return {}
|
||||
with open(path, "rb") as f:
|
||||
return tomllib.load(f)
|
||||
|
||||
|
||||
def read_toml_raw(path):
|
||||
"""Read TOML file as raw text. Returns string or empty string."""
|
||||
if not os.path.isfile(path):
|
||||
return ""
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
def toml_value(v):
|
||||
"""Format a Python value as TOML."""
|
||||
if isinstance(v, bool):
|
||||
return "true" if v else "false"
|
||||
if isinstance(v, str):
|
||||
return f'"{v}"'
|
||||
if isinstance(v, (int, float)):
|
||||
return str(v)
|
||||
if isinstance(v, list):
|
||||
items = ", ".join(toml_value(i) for i in v)
|
||||
return f"[{items}]"
|
||||
return str(v)
|
||||
|
||||
|
||||
def generate_config_toml(existing, config):
|
||||
"""Generate config.toml content, merging with existing user config.
|
||||
|
||||
Strategy:
|
||||
- Update managed top-level keys
|
||||
- Update managed sections ([analytics], [model_providers.custom])
|
||||
- Add trust paths to [projects.*]
|
||||
- Preserve all other user-defined content
|
||||
"""
|
||||
lines = ["# Codex CLI Configuration (managed by codex_patcher.py)"]
|
||||
|
||||
# Top-level managed keys
|
||||
lines.append(f'model = "{config["model"]}"')
|
||||
lines.append(f'model_reasoning_effort = "{config.get("model_reasoning_effort", "high")}"')
|
||||
lines.append('model_provider = "custom"')
|
||||
lines.append(f'approval_policy = "{config.get("approval_policy", "never")}"')
|
||||
lines.append(f'sandbox_mode = "{config.get("sandbox_mode", "danger-full-access")}"')
|
||||
lines.append(f'check_for_update_on_startup = {toml_value(config.get("check_for_update", False))}')
|
||||
lines.append('forced_login_method = "api"')
|
||||
|
||||
# Preserve existing top-level keys we don't manage
|
||||
for key, val in existing.items():
|
||||
if key not in MANAGED_TOP_KEYS and not isinstance(val, dict):
|
||||
lines.append(f"{key} = {toml_value(val)}")
|
||||
|
||||
# [analytics]
|
||||
lines.append("")
|
||||
lines.append("[analytics]")
|
||||
lines.append(f"enabled = {toml_value(config.get('telemetry_enabled', False))}")
|
||||
|
||||
# [model_providers.custom]
|
||||
base_url = config["base_url"].rstrip("/")
|
||||
if not base_url.endswith("/v1"):
|
||||
base_url += "/v1"
|
||||
lines.append("")
|
||||
lines.append("[model_providers.custom]")
|
||||
lines.append('name = "custom"')
|
||||
lines.append(f'base_url = "{base_url}"')
|
||||
lines.append(f'env_key = "OPENAI_API_KEY"')
|
||||
lines.append(f'wire_api = "{config.get("wire_api", "responses")}"')
|
||||
|
||||
# Preserve other model_providers
|
||||
mp = existing.get("model_providers", {})
|
||||
if isinstance(mp, dict):
|
||||
for name, provider in mp.items():
|
||||
if name == "custom":
|
||||
continue
|
||||
lines.append("")
|
||||
lines.append(f"[model_providers.{name}]")
|
||||
for k, v in provider.items():
|
||||
lines.append(f"{k} = {toml_value(v)}")
|
||||
|
||||
# Trust paths
|
||||
trust_paths = config.get("trust_paths", ["/home", "/root", "/tmp"])
|
||||
existing_projects = existing.get("projects", {})
|
||||
|
||||
# Add our trust paths
|
||||
for tp in trust_paths:
|
||||
lines.append("")
|
||||
lines.append(f'[projects."{tp}"]')
|
||||
lines.append('trust_level = "trusted"')
|
||||
|
||||
# Preserve user's existing project trust entries (that aren't in our list)
|
||||
for path, proj_conf in existing_projects.items():
|
||||
if path not in trust_paths and isinstance(proj_conf, dict):
|
||||
lines.append("")
|
||||
lines.append(f'[projects."{path}"]')
|
||||
for k, v in proj_conf.items():
|
||||
lines.append(f"{k} = {toml_value(v)}")
|
||||
|
||||
# Preserve other sections we don't manage
|
||||
skip_sections = {"analytics", "model_providers", "projects"}
|
||||
for key, val in existing.items():
|
||||
if key in skip_sections or key in MANAGED_TOP_KEYS:
|
||||
continue
|
||||
if isinstance(val, dict):
|
||||
lines.append("")
|
||||
lines.append(f"[{key}]")
|
||||
for k, v in val.items():
|
||||
if isinstance(v, dict):
|
||||
# Nested table
|
||||
lines.append("")
|
||||
lines.append(f"[{key}.{k}]")
|
||||
for kk, vv in v.items():
|
||||
lines.append(f"{kk} = {toml_value(vv)}")
|
||||
else:
|
||||
lines.append(f"{k} = {toml_value(v)}")
|
||||
|
||||
return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
# ─── Backup ─────────────────────────────────────────────────────────────
|
||||
|
||||
def backup_file(path):
|
||||
"""Create timestamped backup. Returns backup path or None."""
|
||||
if not os.path.isfile(path):
|
||||
return None
|
||||
ts = datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
backup = f"{path}.backup.{ts}"
|
||||
shutil.copy2(path, backup)
|
||||
return backup
|
||||
|
||||
|
||||
# ─── Target 1: API Endpoint ─────────────────────────────────────────────
|
||||
|
||||
def patch_api_endpoint(codex_dir, config):
|
||||
"""Target 1: Configure [model_providers.custom] in config.toml."""
|
||||
config_path = os.path.join(codex_dir, "config.toml")
|
||||
existing = read_toml(config_path)
|
||||
|
||||
base_url = config["base_url"].rstrip("/")
|
||||
if not base_url.endswith("/v1"):
|
||||
base_url += "/v1"
|
||||
|
||||
# Check if already configured
|
||||
mp = existing.get("model_providers", {})
|
||||
custom = mp.get("custom", {}) if isinstance(mp, dict) else {}
|
||||
if (custom.get("base_url") == base_url and
|
||||
existing.get("model_provider") == "custom"):
|
||||
return True, "Already configured"
|
||||
|
||||
return True, f"Will set base_url={base_url}"
|
||||
|
||||
|
||||
# ─── Target 2: Authentication ───────────────────────────────────────────
|
||||
|
||||
def patch_auth(config, home_dir=None):
|
||||
"""Target 2: Configure API key auth via codex login --with-api-key."""
|
||||
api_key = config["api_key"]
|
||||
messages = []
|
||||
|
||||
# Set env var for current process
|
||||
os.environ["OPENAI_API_KEY"] = api_key
|
||||
|
||||
# Run codex login --with-api-key
|
||||
try:
|
||||
env = os.environ.copy()
|
||||
env["OPENAI_API_KEY"] = api_key
|
||||
result = subprocess.run(
|
||||
["codex", "login", "--with-api-key"],
|
||||
input=api_key + "\n",
|
||||
capture_output=True, text=True, timeout=30, env=env
|
||||
)
|
||||
if result.returncode == 0:
|
||||
messages.append("codex login: ok")
|
||||
else:
|
||||
# May already be logged in or other issue
|
||||
stderr = result.stderr.strip()
|
||||
if "already" in stderr.lower():
|
||||
messages.append("codex login: already authenticated")
|
||||
else:
|
||||
messages.append(f"codex login: exit {result.returncode}")
|
||||
except subprocess.TimeoutExpired:
|
||||
messages.append("codex login: timeout (30s)")
|
||||
except FileNotFoundError:
|
||||
messages.append("codex login: binary not found")
|
||||
except Exception as e:
|
||||
messages.append(f"codex login: {e}")
|
||||
|
||||
return True, "; ".join(messages)
|
||||
|
||||
|
||||
# ─── Target 3: Telemetry ────────────────────────────────────────────────
|
||||
|
||||
def patch_telemetry(codex_dir, config):
|
||||
"""Target 3: Disable analytics in config.toml."""
|
||||
config_path = os.path.join(codex_dir, "config.toml")
|
||||
existing = read_toml(config_path)
|
||||
|
||||
analytics = existing.get("analytics", {})
|
||||
if isinstance(analytics, dict) and analytics.get("enabled") is False:
|
||||
return True, "Already disabled"
|
||||
|
||||
return True, "Will disable analytics"
|
||||
|
||||
|
||||
# ─── Target 4: Permissions ──────────────────────────────────────────────
|
||||
|
||||
def patch_permissions(codex_dir, config):
|
||||
"""Target 4: Set approval_policy=never, sandbox=danger-full-access."""
|
||||
config_path = os.path.join(codex_dir, "config.toml")
|
||||
existing = read_toml(config_path)
|
||||
|
||||
policy = config.get("approval_policy", "never")
|
||||
sandbox = config.get("sandbox_mode", "danger-full-access")
|
||||
|
||||
if (existing.get("approval_policy") == policy and
|
||||
existing.get("sandbox_mode") == sandbox):
|
||||
return True, "Already configured"
|
||||
|
||||
return True, f"Will set approval={policy}, sandbox={sandbox}"
|
||||
|
||||
|
||||
# ─── Target 5: Model Config ─────────────────────────────────────────────
|
||||
|
||||
def patch_model_config(codex_dir, config):
|
||||
"""Target 5: Set model, reasoning_effort, disable auto-update."""
|
||||
config_path = os.path.join(codex_dir, "config.toml")
|
||||
existing = read_toml(config_path)
|
||||
|
||||
model = config["model"]
|
||||
effort = config.get("model_reasoning_effort", "high")
|
||||
|
||||
if (existing.get("model") == model and
|
||||
existing.get("model_reasoning_effort") == effort and
|
||||
existing.get("check_for_update_on_startup") is False):
|
||||
return True, "Already configured"
|
||||
|
||||
return True, f"Will set model={model}, effort={effort}"
|
||||
|
||||
|
||||
# ─── Target 6: System Environment ───────────────────────────────────────
|
||||
|
||||
def setup_env_vars(config):
|
||||
"""Target 6: Set OPENAI_BASE_URL and OPENAI_API_KEY in /etc/environment."""
|
||||
base_url = config["base_url"].rstrip("/")
|
||||
if not base_url.endswith("/v1"):
|
||||
base_url += "/v1"
|
||||
|
||||
env_vars = {
|
||||
"OPENAI_BASE_URL": base_url,
|
||||
"OPENAI_API_KEY": config["api_key"],
|
||||
}
|
||||
|
||||
if IS_WINDOWS:
|
||||
# Use setx for Windows
|
||||
count = 0
|
||||
for key, val in env_vars.items():
|
||||
try:
|
||||
subprocess.run(
|
||||
["setx", key, val, "/M"],
|
||||
capture_output=True, timeout=10
|
||||
)
|
||||
count += 1
|
||||
except Exception:
|
||||
pass
|
||||
return count > 0, f"Set {count} env var(s) via setx"
|
||||
|
||||
# Linux/macOS: /etc/environment
|
||||
etc_env = "/etc/environment"
|
||||
try:
|
||||
content = ""
|
||||
if os.path.isfile(etc_env):
|
||||
with open(etc_env, "r") as f:
|
||||
content = f.read()
|
||||
|
||||
changed = False
|
||||
for key, val in env_vars.items():
|
||||
line = f'{key}="{val}"'
|
||||
if key in content:
|
||||
# Update existing
|
||||
new_lines = []
|
||||
for l in content.split("\n"):
|
||||
if l.startswith(f"{key}="):
|
||||
if l != line:
|
||||
new_lines.append(line)
|
||||
changed = True
|
||||
else:
|
||||
new_lines.append(l)
|
||||
else:
|
||||
new_lines.append(l)
|
||||
content = "\n".join(new_lines)
|
||||
else:
|
||||
content = content.rstrip("\n") + "\n" + line + "\n"
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
with open(etc_env, "w") as f:
|
||||
f.write(content)
|
||||
return True, f"Set {len(env_vars)} env var(s) in {etc_env}"
|
||||
else:
|
||||
return True, "Env vars already set"
|
||||
|
||||
except PermissionError:
|
||||
return False, f"Permission denied: {etc_env} (run as root)"
|
||||
except Exception as e:
|
||||
return False, f"Error: {e}"
|
||||
|
||||
|
||||
# ─── Apply All Patches ──────────────────────────────────────────────────
|
||||
|
||||
def apply_all_patches(config, home_dir=None):
    """Apply all 6 patch targets. Returns (all_ok, results_dict).

    Writes a merged ~/.codex/config.toml (after backing up the existing
    one), then runs the six patch functions in order, printing a status
    line for each.

    Args:
        config: patcher configuration dict (base_url, api_key, ...).
        home_dir: home directory to patch; defaults to the current user's.

    Returns:
        (all_ok, results) where results maps target key -> (ok, message).
    """
    if home_dir is None:
        home_dir = os.path.expanduser("~")

    codex_dir = os.path.join(home_dir, ".codex")
    os.makedirs(codex_dir, exist_ok=True)

    config_path = os.path.join(codex_dir, "config.toml")

    binary_path, version = detect_codex()
    print(f"\n{BOLD}Codex CLI Patcher{RESET}")
    print(f" Version: {CYAN}{version}{RESET}")
    print(f" Binary: {binary_path}")
    print(f" Proxy: {config['base_url']}")
    print()

    # Read existing config, back it up, then write the merged version.
    existing = read_toml(config_path)
    backup_file(config_path)
    new_content = generate_config_toml(existing, config)
    with open(config_path, "w", encoding="utf-8") as f:
        f.write(new_content)

    # REFACTOR: the six targets were six copy-pasted blocks; drive them
    # from a (number, result key, thunk) table instead. Output and
    # results dict are identical to the original.
    targets = [
        (1, "api_endpoint", lambda: patch_api_endpoint(codex_dir, config)),
        (2, "authentication", lambda: patch_auth(config, home_dir)),
        (3, "telemetry", lambda: patch_telemetry(codex_dir, config)),
        (4, "permissions", lambda: patch_permissions(codex_dir, config)),
        (5, "model_config", lambda: patch_model_config(codex_dir, config)),
        (6, "system_env", lambda: setup_env_vars(config)),
    ]

    results = {}
    all_ok = True
    for num, key, run in targets:
        ok, msg = run()
        results[key] = (ok, msg)
        print(f" {'[OK]' if ok else '[FAIL]':>8} Target {num}: {msg}")
        if not ok:
            all_ok = False

    print()
    if all_ok:
        print(f" {GREEN}All patches applied successfully!{RESET}")
    else:
        print(f" {RED}Some patches failed. Check output above.{RESET}")

    return all_ok, results
|
||||
|
||||
|
||||
# ─── Rollback ────────────────────────────────────────────────────────────
|
||||
|
||||
def rollback(home_dir=None):
    """Restore ~/.codex/config.toml from the most recent timestamped backup.

    Returns True on success, False when no backup exists.
    """
    home = home_dir if home_dir is not None else os.path.expanduser("~")
    codex_dir = os.path.join(home, ".codex")
    config_path = os.path.join(codex_dir, "config.toml")

    # Backups carry a sortable suffix, so lexicographic order is
    # chronological; reverse sort puts the newest first.
    candidates = sorted(Path(codex_dir).glob("config.toml.backup.*"), reverse=True)
    if not candidates:
        print(f"{RED}No backups found in {codex_dir}{RESET}")
        return False

    newest = candidates[0]
    shutil.copy2(str(newest), config_path)
    print(f"{GREEN}Restored from {newest.name}{RESET}")
    return True
|
||||
|
||||
|
||||
# ─── Multi-User Support ─────────────────────────────────────────────────
|
||||
|
||||
def list_users():
    """List system users that look like real login accounts.

    Keeps accounts with an existing home directory, uid >= 1000 (plus
    root), and a real login shell. Returns [] on platforms without the
    ``pwd`` module (e.g. Windows).
    """
    users = []
    try:
        import pwd
        for pw in pwd.getpwall():
            home = pw.pw_dir
            if not os.path.isdir(home):
                continue
            # Skip system accounts (uid < 1000), but keep root.
            if pw.pw_uid < 1000 and pw.pw_uid != 0:
                continue
            # BUGFIX: match no-login shells by basename so /sbin/nologin
            # and /usr/bin/false are also excluded (previously only
            # /usr/sbin/nologin and /bin/false were matched).
            if os.path.basename(pw.pw_shell) in ("nologin", "false"):
                continue
            users.append(pw)
    except ImportError:
        pass
    return users
|
||||
|
||||
|
||||
def patch_user(user_home, config):
    """Patch a single user's ~/.codex/config.toml (backup, then merged rewrite).

    Always returns True; failures propagate as exceptions to the caller.
    """
    codex_dir = os.path.join(user_home, ".codex")
    os.makedirs(codex_dir, exist_ok=True)
    config_path = os.path.join(codex_dir, "config.toml")

    existing = read_toml(config_path)
    backup_file(config_path)

    with open(config_path, "w", encoding="utf-8") as f:
        f.write(generate_config_toml(existing, config))

    return True
|
||||
|
||||
|
||||
# ─── CLI ─────────────────────────────────────────────────────────────────
|
||||
|
||||
def main():
    """CLI entry point for the Codex patcher.

    Actions are checked in priority order — --detect, --rollback,
    --validate, --apply (optionally combined with --all to also patch
    every other user account) — and the first matching one runs.
    Falls back to printing help. Returns a process exit code.
    """
    parser = argparse.ArgumentParser(
        description="Codex CLI Patcher — route Codex through custom AI proxy"
    )
    parser.add_argument("--apply", action="store_true", help="Apply all patches")
    parser.add_argument("--all", action="store_true", help="Patch all user accounts")
    parser.add_argument("--rollback", action="store_true", help="Restore from backup")
    parser.add_argument("--detect", action="store_true", help="Detect Codex installation")
    parser.add_argument("--validate", action="store_true", help="Validate config state")
    parser.add_argument("--config", type=str, help="Path to codex_config.json")
    parser.add_argument("--yes", action="store_true", help="Non-interactive mode")
    args = parser.parse_args()

    config = load_config(args.config)

    if args.detect:
        binary_path, version = detect_codex()
        print(f"Binary: {binary_path}")
        print(f"Version: {version}")
        return 0

    if args.rollback:
        return 0 if rollback() else 1

    if args.validate:
        # Import validator lazily so the patcher runs without it installed.
        sys.path.insert(0, str(SCRIPT_DIR))
        from updater.config_validator import validate_all, print_validation_report
        codex_dir = os.path.expanduser("~/.codex")
        results = validate_all(codex_dir, config)
        print_validation_report(results)
        # Exit 0 only when every target validates GREEN.
        return 0 if all(r[1] == "GREEN" for r in results) else 1

    if args.apply:
        # Apply for current user
        ok, results = apply_all_patches(config)

        # Patch other users if --all
        if args.all:
            for user in list_users():
                # Current user was already patched above.
                if user.pw_dir == os.path.expanduser("~"):
                    continue
                try:
                    patch_user(user.pw_dir, config)
                    print(f" Patched {user.pw_name}: {user.pw_dir}/.codex/config.toml")
                except Exception as e:
                    print(f" {RED}Failed {user.pw_name}: {e}{RESET}")

        return 0 if ok else 1

    parser.print_help()
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
63
codex_old/ucodex_install.sh
Executable file
63
codex_old/ucodex_install.sh
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/bin/bash
# UnlimitedCoding — Codex CLI Installer
# Downloads Codex binary from GitHub + applies config patches
#
# Usage:
#   curl -fsSL https://git.sensey24.ru/.../ucodex_install.sh | sudo bash

# Strict mode: abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m'

echo -e "${BOLD}=== UnlimitedCoding — Codex CLI Installer ===${NC}"

# Check prerequisites
for cmd in python3 curl; do
  if ! command -v "$cmd" &>/dev/null; then
    echo -e "${RED}Error: $cmd is required but not found${NC}"
    exit 1
  fi
done

# Check Python version (need 3.11+ for tomllib).
# Let Python compare the version tuple itself — more robust than
# splitting the version string with cut.
PY_VER=$(python3 -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')")
if ! python3 -c 'import sys; sys.exit(0 if sys.version_info >= (3, 11) else 1)'; then
  echo -e "${RED}Error: Python 3.11+ required (found $PY_VER)${NC}"
  exit 1
fi

# ${BASH_SOURCE[0]:-$0}: BASH_SOURCE is unset when the script is piped
# via `curl | bash` (the documented usage), which would trip `set -u`.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"

# Step 1: Install/update Codex binary
echo -e "\n${BOLD}Step 1: Installing Codex CLI binary...${NC}"
if [ -f "$SCRIPT_DIR/update-codex.sh" ]; then
  bash "$SCRIPT_DIR/update-codex.sh"
else
  echo -e "${RED}update-codex.sh not found${NC}"
  exit 1
fi

# Step 2: Apply config patches
echo -e "\n${BOLD}Step 2: Applying config patches...${NC}"
if [ ! -f "$SCRIPT_DIR/codex_config.json" ]; then
  echo -e "${YELLOW}codex_config.json not found, copying example...${NC}"
  cp "$SCRIPT_DIR/codex_config.example.json" "$SCRIPT_DIR/codex_config.json"
  echo -e "${YELLOW}Edit codex_config.json with your API endpoint and key, then re-run.${NC}"
  exit 1
fi

python3 "$SCRIPT_DIR/codex_patcher.py" --apply --config "$SCRIPT_DIR/codex_config.json"

# Step 3: Validate
echo -e "\n${BOLD}Step 3: Validating...${NC}"
python3 "$SCRIPT_DIR/update_codex_patcher.py" --validate

echo -e "\n${GREEN}=== Installation complete! ===${NC}"
echo -e "Run: ${CYAN}codex${NC} to start"
|
||||
185
codex_old/update-codex.sh
Executable file
185
codex_old/update-codex.sh
Executable file
@@ -0,0 +1,185 @@
|
||||
#!/bin/bash
#
# Universal update script for the OpenAI Codex CLI.
# Automatically downloads the latest release from GitHub Releases.
# Uses the musl build for compatibility with older systems.
#

set -e

# Output colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Locate the codex binary (default install path when not on PATH)
CODEX_PATH=$(which codex 2>/dev/null || echo "/usr/local/bin/codex")
CODEX_DIR=$(dirname "$CODEX_PATH")
TEMP_DIR="/tmp/codex-update-$$"
GITHUB_API="https://api.github.com/repos/openai/codex/releases/latest"

# Detect CPU architecture — musl builds are used for compatibility
ARCH=$(uname -m)
case "$ARCH" in
    x86_64)
        BINARY_SUFFIX="x86_64-unknown-linux-musl"
        ;;
    aarch64|arm64)
        BINARY_SUFFIX="aarch64-unknown-linux-musl"
        ;;
    *)
        echo -e "${RED}Неподдерживаемая архитектура: $ARCH${NC}"
        exit 1
        ;;
esac

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE} OpenAI Codex CLI Updater${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""

# Report the currently installed version
# (prints Russian placeholders "не работает" / "не установлен" which
# the update logic below matches against — do not change them).
get_current_version() {
    if command -v codex &> /dev/null; then
        local ver=$(codex --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
        if [ -n "$ver" ]; then
            echo "$ver"
        else
            echo "не работает"
        fi
    else
        echo "не установлен"
    fi
}

# Fetch the latest released version number from GitHub
# NOTE(review): grep -oP (PCRE) is GNU-only — not available on BSD/macOS grep.
get_latest_version() {
    curl -s "$GITHUB_API" | grep -oP '"tag_name":\s*"rust-v\K[0-9]+\.[0-9]+\.[0-9]+' | head -1
}

# Version comparison (returns 0 when $1 is strictly newer than $2)
version_gt() {
    test "$(printf '%s\n' "$1" "$2" | sort -V | tail -n 1)" != "$2"
}

# Gather versions
echo -e "${YELLOW}Проверка версий...${NC}"
CURRENT_VERSION=$(get_current_version)
echo -e "Текущая версия: ${BLUE}$CURRENT_VERSION${NC}"

LATEST_VERSION=$(get_latest_version)
if [ -z "$LATEST_VERSION" ]; then
    echo -e "${RED}Не удалось получить информацию о последней версии${NC}"
    exit 1
fi
echo -e "Последняя версия: ${GREEN}$LATEST_VERSION${NC}"
echo ""

# Nothing to do when versions already match
if [ "$CURRENT_VERSION" = "$LATEST_VERSION" ]; then
    echo -e "${GREEN}✓ Codex уже обновлён до последней версии!${NC}"
    exit 0
fi

# If the current binary is broken or absent — always (re)install;
# otherwise only proceed when the release is strictly newer.
if [ "$CURRENT_VERSION" != "не установлен" ] && [ "$CURRENT_VERSION" != "не работает" ]; then
    if ! version_gt "$LATEST_VERSION" "$CURRENT_VERSION"; then
        echo -e "${GREEN}✓ Текущая версия актуальна или новее${NC}"
        exit 0
    fi
fi

echo -e "${YELLOW}Требуется обновление: $CURRENT_VERSION → $LATEST_VERSION${NC}"
echo ""

# Build the download URL
DOWNLOAD_URL="https://github.com/openai/codex/releases/download/rust-v${LATEST_VERSION}/codex-${BINARY_SUFFIX}.tar.gz"
echo -e "${BLUE}Архитектура: $ARCH (${BINARY_SUFFIX})${NC}"
echo -e "${BLUE}URL: $DOWNLOAD_URL${NC}"
echo ""

# Create a scratch directory
mkdir -p "$TEMP_DIR"
cd "$TEMP_DIR"

# Download
echo -e "${YELLOW}Скачивание...${NC}"
if ! curl -L -# -o codex.tar.gz "$DOWNLOAD_URL"; then
    echo -e "${RED}Ошибка скачивания${NC}"
    rm -rf "$TEMP_DIR"
    exit 1
fi

# Unpack
echo -e "${YELLOW}Распаковка...${NC}"
tar -xzf codex.tar.gz

# Locate the binary (may be named codex or codex-$BINARY_SUFFIX)
BINARY_FILE=""
if [ -f "codex" ]; then
    BINARY_FILE="codex"
elif [ -f "codex-${BINARY_SUFFIX}" ]; then
    BINARY_FILE="codex-${BINARY_SUFFIX}"
else
    # Fall back to any file starting with "codex" (excluding .tar.gz)
    BINARY_FILE=$(find . -maxdepth 1 -name 'codex*' -type f ! -name '*.gz' | head -1)
fi

if [ -z "$BINARY_FILE" ] || [ ! -f "$BINARY_FILE" ]; then
    echo -e "${RED}Бинарник codex не найден в архиве${NC}"
    ls -la
    rm -rf "$TEMP_DIR"
    exit 1
fi

echo -e "${GREEN}Найден бинарник: $BINARY_FILE${NC}"

# Terminate any running codex process before replacing the binary
if pgrep -x "codex" > /dev/null; then
    echo -e "${YELLOW}Обнаружен запущенный процесс codex, завершаем...${NC}"
    pkill -9 -x "codex" 2>/dev/null || true
    sleep 1
    echo -e "${GREEN}✓ Процесс завершён${NC}"
fi

# Install
echo -e "${YELLOW}Установка в $CODEX_PATH...${NC}"
chmod +x "$BINARY_FILE"

# Use sudo only when the target directory is not writable
if [ -w "$CODEX_DIR" ]; then
    mv -f "$BINARY_FILE" "$CODEX_PATH"
else
    echo -e "${YELLOW}Требуются права sudo для записи в $CODEX_DIR${NC}"
    sudo mv -f "$BINARY_FILE" "$CODEX_PATH"
fi

# Cleanup
cd /
rm -rf "$TEMP_DIR"

# Refresh the shell's cached PATH lookups
hash -r 2>/dev/null || true

# Verify the result
echo ""
NEW_VERSION=$(get_current_version)

echo -e "${BLUE}========================================${NC}"
echo -e "${GREEN}✓ Обновление завершено!${NC}"
echo -e "${BLUE}========================================${NC}"
echo -e "Было: ${RED}$CURRENT_VERSION${NC}"
echo -e "Стало: ${GREEN}$NEW_VERSION${NC}"
echo ""

# Final sanity check
if [ "$NEW_VERSION" = "$LATEST_VERSION" ]; then
    echo -e "${GREEN}✓ Версия успешно обновлена до $LATEST_VERSION${NC}"
elif [ "$NEW_VERSION" = "не работает" ]; then
    echo -e "${RED}✗ Бинарник не запускается! Проверьте зависимости.${NC}"
    ldd "$CODEX_PATH" 2>&1 | grep "not found" || true
else
    echo -e "${YELLOW}⚠ Версия после установки: $NEW_VERSION (ожидалась $LATEST_VERSION)${NC}"
fi
|
||||
239
codex_old/update_codex_patcher.py
Executable file
239
codex_old/update_codex_patcher.py
Executable file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Codex Patcher Update Pipeline — check, update, patch, validate, test.
|
||||
|
||||
Usage:
|
||||
python3 update_codex_patcher.py --check # Check for new version
|
||||
python3 update_codex_patcher.py --update # Download + install new binary
|
||||
python3 update_codex_patcher.py --validate # Validate 6 config targets
|
||||
python3 update_codex_patcher.py --patch # Apply config patches
|
||||
python3 update_codex_patcher.py --test # Integration test
|
||||
python3 update_codex_patcher.py --auto # Full cycle
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||
sys.path.insert(0, str(SCRIPT_DIR))
|
||||
|
||||
# ANSI colors
|
||||
GREEN = "\033[92m"
|
||||
YELLOW = "\033[93m"
|
||||
RED = "\033[91m"
|
||||
CYAN = "\033[96m"
|
||||
BOLD = "\033[1m"
|
||||
RESET = "\033[0m"
|
||||
|
||||
|
||||
def color(text, c):
    """Wrap *text* in the ANSI escape *c*, appending a reset afterwards."""
    return "%s%s%s" % (c, text, RESET)
|
||||
|
||||
|
||||
def load_config():
    """Load codex_config.json from the script directory, or exit(1)."""
    config_path = SCRIPT_DIR / "codex_config.json"
    if not config_path.is_file():
        print(f" {color('Config not found: ' + str(config_path), RED)}")
        sys.exit(1)
    return json.loads(config_path.read_text())
|
||||
|
||||
|
||||
def cmd_check(config):
    """Check GitHub releases for new Codex version.

    Returns True when the installed version matches the latest release;
    False when an update is available or on any error. ``config`` is
    accepted for pipeline-signature uniformity and is not read here.
    """
    print(f"\n{BOLD}Checking for updates...{RESET}")
    try:
        import urllib.request
        url = "https://api.github.com/repos/openai/codex/releases/latest"
        # GitHub's API rejects requests without a User-Agent header.
        req = urllib.request.Request(url, headers={"User-Agent": "codex-patcher"})
        with urllib.request.urlopen(req, timeout=15) as resp:
            data = json.loads(resp.read())

        latest_tag = data.get("tag_name", "")
        # Tag format: "rust-v0.111.0" — strip both prefixes to a bare version.
        latest_version = latest_tag.replace("rust-v", "").replace("v", "")

        # Get installed version from the local binary.
        from codex_patcher import detect_codex
        _, installed = detect_codex()

        print(f" Installed: {CYAN}{installed}{RESET}")
        print(f" Latest: {CYAN}{latest_version}{RESET}")

        if installed == latest_version:
            print(f" {GREEN}Already up to date!{RESET}")
            return True
        else:
            print(f" {YELLOW}Update available: {installed} → {latest_version}{RESET}")
            return False

    except Exception as e:
        # Network errors, JSON errors, missing codex — all reported the same.
        print(f" {color(f'Error: {e}', RED)}")
        return False
|
||||
|
||||
|
||||
def cmd_update(config):
    """Download and install a new Codex binary via update-codex.sh.

    Returns True when the updater script exits 0, False otherwise.
    """
    print(f"\n{BOLD}Updating Codex binary...{RESET}")
    updater = SCRIPT_DIR / "update-codex.sh"
    if not updater.is_file():
        print(f" {color(f'update-codex.sh not found at {updater}', RED)}")
        return False

    try:
        proc = subprocess.run(["bash", str(updater)], timeout=300)
    except Exception as e:
        print(f" {color(f'Error: {e}', RED)}")
        return False
    return proc.returncode == 0
|
||||
|
||||
|
||||
def cmd_validate(config):
    """Validate all 6 config targets and write reports/validation_report.json.

    Returns True when no target is RED.
    """
    print(f"\n{BOLD}Validating config targets...{RESET}")
    try:
        from updater.config_validator import validate_all, print_validation_report

        checks = validate_all(os.path.expanduser("~/.codex"), config)
        counts = print_validation_report(checks)

        # Persist a machine-readable copy of the report.
        report_dir = SCRIPT_DIR / "reports"
        report_dir.mkdir(exist_ok=True)
        report_path = report_dir / "validation_report.json"
        payload = {
            "targets": [
                {"name": target.name, "status": status, "message": message}
                for target, status, message in checks
            ],
            "counts": counts,
        }
        with open(report_path, "w") as f:
            json.dump(payload, f, indent=2)
        print(f"\n Report saved: {report_path}")

        # Success means no critical (RED) findings.
        return counts.get("RED", 0) == 0
    except Exception as e:
        print(f" {color(f'Error: {e}', RED)}")
        return False
|
||||
|
||||
|
||||
def cmd_patch(config):
    """Apply config patches via codex_patcher.apply_all_patches."""
    print(f"\n{BOLD}Applying patches...{RESET}")
    try:
        from codex_patcher import apply_all_patches
        patched_ok, _results = apply_all_patches(config)
        return patched_ok
    except Exception as e:
        print(f" {color(f'Error: {e}', RED)}")
        return False
|
||||
|
||||
|
||||
def cmd_test(config):
    """Run integration test: ask codex to reply "42" through the proxy.

    Exports OPENAI_BASE_URL / OPENAI_API_KEY into the child environment
    and checks that the model's output contains "42".
    Returns True on success; False on timeout, error, or unexpected output.
    """
    print(f"\n{BOLD}Running integration test...{RESET}")

    # Normalize base URL the same way the patch targets do.
    base_url = config["base_url"].rstrip("/")
    if not base_url.endswith("/v1"):
        base_url += "/v1"

    env = os.environ.copy()
    env["OPENAI_BASE_URL"] = base_url
    env["OPENAI_API_KEY"] = config["api_key"]

    try:
        result = subprocess.run(
            ["codex", "exec",
             "--sandbox", "danger-full-access",
             "Reply with just the number 42"],
            capture_output=True, text=True, timeout=60, env=env
        )

        output = result.stdout.strip()
        print(f" Output: {output[:200]}")

        # Substring check — the model may wrap the answer in extra text.
        if "42" in output:
            print(f" {GREEN}Test passed!{RESET}")
            return True
        else:
            print(f" {YELLOW}Unexpected output (no '42' found){RESET}")
            if result.stderr:
                print(f" Stderr: {result.stderr[:200]}")
            return False

    except subprocess.TimeoutExpired:
        print(f" {RED}Test timed out (60s){RESET}")
        return False
    except Exception as e:
        print(f" {color(f'Error: {e}', RED)}")
        return False
|
||||
|
||||
|
||||
def cmd_auto(config):
    """Full cycle: check → update → patch → validate → test.

    Stops and returns False at the first failing step, except the
    version check (a False there just means an update is available).
    """
    banner = "=" * 50
    print(f"\n{BOLD}{banner}{RESET}")
    print(f"{BOLD} Codex Patcher — Auto Update Pipeline{RESET}")
    print(f"{BOLD}{banner}{RESET}")

    pipeline = (
        ("Check version", cmd_check),
        ("Update binary", cmd_update),
        ("Apply patches", cmd_patch),
        ("Validate", cmd_validate),
        ("Test", cmd_test),
    )

    for step_name, step in pipeline:
        succeeded = step(config)
        if not succeeded and step_name != "Check version":
            print(f"\n {RED}Pipeline stopped at: {step_name}{RESET}")
            return False

    print(f"\n{GREEN}{banner}{RESET}")
    print(f"{GREEN} Pipeline completed successfully!{RESET}")
    print(f"{GREEN}{banner}{RESET}")
    return True
|
||||
|
||||
|
||||
def main():
    """Parse CLI flags and run the first matching pipeline command."""
    parser = argparse.ArgumentParser(
        description="Codex Patcher Update Pipeline"
    )
    parser.add_argument("--check", action="store_true", help="Check for new version")
    parser.add_argument("--update", action="store_true", help="Update binary")
    parser.add_argument("--validate", action="store_true", help="Validate config")
    parser.add_argument("--patch", action="store_true", help="Apply patches")
    parser.add_argument("--test", action="store_true", help="Run integration test")
    parser.add_argument("--auto", action="store_true", help="Full auto cycle")
    args = parser.parse_args()

    config = load_config()

    # Dispatch in priority order; --auto wins over individual steps.
    dispatch = (
        (args.auto, cmd_auto),
        (args.check, cmd_check),
        (args.update, cmd_update),
        (args.validate, cmd_validate),
        (args.patch, cmd_patch),
        (args.test, cmd_test),
    )
    for selected, command in dispatch:
        if selected:
            return 0 if command(config) else 1

    parser.print_help()
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
0
codex_old/updater/__init__.py
Executable file
0
codex_old/updater/__init__.py
Executable file
259
codex_old/updater/config_validator.py
Executable file
259
codex_old/updater/config_validator.py
Executable file
@@ -0,0 +1,259 @@
|
||||
"""Config validator for Codex Patcher — validates 6 config targets.
|
||||
|
||||
Unlike Claude/Gemini patchers (regex-based), Codex validation is state-based:
|
||||
checks config.toml values and environment variables.
|
||||
"""
|
||||
|
||||
import os
|
||||
import tomllib
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, Optional
|
||||
|
||||
|
||||
@dataclass
class ConfigTarget:
    """A single validation target (one row in the validation report)."""
    name: str          # machine-readable identifier used in reports
    description: str   # human-readable summary printed in the report
    check_key: str     # "config_toml" | "env" | "auth"


# The six targets, in the exact order validate_all() appends results.
PATCH_TARGETS = [
    ConfigTarget(
        name="api_endpoint",
        description="Custom proxy via model_providers",
        check_key="config_toml",
    ),
    ConfigTarget(
        name="authentication",
        description="API key auth configured",
        check_key="auth",
    ),
    ConfigTarget(
        name="analytics_disabled",
        description="Analytics/telemetry disabled",
        check_key="config_toml",
    ),
    ConfigTarget(
        name="approval_bypass",
        description="Approval policy set to never",
        check_key="config_toml",
    ),
    ConfigTarget(
        name="sandbox_bypass",
        description="Sandbox set to danger-full-access",
        check_key="config_toml",
    ),
    ConfigTarget(
        name="env_vars",
        description="System environment variables configured",
        check_key="env",
    ),
]
|
||||
|
||||
|
||||
def _read_toml(path):
    """Read a TOML file.

    Returns the parsed dict, or ``None`` when *path* does not exist
    (callers use ``None`` to mean "config.toml not found").
    May raise ``tomllib.TOMLDecodeError`` on malformed input.
    """
    if not os.path.isfile(path):
        return None
    with open(path, "rb") as f:
        return tomllib.load(f)
|
||||
|
||||
|
||||
def _check_api_endpoint(toml_data, config):
|
||||
"""Check Target 1: model_providers.custom with correct base_url."""
|
||||
if toml_data is None:
|
||||
return "RED", "config.toml not found"
|
||||
|
||||
mp = toml_data.get("model_providers", {})
|
||||
custom = mp.get("custom", {}) if isinstance(mp, dict) else {}
|
||||
|
||||
base_url = config["base_url"].rstrip("/")
|
||||
if not base_url.endswith("/v1"):
|
||||
base_url += "/v1"
|
||||
|
||||
if not custom:
|
||||
return "RED", "model_providers.custom section missing"
|
||||
|
||||
if custom.get("base_url") != base_url:
|
||||
return "YELLOW", f"base_url mismatch: {custom.get('base_url')} != {base_url}"
|
||||
|
||||
if toml_data.get("model_provider") != "custom":
|
||||
return "YELLOW", "model_provider != 'custom'"
|
||||
|
||||
return "GREEN", f"base_url={base_url}"
|
||||
|
||||
|
||||
def _check_auth(codex_dir, config):
|
||||
"""Check Target 2: API key authentication."""
|
||||
# Check env var
|
||||
env_key = os.environ.get("OPENAI_API_KEY", "")
|
||||
if env_key == config["api_key"]:
|
||||
return "GREEN", "OPENAI_API_KEY set correctly"
|
||||
|
||||
# Check /etc/environment
|
||||
etc_env = "/etc/environment"
|
||||
if os.path.isfile(etc_env):
|
||||
with open(etc_env) as f:
|
||||
content = f.read()
|
||||
if config["api_key"] in content:
|
||||
return "GREEN", "API key in /etc/environment"
|
||||
|
||||
if env_key:
|
||||
return "YELLOW", "OPENAI_API_KEY set but different value"
|
||||
|
||||
return "RED", "OPENAI_API_KEY not set"
|
||||
|
||||
|
||||
def _check_analytics(toml_data):
|
||||
"""Check Target 3: analytics disabled."""
|
||||
if toml_data is None:
|
||||
return "RED", "config.toml not found"
|
||||
|
||||
analytics = toml_data.get("analytics", {})
|
||||
if not isinstance(analytics, dict):
|
||||
return "RED", "[analytics] section missing"
|
||||
|
||||
if analytics.get("enabled") is False:
|
||||
return "GREEN", "analytics.enabled = false"
|
||||
|
||||
if "enabled" not in analytics:
|
||||
return "YELLOW", "[analytics] exists but 'enabled' key missing"
|
||||
|
||||
return "YELLOW", f"analytics.enabled = {analytics.get('enabled')}"
|
||||
|
||||
|
||||
def _check_approval(toml_data, config):
|
||||
"""Check Target 4: approval_policy."""
|
||||
if toml_data is None:
|
||||
return "RED", "config.toml not found"
|
||||
|
||||
target_policy = config.get("approval_policy", "never")
|
||||
current = toml_data.get("approval_policy")
|
||||
|
||||
if current == target_policy:
|
||||
return "GREEN", f'approval_policy = "{target_policy}"'
|
||||
|
||||
if current is not None:
|
||||
return "YELLOW", f'approval_policy = "{current}" (expected "{target_policy}")'
|
||||
|
||||
return "RED", "approval_policy not set"
|
||||
|
||||
|
||||
def _check_sandbox(toml_data, config):
|
||||
"""Check Target 5: sandbox_mode."""
|
||||
if toml_data is None:
|
||||
return "RED", "config.toml not found"
|
||||
|
||||
target_mode = config.get("sandbox_mode", "danger-full-access")
|
||||
current = toml_data.get("sandbox_mode")
|
||||
|
||||
if current == target_mode:
|
||||
return "GREEN", f'sandbox_mode = "{target_mode}"'
|
||||
|
||||
if current is not None:
|
||||
return "YELLOW", f'sandbox_mode = "{current}" (expected "{target_mode}")'
|
||||
|
||||
return "RED", "sandbox_mode not set"
|
||||
|
||||
|
||||
def _check_env_vars(config):
|
||||
"""Check Target 6: system environment variables."""
|
||||
base_url = config["base_url"].rstrip("/")
|
||||
if not base_url.endswith("/v1"):
|
||||
base_url += "/v1"
|
||||
|
||||
etc_env = "/etc/environment"
|
||||
if not os.path.isfile(etc_env):
|
||||
return "RED", "/etc/environment not found"
|
||||
|
||||
with open(etc_env) as f:
|
||||
content = f.read()
|
||||
|
||||
has_base = "OPENAI_BASE_URL" in content
|
||||
has_key = "OPENAI_API_KEY" in content
|
||||
|
||||
if has_base and has_key:
|
||||
return "GREEN", "OPENAI_BASE_URL + OPENAI_API_KEY set"
|
||||
|
||||
missing = []
|
||||
if not has_base:
|
||||
missing.append("OPENAI_BASE_URL")
|
||||
if not has_key:
|
||||
missing.append("OPENAI_API_KEY")
|
||||
|
||||
return "YELLOW" if (has_base or has_key) else "RED", f"Missing: {', '.join(missing)}"
|
||||
|
||||
|
||||
def validate_all(codex_dir, config):
    """Validate all 6 targets. Returns list of (target, status, message) tuples."""
    toml_data = _read_toml(os.path.join(codex_dir, "config.toml"))

    # One check per target, in PATCH_TARGETS order.
    checks = [
        _check_api_endpoint(toml_data, config),
        _check_auth(codex_dir, config),
        _check_analytics(toml_data),
        _check_approval(toml_data, config),
        _check_sandbox(toml_data, config),
        _check_env_vars(config),
    ]
    return [
        (target, status, message)
        for target, (status, message) in zip(PATCH_TARGETS, checks)
    ]
|
||||
|
||||
|
||||
# ANSI colors
|
||||
GREEN_C = "\033[92m"
|
||||
YELLOW_C = "\033[93m"
|
||||
RED_C = "\033[91m"
|
||||
BOLD_C = "\033[1m"
|
||||
RESET_C = "\033[0m"
|
||||
|
||||
STATUS_COLORS = {
|
||||
"GREEN": GREEN_C,
|
||||
"YELLOW": YELLOW_C,
|
||||
"RED": RED_C,
|
||||
}
|
||||
|
||||
|
||||
def print_validation_report(results):
    """Print a formatted validation report; return the status-counts dict."""
    rule = " " + "─" * 50
    print(f"\n {BOLD_C}Codex Patcher — Validation Report{RESET_C}")
    print(rule)

    counts = {"GREEN": 0, "YELLOW": 0, "RED": 0}
    for target, status, msg in results:
        tint = STATUS_COLORS.get(status, "")
        print(f" {tint}[{status:6s}]{RESET_C} {target.name}: {target.description}")
        # Only non-GREEN rows get their detail message printed.
        if status != "GREEN":
            print(f" → {msg}")
        counts[status] = counts.get(status, 0) + 1

    print(rule)
    total = len(results)
    print(f" {GREEN_C}{counts['GREEN']}{RESET_C}/{total} GREEN "
          f"{YELLOW_C}{counts['YELLOW']}{RESET_C} YELLOW "
          f"{RED_C}{counts['RED']}{RESET_C} RED")

    if counts["GREEN"] == total:
        print(f"\n {GREEN_C}All targets configured correctly!{RESET_C}")
    elif counts["RED"] > 0:
        print(f"\n {RED_C}Critical targets missing. Run: python3 codex_patcher.py --apply{RESET_C}")

    return counts
|
||||
99
qwen_old/README.md
Executable file
99
qwen_old/README.md
Executable file
@@ -0,0 +1,99 @@
|
||||
# Qwen Code Patcher
|
||||
|
||||
Patches [QwenCode CLI](https://github.com/QwenLM/qwen-code) (`@qwen-code/qwen-code`) to route all API requests through a custom AI proxy, disable telemetry, and auto-configure settings.
|
||||
|
||||
**[RU]** Патчер для QwenCode CLI — перенаправляет API запросы через пользовательский AI прокси, отключает телеметрию, автоматически настраивает окружение.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# 1. Install from private registry
|
||||
npm config set @qwen-code:registry https://npm.sensey24.ru/
|
||||
npm install -g @qwen-code/qwen-code
|
||||
|
||||
# 2. Apply settings (env vars + settings.json)
|
||||
python3 qwen_patcher.py --settings-only
|
||||
|
||||
# 3. Verify
|
||||
qwen -p "Say hello"
|
||||
```
|
||||
|
||||
## Windows
|
||||
|
||||
```powershell
|
||||
npm config set "@qwen-code:registry" "https://npm.sensey24.ru/"
|
||||
npm install -g @qwen-code/qwen-code
|
||||
python3 qwen_patcher.py --settings-only
|
||||
```
|
||||
|
||||
## Update
|
||||
|
||||
Same two commands — npm will pull the latest patched version:
|
||||
|
||||
```bash
|
||||
npm config set @qwen-code:registry https://npm.sensey24.ru/
|
||||
npm install -g @qwen-code/qwen-code
|
||||
```
|
||||
|
||||
## What Gets Patched
|
||||
|
||||
| # | Target | Description |
|
||||
|---|--------|-------------|
|
||||
| 1 | telemetry_flag | Force `getTelemetryEnabled()` -> false |
|
||||
| 2 | telemetry_log_prompts | Force `getTelemetryLogPromptsEnabled()` -> false |
|
||||
| 3 | telemetry_init_guard | Early return in `initializeTelemetry()` |
|
||||
| 4 | dashscope_base_url | `DEFAULT_DASHSCOPE_BASE_URL` -> proxy |
|
||||
| 5 | coding_plan_urls | `coding.dashscope.aliyuncs.com` -> proxy |
|
||||
| 6 | default_model | Validate `DEFAULT_QWEN_MODEL = "coder-model"` |
|
||||
| 7 | mainline_model | Validate `MAINLINE_CODER_MODEL = "qwen3.5-plus"` |
|
||||
| 8 | auto_update_registry | `registry.npmjs.org` -> private registry |
|
||||
| 9 | auto_update_command | Add `--registry` to update commands |
|
||||
| 10 | user_settings | Auth type=openai, telemetry=false, model |
|
||||
| 11 | trusted_folders | Trust /home, /root, /tmp |
|
||||
| 12 | system_env | OPENAI_API_KEY, OPENAI_BASE_URL, telemetry vars |
|
||||
|
||||
Targets 1-9 are pre-patched in the npm package. Targets 10-12 require running `qwen_patcher.py --settings-only`.
|
||||
|
||||
## Models
|
||||
|
||||
| Model | Description |
|
||||
|-------|-------------|
|
||||
| `qwen3.5-plus` | Qwen 3.5 Plus — default |
|
||||
| `coder-model` | Direct OAuth model name |
|
||||
| `qwen3-coder-plus` | Qwen3 Coder Plus |
|
||||
| `qwen3-coder-flash` | Qwen3 Coder Flash (fast) |
|
||||
|
||||
## CLI Usage
|
||||
|
||||
```bash
|
||||
# Detection
|
||||
python3 qwen_patcher.py --detect
|
||||
|
||||
# Validation (GREEN/YELLOW/RED for each target)
|
||||
python3 qwen_patcher.py --validate
|
||||
|
||||
# Full patch (cli.js + settings + env)
|
||||
python3 qwen_patcher.py --apply
|
||||
|
||||
# Settings only (no cli.js modification)
|
||||
python3 qwen_patcher.py --settings-only
|
||||
|
||||
# Rollback cli.js from backup
|
||||
python3 qwen_patcher.py --rollback
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "model not supported" error
|
||||
|
||||
Make sure your proxy has `qwen3.5-plus` mapped to `coder-model` in the OAuth model alias config. The Qwen OAuth endpoint only accepts `coder-model` as the model name.
|
||||
|
||||
### CLI doesn't start after patching
|
||||
|
||||
If you applied `--apply` and the CLI fails to start, run `--rollback` to restore from backup, then use the pre-patched npm package instead:
|
||||
|
||||
```bash
|
||||
python3 qwen_patcher.py --rollback
|
||||
npm install -g @qwen-code/qwen-code # Re-installs pre-patched version
|
||||
python3 qwen_patcher.py --settings-only
|
||||
```
|
||||
15
qwen_old/qwen_config.json
Executable file
15
qwen_old/qwen_config.json
Executable file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"base_url": "https://ai.37-187-136-86.sslip.io",
|
||||
"api_key": "ClauderAPI",
|
||||
"default_model": "qwen3.5-plus",
|
||||
"models": [
|
||||
"qwen3.5-plus",
|
||||
"coder-model",
|
||||
"qwen3-coder-plus",
|
||||
"qwen3-coder-flash"
|
||||
],
|
||||
"target_version": "0.11.1",
|
||||
"telemetry_enabled": false,
|
||||
"npm_package": "@qwen-code/qwen-code",
|
||||
"npm_registry": "https://npm.sensey24.ru"
|
||||
}
|
||||
492
qwen_old/qwen_patcher.py
Executable file
492
qwen_old/qwen_patcher.py
Executable file
@@ -0,0 +1,492 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Qwen Code Patcher — patches QwenCode CLI to route through custom AI proxy.
|
||||
|
||||
Targets:
|
||||
1. telemetry_flag — force getTelemetryEnabled() → false
|
||||
2. telemetry_log_prompts — force getTelemetryLogPromptsEnabled() → false
|
||||
3. telemetry_init_guard — early return in initializeTelemetry()
|
||||
4. dashscope_base_url — DEFAULT_DASHSCOPE_BASE_URL → proxy
|
||||
5. coding_plan_urls — coding.dashscope.aliyuncs.com → proxy
|
||||
6. default_model — validate DEFAULT_QWEN_MODEL (no change)
|
||||
7. mainline_model — validate MAINLINE_CODER_MODEL (no change)
|
||||
8. auto_update_registry — registry.npmjs.org → npm.sensey24.ru
|
||||
9. auto_update_command — add --registry to update commands
|
||||
10. user_settings — ~/.qwen/settings.json (auth + telemetry)
|
||||
11. trusted_folders — ~/.qwen/trustedFolders.json
|
||||
12. system_env — env vars injection
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shutil
|
||||
import platform
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
|
||||
# ─── Constants ──────────────────────────────────────────────────────────
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||
CONFIG_PATH = SCRIPT_DIR / "qwen_config.json"
|
||||
|
||||
IS_WINDOWS = platform.system() == "Windows"
|
||||
IS_MACOS = platform.system() == "Darwin"
|
||||
|
||||
NPM_PACKAGE = "@qwen-code/qwen-code"
|
||||
CLI_JS_FILENAME = "cli.js"
|
||||
|
||||
PATCH_MARKER = "/* QWEN_PATCHED */"
|
||||
|
||||
# ANSI colors
|
||||
GREEN = "\033[92m"
|
||||
YELLOW = "\033[93m"
|
||||
RED = "\033[91m"
|
||||
CYAN = "\033[96m"
|
||||
BOLD = "\033[1m"
|
||||
RESET = "\033[0m"
|
||||
|
||||
|
||||
# ─── Utilities ──────────────────────────────────────────────────────────
|
||||
|
||||
def eprint(*args, **kwargs):
    """Emit a diagnostic message on stderr so stdout stays clean."""
    print(*args, file=sys.stderr, **kwargs)
|
||||
|
||||
|
||||
def load_config(config_path=None):
    """Load patcher configuration from JSON file.

    Args:
        config_path: Optional path override; when falsy, falls back to the
            qwen_config.json sitting next to this script (CONFIG_PATH).

    Returns:
        The parsed configuration dict.

    Exits:
        With status 1 (after printing to stderr) if the file does not exist.
    """
    path = Path(config_path) if config_path else CONFIG_PATH
    if not path.is_file():
        eprint(f"{RED}Config not found: {path}{RESET}")
        sys.exit(1)
    # JSON is defined as UTF-8 — don't depend on the platform locale codec.
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)
|
||||
|
||||
|
||||
def read_version(qwen_root):
    """Read the installed package version from <qwen_root>/package.json.

    Returns "unknown" when package.json is missing, unreadable, corrupt
    JSON, or not a JSON object — callers use the value cosmetically and
    should never have to handle an exception for it.
    """
    pkg = Path(qwen_root) / "package.json"
    if pkg.is_file():
        try:
            with open(pkg, encoding="utf-8") as f:
                return json.load(f).get("version", "unknown")
        except (OSError, json.JSONDecodeError, AttributeError):
            # AttributeError covers a non-dict top level (no .get).
            return "unknown"
    return "unknown"
|
||||
|
||||
|
||||
# ─── Detection ──────────────────────────────────────────────────────────
|
||||
|
||||
def _candidate_paths():
    """Yield likely QwenCode CLI install roots, most common first."""
    pkg_tail = ("node_modules", "@qwen-code", "qwen-code")
    if IS_WINDOWS:
        # Global npm prefix lives under %APPDATA% (or %LOCALAPPDATA%).
        for env_name in ("APPDATA", "LOCALAPPDATA"):
            base = os.environ.get(env_name, "")
            if base:
                yield Path(base).joinpath("npm", *pkg_tail)
    else:
        yield Path("/usr/lib/node_modules/@qwen-code/qwen-code")
        yield Path("/usr/local/lib/node_modules/@qwen-code/qwen-code")
        if IS_MACOS:
            yield Path("/opt/homebrew/lib/node_modules/@qwen-code/qwen-code")
        home = Path.home()
        yield home.joinpath(".local", "lib", *pkg_tail)
        yield home.joinpath(".npm-global", "lib", *pkg_tail)
        # Node installed through nvm lives under per-version directories;
        # newest version is tried first (reverse-sorted glob).
        nvm_root = Path(os.environ.get("NVM_DIR", str(home / ".nvm")))
        if nvm_root.is_dir():
            for ver_dir in sorted(nvm_root.glob("versions/node/v*"), reverse=True):
                yield ver_dir.joinpath("lib", *pkg_tail)
|
||||
|
||||
|
||||
def detect_qwen():
    """Find QwenCode CLI installation. Returns (qwen_root, cli_js_path) or (None, None)."""
    for candidate in _candidate_paths():
        entry = candidate / CLI_JS_FILENAME
        if entry.is_file():
            return str(candidate), str(entry)
    return None, None
|
||||
|
||||
|
||||
# ─── Patching: cli.js targets (1-9) ────────────────────────────────────
|
||||
|
||||
def _backup_file(filepath):
    """Copy *filepath* to '<filepath>.backup' unless a backup already exists."""
    backup = f"{filepath}.backup"
    if os.path.isfile(backup):
        # Never overwrite: the first backup is the pristine original.
        return
    shutil.copy2(filepath, backup)
    print(f"  {CYAN}Backup:{RESET} {backup}")
|
||||
|
||||
|
||||
def _already_patched(content):
    """Return True when the patch marker is already present in *content*."""
    return content.find(PATCH_MARKER) >= 0
|
||||
|
||||
|
||||
def patch_cli_js(cli_js_path, config):
    """Apply all 9 cli.js patch targets. Returns dict of {target: status}.

    Targets 1-5, 8, 9 are textual substitutions (telemetry kill-switches,
    proxy base URLs, private npm registry); targets 6-7 only validate that
    expected model constants are present.  A PATCH_MARKER comment is
    inserted near the top of the file so repeat runs become no-ops.

    Fix vs. previous version: the file is only rewritten when at least one
    substitution actually changed it — an all-SKIP run no longer churns
    the file's mtime or inserts a misleading marker.
    """
    _backup_file(cli_js_path)

    with open(cli_js_path, "r", encoding="utf-8", errors="replace") as f:
        content = f.read()

    if _already_patched(content):
        print(f"  {GREEN}cli.js already patched (marker found){RESET}")
        return {"cli_js": "already_patched"}

    base_url = config["base_url"]
    npm_registry = config.get("npm_registry", "https://npm.sensey24.ru")
    results = {}
    original = content
    proxy_v1 = f"{base_url}/v1"  # loop-invariant, used by targets 4 and 5

    # Target 1: TELEMETRY_FLAG — force getTelemetryEnabled() -> false.
    pat1 = r'(getTelemetryEnabled\(\)\s*\{)\s*return\s+this\.telemetrySettings\.enabled\s*\?\?\s*false;'
    rep1 = r'\1 return false; /* QWEN_PATCHED */'
    content, n = re.subn(pat1, rep1, content)
    results["telemetry_flag"] = f"OK ({n})" if n > 0 else "SKIP"

    # Target 2: TELEMETRY_LOG_PROMPTS — force prompt logging off.
    pat2 = r'(getTelemetryLogPromptsEnabled\(\)\s*\{)\s*return\s+this\.telemetrySettings\.logPrompts\s*\?\?\s*true;'
    rep2 = r'\1 return false; /* QWEN_PATCHED */'
    content, n = re.subn(pat2, rep2, content)
    results["telemetry_log_prompts"] = f"OK ({n})" if n > 0 else "SKIP"

    # Target 3: TELEMETRY_INIT_GUARD — early return before any init work.
    pat3 = r'(function initializeTelemetry\(config2\)\s*\{)\s*\n(\s*)if\s*\(telemetryInitialized'
    rep3 = r'\1\n\2return; /* QWEN_PATCHED: telemetry disabled */\n\2if (telemetryInitialized'
    content, n = re.subn(pat3, rep3, content)
    results["telemetry_init_guard"] = f"OK ({n})" if n > 0 else "SKIP"

    # Target 4: DASHSCOPE_BASE_URL — default endpoint -> proxy.
    pat4 = r'(DEFAULT_DASHSCOPE_BASE_URL\s*=\s*)"https://dashscope\.aliyuncs\.com/compatible-mode/v1"'
    rep4 = rf'\1"{proxy_v1}"'
    content, n = re.subn(pat4, rep4, content)
    results["dashscope_base_url"] = f"OK ({n})" if n > 0 else "SKIP"

    # Target 5: CODING_PLAN_URLS (plain string replace, not regex).
    count5 = 0
    for old_url in (
        "https://coding.dashscope.aliyuncs.com/v1",
        "https://coding-intl.dashscope.aliyuncs.com/v1",
    ):
        count5 += content.count(old_url)
        content = content.replace(old_url, proxy_v1)
    results["coding_plan_urls"] = f"OK ({count5})" if count5 > 0 else "SKIP"

    # Target 6: DEFAULT_MODEL (validate only — no change).
    if re.search(r'DEFAULT_QWEN_MODEL\s*=\s*"coder-model"', content):
        results["default_model"] = "OK (validated)"
    else:
        results["default_model"] = "WARN (unexpected value)"

    # Target 7: MAINLINE_MODEL (validate only — no change).
    if re.search(r'MAINLINE_CODER_MODEL\s*=\s*"qwen3\.5-plus"', content):
        results["mainline_model"] = "OK (validated)"
    else:
        results["mainline_model"] = "WARN (unexpected value)"

    # Target 8: AUTO_UPDATE_REGISTRY — both quote styles occur in the bundle.
    count8 = 0
    for old_reg, new_reg in (
        ('"https://registry.npmjs.org/"', f'"{npm_registry}/"'),
        ("'https://registry.npmjs.org/'", f"'{npm_registry}/'"),
    ):
        count8 += content.count(old_reg)
        content = content.replace(old_reg, new_reg)
    results["auto_update_registry"] = f"OK ({count8})" if count8 > 0 else "SKIP"

    # Target 9: AUTO_UPDATE_COMMAND — pin --registry in the update hint.
    old_cmd = '"npm install -g @qwen-code/qwen-code@latest"'
    new_cmd = f'"npm install -g @qwen-code/qwen-code@latest --registry {npm_registry}"'
    c = content.count(old_cmd)
    content = content.replace(old_cmd, new_cmd)
    results["auto_update_command"] = f"OK ({c})" if c > 0 else "SKIP"

    if content == original:
        # Nothing matched: leave the file untouched (no marker, no rewrite).
        return results

    # Insert the marker after the shebang so line 1 stays executable.
    if content.startswith("#!"):
        first_nl = content.index("\n")
        content = content[:first_nl + 1] + PATCH_MARKER + "\n" + content[first_nl + 1:]
    else:
        content = PATCH_MARKER + "\n" + content

    with open(cli_js_path, "w", encoding="utf-8") as f:
        f.write(content)

    return results
|
||||
|
||||
|
||||
# ─── Patching: settings targets (10-12) ────────────────────────────────
|
||||
|
||||
def patch_user_settings(config):
    """Configure ~/.qwen/settings.json (Target 10).

    Merges into any existing settings rather than overwriting the file:
    forces OpenAI-compatible auth, disables telemetry, and pins the model
    name from config["default_model"].  Returns "OK".

    Fix vs. previous version: a corrupt settings file whose top level (or
    whose "security"/"telemetry"/"model" entries) is not a JSON object no
    longer crashes the merge — such values are reset to empty dicts.
    """
    qwen_dir = Path.home() / ".qwen"
    qwen_dir.mkdir(parents=True, exist_ok=True)
    settings_path = qwen_dir / "settings.json"

    existing = {}
    if settings_path.is_file():
        try:
            with open(settings_path, encoding="utf-8") as f:
                existing = json.load(f)
        except (json.JSONDecodeError, OSError):
            pass
    if not isinstance(existing, dict):
        existing = {}

    # Normalize the sections we touch so the nested writes below are safe.
    for key in ("security", "telemetry", "model"):
        if not isinstance(existing.get(key), dict):
            existing[key] = {}
    if not isinstance(existing["security"].get("auth"), dict):
        existing["security"]["auth"] = {}

    existing["security"]["auth"]["selectedType"] = "openai"
    existing["telemetry"]["enabled"] = False
    existing["telemetry"]["logPrompts"] = False
    existing["model"]["name"] = config.get("default_model", "qwen3.5-plus")

    with open(settings_path, "w", encoding="utf-8") as f:
        json.dump(existing, f, indent=2)

    print(f"  {GREEN}Settings:{RESET} {settings_path}")
    return "OK"
|
||||
|
||||
|
||||
def patch_trusted_folders(config):
    """Create/update ~/.qwen/trustedFolders.json (Target 11).

    Marks config["trust_paths"] (default /home, /root, /tmp) plus the
    current user's home directory as "TRUST_PARENT", preserving any entries
    already present.  Returns "OK".

    Fix vs. previous version: a corrupt file whose top level is not a JSON
    object no longer crashes the merge — it is reset to an empty dict.
    """
    qwen_dir = Path.home() / ".qwen"
    qwen_dir.mkdir(parents=True, exist_ok=True)
    tf_path = qwen_dir / "trustedFolders.json"

    existing = {}
    if tf_path.is_file():
        try:
            with open(tf_path, encoding="utf-8") as f:
                existing = json.load(f)
        except (json.JSONDecodeError, OSError):
            pass
    if not isinstance(existing, dict):
        existing = {}

    trust_paths = config.get("trust_paths", ["/home", "/root", "/tmp"])
    # setdefault keeps any trust level the user already assigned.
    for p in [*trust_paths, str(Path.home())]:
        existing.setdefault(p, "TRUST_PARENT")

    with open(tf_path, "w", encoding="utf-8") as f:
        json.dump(existing, f, indent=2)

    print(f"  {GREEN}Trusted folders:{RESET} {tf_path}")
    return "OK"
|
||||
|
||||
|
||||
def setup_env_vars(config):
    """Set environment variables (Target 12).

    Windows: persists values via 'setx' (best effort — failures ignored).
    Linux/macOS: rewrites /etc/environment, updating existing KEY= lines in
    place and appending missing ones.  When the file cannot be written
    (no root), prints manual export commands and returns "MANUAL";
    otherwise returns "OK".

    Fix vs. previous version: removed the dead 'updated' flag that was
    assigned but never read.
    """
    base_url = config["base_url"]
    api_key = config.get("api_key", "")
    default_model = config.get("default_model", "qwen3.5-plus")

    env_vars = {
        "OPENAI_API_KEY": api_key,
        "OPENAI_BASE_URL": f"{base_url}/v1",
        "OPENAI_MODEL": default_model,
        "GEMINI_TELEMETRY_ENABLED": "false",
        "GEMINI_TELEMETRY_LOG_PROMPTS": "false",
    }

    if IS_WINDOWS:
        import subprocess
        # Best effort: setx failures (e.g. over-long values) are ignored.
        for k, v in env_vars.items():
            subprocess.run(["setx", k, v], capture_output=True)
        print(f"  {GREEN}Env vars:{RESET} Set via setx (Windows)")
        return "OK"

    # Linux/macOS: write to /etc/environment
    env_file = Path("/etc/environment")
    if not env_file.is_file():
        try:
            env_file.touch()
        except PermissionError:
            eprint(f"  {YELLOW}Cannot write /etc/environment (no root){RESET}")
            _print_env_export(env_vars)
            return "MANUAL"

    try:
        existing = env_file.read_text()
    except PermissionError:
        _print_env_export(env_vars)
        return "MANUAL"

    lines = existing.splitlines()
    for key, value in env_vars.items():
        entry = f'{key}="{value}"'
        for i, line in enumerate(lines):
            if line.startswith(f"{key}="):
                lines[i] = entry  # key present: update in place
                break
        else:
            lines.append(entry)  # key absent: append

    new_content = "\n".join(lines)
    if not new_content.endswith("\n"):
        new_content += "\n"

    try:
        env_file.write_text(new_content)
        print(f"  {GREEN}Env vars:{RESET} Written to /etc/environment")
        return "OK"
    except PermissionError:
        eprint(f"  {YELLOW}Cannot write /etc/environment (no root){RESET}")
        _print_env_export(env_vars)
        return "MANUAL"
|
||||
|
||||
|
||||
def _print_env_export(env_vars):
    """Print export commands for manual setup."""
    print(f"\n  {YELLOW}Add these to your shell profile:{RESET}")
    for name, value in env_vars.items():
        print(f'  export {name}="{value}"')
    print()
|
||||
|
||||
|
||||
# ─── Orchestration ─────────────────────────────────────────────────────
|
||||
|
||||
def apply_all_patches(cli_js_path, config, settings_only=False):
    """Apply all patches. Returns overall results dict."""
    outcome = {}

    if not settings_only:
        print(f"\n{BOLD}Patching cli.js...{RESET}")
        outcome.update(patch_cli_js(cli_js_path, config))

    print(f"\n{BOLD}Configuring settings...{RESET}")
    # Settings-side targets always run, in this fixed order.
    for label, step in (
        ("user_settings", patch_user_settings),
        ("trusted_folders", patch_trusted_folders),
        ("system_env", setup_env_vars),
    ):
        outcome[label] = step(config)

    return outcome
|
||||
|
||||
|
||||
def rollback(cli_js_path):
    """Restore cli.js from backup. Returns True on success."""
    backup = cli_js_path + ".backup"
    if not os.path.isfile(backup):
        eprint(f"  {RED}No backup found:{RESET} {backup}")
        return False
    shutil.copy2(backup, cli_js_path)
    print(f"  {GREEN}Restored:{RESET} {cli_js_path}")
    return True
|
||||
|
||||
|
||||
# ─── Validation (standalone) ───────────────────────────────────────────
|
||||
|
||||
def run_validation(cli_js_path):
    """Run pattern validation on detected installation."""
    from updater.pattern_validator import validate_all, print_validation_report, get_summary

    qwen_home = Path.home() / ".qwen"
    results = validate_all(
        cli_js_path,
        str(qwen_home / "settings.json"),
        str(qwen_home / "trustedFolders.json"),
    )
    counts = print_validation_report(results)
    return counts, get_summary(results)
|
||||
|
||||
|
||||
# ─── CLI ───────────────────────────────────────────────────────────────
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, detect the install, dispatch.

    Modes (mutually exclusive): --detect, --apply, --settings-only,
    --rollback, --validate.  Exits 1 when the CLI cannot be found, 2 when
    validation reports RED targets.

    Fix vs. previous version: the duplicated --settings-only / --apply
    branches are collapsed into a single apply_all_patches() call.
    """
    parser = argparse.ArgumentParser(
        description="Qwen Code Patcher — patches QwenCode CLI for custom AI proxy"
    )
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--detect", action="store_true", help="Find QwenCode CLI installation")
    group.add_argument("--apply", action="store_true", help="Apply all patches")
    group.add_argument("--settings-only", action="store_true", help="Only settings + env (no cli.js)")
    group.add_argument("--rollback", action="store_true", help="Restore from backup")
    group.add_argument("--validate", action="store_true", help="Validate all targets")
    parser.add_argument("--config", type=str, help="Path to custom config file")

    args = parser.parse_args()
    config = load_config(args.config)

    # Detection
    qwen_root, cli_js_path = detect_qwen()

    if args.detect:
        if qwen_root:
            version = read_version(qwen_root)
            print(f"\n  {GREEN}Found QwenCode CLI{RESET}")
            print(f"  Root:    {qwen_root}")
            print(f"  cli.js:  {cli_js_path}")
            print(f"  Version: {version}")
        else:
            eprint(f"\n  {RED}QwenCode CLI not found{RESET}")
            sys.exit(1)
        return

    # All remaining modes require an installation.
    if not qwen_root:
        eprint(f"{RED}QwenCode CLI not found. Install: npm install -g @qwen-code/qwen-code{RESET}")
        sys.exit(1)

    version = read_version(qwen_root)
    print(f"\n{BOLD}QwenCode CLI v{version}{RESET} — {qwen_root}")

    if args.validate:
        counts, summary = run_validation(cli_js_path)
        # Persist a machine-readable report next to the script.
        report_dir = SCRIPT_DIR / "reports"
        report_dir.mkdir(exist_ok=True)
        report_path = report_dir / f"validation_{version}.json"
        with open(report_path, "w") as f:
            json.dump(summary, f, indent=2)
        print(f"\n  Report saved: {report_path}")
        if counts.get("RED", 0) > 0:
            sys.exit(2)
        return

    if args.rollback:
        rollback(cli_js_path)
        return

    # --apply and --settings-only share one code path.
    results = apply_all_patches(cli_js_path, config, settings_only=args.settings_only)

    # Print summary
    print(f"\n{BOLD}Results:{RESET}")
    for target, status in results.items():
        if "OK" in str(status) or status == "already_patched":
            print(f"  {GREEN}[OK]{RESET}   {target}: {status}")
        elif "SKIP" in str(status):
            print(f"  {YELLOW}[SKIP]{RESET} {target}: {status}")
        else:
            print(f"  {CYAN}[INFO]{RESET} {target}: {status}")

    print(f"\n{GREEN}Done!{RESET} Restart QwenCode CLI to apply changes.\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
234
qwen_old/uqwen_install.sh
Executable file
234
qwen_old/uqwen_install.sh
Executable file
@@ -0,0 +1,234 @@
|
||||
#!/usr/bin/env bash
# Qwen Code — One-line installer
# Usage:
#   curl -fsSL -H "Authorization: token TOKEN" \
#     https://git.sensey24.ru/aibot777/unlimitedcoding/raw/branch/master/qwen/uqwen_install.sh \
#     -o /tmp/uqwen.sh && sudo bash /tmp/uqwen.sh
set -euo pipefail

# SECURITY NOTE(review): a default repo token is embedded below, so anyone
# with this script gains repo access. Prefer requiring GITEA_TOKEN from the
# environment and failing fast when it is unset.
GITEA_TOKEN="${GITEA_TOKEN:-cadffcb0a6a3be728ac1ff619bb40c86588f6837}"
readonly REPO_RAW="https://git.sensey24.ru/aibot777/unlimitedcoding/raw/branch/master/qwen"
readonly REGISTRY_URL="https://npm.sensey24.ru/"
readonly NPM_SCOPE="@qwen-code"
readonly NPM_PACKAGE="@qwen-code/qwen-code"

# ANSI colors (echo -e interprets the \033 escapes at print time).
readonly GREEN="\033[92m"
readonly RED="\033[91m"
readonly CYAN="\033[96m"
readonly YELLOW="\033[93m"
readonly BOLD="\033[1m"
readonly RESET="\033[0m"

# Logging helpers: err goes to stderr, everything else to stdout.
log()  { echo -e "${GREEN}[+]${RESET} $*"; }
err()  { echo -e "${RED}[!]${RESET} $*" >&2; }
info() { echo -e "${CYAN}[i]${RESET} $*"; }
warn() { echo -e "${YELLOW}[~]${RESET} $*"; }

echo -e "${BOLD}"
echo "  +--------------------------------------+"
echo "  |        Qwen Code — Installer         |"
echo "  +--------------------------------------+"
echo -e "${RESET}"
|
||||
|
||||
# ---- Auto-install prerequisites ----
|
||||
|
||||
# Install the given packages with whichever system package manager is
# available (Debian/RHEL/Fedora/Homebrew). Returns 1 when none is found.
install_pkg() {
  if command -v apt-get >/dev/null 2>&1; then
    apt-get update -qq && apt-get install -y -qq "$@"
  elif command -v dnf >/dev/null 2>&1; then
    dnf install -y -q "$@"
  elif command -v yum >/dev/null 2>&1; then
    yum install -y -q "$@"
  elif command -v brew >/dev/null 2>&1; then
    brew install "$@"
  else
    err "No package manager found. Install $* manually."
    return 1
  fi
}
|
||||
|
||||
# Python3 — required to run the downloaded patcher script.
if ! command -v python3 &>/dev/null; then
  info "python3 not found, installing..."
  install_pkg python3
fi
log "Python3 $(python3 --version | awk '{print $2}')"

# curl — required to fetch the patcher from the repo.
if ! command -v curl &>/dev/null; then
  info "curl not found, installing..."
  install_pkg curl
fi
|
||||
|
||||
# Node.js >= 20
# Install Node.js 24.x via the NodeSource setup script (or Homebrew on
# macOS). Exits the installer when no supported mechanism exists.
install_node() {
  info "Installing Node.js v24.x..."
  if command -v apt-get >/dev/null 2>&1; then
    curl -fsSL https://deb.nodesource.com/setup_24.x | bash - && apt-get install -y nodejs
  elif command -v dnf >/dev/null 2>&1; then
    curl -fsSL https://rpm.nodesource.com/setup_24.x | bash - && dnf install -y nodejs
  elif command -v yum >/dev/null 2>&1; then
    curl -fsSL https://rpm.nodesource.com/setup_24.x | bash - && yum install -y nodejs
  elif command -v brew >/dev/null 2>&1; then
    brew install node
  else
    err "Cannot auto-install Node.js. Install manually: https://nodejs.org/"
    exit 1
  fi
}
|
||||
|
||||
if ! command -v node &>/dev/null; then
  install_node
fi

# Extract the major version from "vNN.x.y" and upgrade if it is too old.
NODE_VER=$(node -v | sed 's/v//' | cut -d. -f1)
if [ "$NODE_VER" -lt 20 ]; then
  warn "Node.js >= 20 required (found v$NODE_VER). Upgrading..."
  install_node
  NODE_VER=$(node -v | sed 's/v//' | cut -d. -f1)
fi
log "Node.js $(node -v)"

# ---- Configure npm registry ----

# Scope only @qwen-code to the private registry; other packages still come
# from the default registry. Failure here is non-fatal (|| true).
info "Configuring npm registry: ${REGISTRY_URL}"
npm config set "${NPM_SCOPE}:registry" "${REGISTRY_URL}" 2>/dev/null || true
|
||||
|
||||
# ---- Install Qwen Code ----
|
||||
|
||||
# Retry the global npm install a few times with a short pause between
# attempts; returns 0 on the first success, 1 after the final failure.
install_qwen_npm() {
  local tries=0
  local -r limit=3
  while (( ++tries <= limit )); do
    info "Installing ${NPM_PACKAGE} (attempt ${tries}/${limit})..."
    if npm install -g "${NPM_PACKAGE}" 2>&1; then
      return 0
    fi
    warn "Attempt $tries failed."
    [ "$tries" -lt "$limit" ] && sleep 3
  done
  return 1
}
|
||||
|
||||
# Look for an already-installed Qwen binary (either published name).
QWEN_BIN=""
for candidate in qwen qwen-code; do
  if command -v "$candidate" &>/dev/null; then
    QWEN_BIN="$candidate"
    break
  fi
done

if [ -z "$QWEN_BIN" ]; then
  # Not installed yet: try the (retrying) npm install, with actionable
  # recovery hints when every attempt fails.
  if ! install_qwen_npm; then
    err "npm install failed after retries."
    err ""
    err "Possible fixes:"
    err "  1. Try HTTP instead of HTTPS:"
    err "     npm config set ${NPM_SCOPE}:registry http://npm.sensey24.ru/"
    err "     npm install -g ${NPM_PACKAGE}"
    err ""
    err "  2. Install from official npm + patch separately:"
    err "     npm install -g ${NPM_PACKAGE}"
    err "     # then re-run this script to apply patches"
    exit 1
  fi
  # Find the binary after install
  for candidate in qwen qwen-code; do
    if command -v "$candidate" &>/dev/null; then
      QWEN_BIN="$candidate"
      break
    fi
  done
  log "Qwen Code installed"
else
  log "Qwen Code found: $QWEN_BIN"
fi
|
||||
|
||||
# ---- Download and apply patcher ----
|
||||
|
||||
# Work in a throwaway directory that is removed on any exit path.
INSTALL_DIR=$(mktemp -d)
cleanup() { rm -rf "$INSTALL_DIR" 2>/dev/null || true; }
trap cleanup EXIT

info "Downloading patcher..."
# The repo may be private: send the token, matching the usage header above
# (previously these downloads omitted it and would 404 on a private repo).
curl -fsSL -H "Authorization: token ${GITEA_TOKEN}" \
  "$REPO_RAW/qwen_patcher.py" -o "$INSTALL_DIR/qwen_patcher.py"
curl -fsSL -H "Authorization: token ${GITEA_TOKEN}" \
  "$REPO_RAW/qwen_config.json" -o "$INSTALL_DIR/qwen_config.json"
log "Patcher downloaded"

info "Applying patches (settings + env)..."
# BUG FIX: under 'set -e' the old 'cmd; PATCH_EXIT=$?' pattern could never
# observe a failure (the script had already exited). Run the command as an
# 'if' condition so a non-zero status reaches the fallback.
if ! python3 "$INSTALL_DIR/qwen_patcher.py" --settings-only --config "$INSTALL_DIR/qwen_config.json"; then
  PATCH_EXIT=$?
  warn "Settings-only patch returned $PATCH_EXIT, trying full patch..."
  python3 "$INSTALL_DIR/qwen_patcher.py" --apply --config "$INSTALL_DIR/qwen_config.json"
fi
log "Patches applied"
|
||||
|
||||
# ---- Set environment variables ----
|
||||
|
||||
info "Setting environment variables..."
# Two export lines appended verbatim to each shell rc file found below.
ENV_VARS='export QWEN_API_KEY="ClauderAPI"
export QWEN_BASE_URL="https://ai.37-187-136-86.sslip.io"'

for rc_file in "$HOME/.bashrc" "$HOME/.zshrc"; do
  if [ -f "$rc_file" ]; then
    # Idempotent: skip rc files that already mention QWEN_API_KEY.
    if ! grep -q 'QWEN_API_KEY' "$rc_file" 2>/dev/null; then
      echo "" >> "$rc_file"
      echo "# Qwen Code (UnlimitedCoding)" >> "$rc_file"
      echo "$ENV_VARS" >> "$rc_file"
      log "Added env vars to $(basename "$rc_file")"
    fi
  fi
done

# Make the variables available to the rest of this script's own process.
export QWEN_API_KEY="ClauderAPI"
export QWEN_BASE_URL="https://ai.37-187-136-86.sslip.io"

# Also for all users if root (profile.d is sourced by login shells).
if [ "$(id -u)" -eq 0 ]; then
  PROFILE_D="/etc/profile.d/qwen-code.sh"
  cat > "$PROFILE_D" << 'PROF_EOF'
# Qwen Code (UnlimitedCoding)
export QWEN_API_KEY="ClauderAPI"
export QWEN_BASE_URL="https://ai.37-187-136-86.sslip.io"
PROF_EOF
  chmod 644 "$PROFILE_D"
  log "Set env vars for all users: $PROFILE_D"
fi
|
||||
|
||||
# ---- Verify ----
|
||||
|
||||
info "Verifying..."
echo ""

if [ -n "$QWEN_BIN" ]; then
  # Smoke test: ask the patched CLI for a trivial reply, capped at 30s.
  # '|| true' keeps 'set -e' from aborting on a failed/timed-out run.
  RESULT=$(timeout 30 "$QWEN_BIN" -p "Reply with just OK" 2>&1 || true)
  if echo "$RESULT" | grep -qi "OK"; then
    echo ""
    echo -e "${GREEN}${BOLD}  Qwen Code installed and patched!${RESET}"
    echo ""
    echo "  Usage:"
    echo "    $QWEN_BIN                      # interactive mode"
    echo "    $QWEN_BIN -p \"Your prompt\"     # single prompt"
    echo ""
    echo "  Models:"
    echo "    qwen3.5-plus, qwen3-coder-plus"
    echo "    qwen3-coder-flash, coder-model"
    echo ""
    echo "  If env vars not active, run: source ~/.bashrc"
    echo ""
  else
    # Install succeeded but the test prompt did not: leave the patches in
    # place and tell the user how to retry by hand.
    warn "Patches applied but test prompt failed."
    echo "  Response: $RESULT"
    echo ""
    echo "  Try manually:"
    echo "    source ~/.bashrc"
    echo "    $QWEN_BIN -p 'Hello'"
  fi
else
  warn "Qwen binary not found in PATH after install."
  echo "  Check: npm list -g ${NPM_PACKAGE}"
  echo "  Then run: source ~/.bashrc && qwen -p 'Hello'"
fi
|
||||
Reference in New Issue
Block a user