Compare commits

...

11 Commits

Author SHA1 Message Date
chrome-storm-c442
cf319c502e v1.9.16: add --s3-url presigned URL command to ssh.py
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 04:05:00 -05:00
chrome-storm-c442
01ab318e4b v1.9.15: fix minimize/restore — remove grab_set, add Win32 restore fallback
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 03:58:32 -05:00
chrome-storm-c442
f9a81a4825 v1.9.14: fix dialog minimize bug — restore modal dialogs on un-minimize
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 03:25:18 -05:00
chrome-storm-c442
3bafb0deb8 skill: enforce server type checking — MinIO/S3, presigned URL, workflow examples
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 02:25:41 -05:00
chrome-storm-c442
b37e696094 v1.9.13: remove cleanup_gitea_releases — keep all Gitea releases
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:35:17 -05:00
chrome-storm-c442
289ce65431 chore: remove 12 old exe from git + use git rm in cleanup
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:32:01 -05:00
chrome-storm-c442
704ce3bef2 v1.9.12: cleanup orphan Gitea tags alongside old releases
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:29:53 -05:00
chrome-storm-c442
00f3b76d2a v1.9.11: always check updates on startup + cleanup Gitea releases
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:27:21 -05:00
chrome-storm-c442
efbbfa13ee v1.9.10: cleanup old Gitea releases — keep first + last 5
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:21:33 -05:00
chrome-storm-c442
3c4d02c5ec v1.9.9: fix stale server data after Edit — force reconnect with fresh credentials
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 09:11:36 -05:00
chrome-storm-c442
5b4672dfe3 v1.9.8: S3 resumable download — Range GET with .s3part resume on disconnect
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 08:33:48 -05:00
26 changed files with 404 additions and 52 deletions

View File

@@ -26,7 +26,7 @@ ServerManager — **кроссплатформенное** Desktop GUI (CustomTk
| grafana | `grafana_client.py` (requests) | Dashboards, Info, Setup | `--grafana-dashboards`, `--grafana-alerts` |
| prometheus | `prometheus_client.py` (requests) | Metrics, Info, Setup | `--prom-query`, `--prom-targets`, `--prom-alerts` |
| winrm | `winrm_client.py` (pywinrm) | PowerShell, Info, Setup | `--ps`, `--cmd` |
| s3 | `s3_client.py` (boto3) | Objects, Info, Setup | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete` |
| s3 | `s3_client.py` (boto3) | Objects, Info, Setup | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete`, `--s3-url` |
| rdp/vnc | `remote_desktop.py` | Launch, Info, Setup | — (запуск внешнего клиента) |
## БЕЗОПАСНОСТЬ
@@ -139,6 +139,13 @@ tools/
/ssh --redis ALIAS "GET key" # Redis-команда
/ssh --redis-info ALIAS # Redis INFO
/ssh --redis-keys ALIAS "pattern" # SCAN ключей
# S3 / MinIO
/ssh --s3-buckets ALIAS # Список бакетов
/ssh --s3-ls ALIAS bucket[/prefix] # Список объектов
/ssh --s3-upload ALIAS local bucket/key # Upload файла
/ssh --s3-download ALIAS bucket/key local # Download файла
/ssh --s3-delete ALIAS bucket/key # Удалить объект
/ssh --s3-url ALIAS bucket/key [SEC] # Presigned URL (по умолчанию 1 час)
# Grafana / Prometheus
/ssh --grafana-dashboards ALIAS # Дашборды
/ssh --prom-query ALIAS "up" # PromQL

View File

@@ -311,6 +311,7 @@ def _version_key(path: str):
return (0, 0, 0)
def cleanup_old_releases():
"""Keep the first release (v1.0.0) and the last 5 releases, delete the rest."""
import glob
@@ -327,9 +328,20 @@ def cleanup_old_releases():
keep = set([first] + last_5)
removed = []
_flags = subprocess.CREATE_NO_WINDOW if sys.platform == "win32" else 0
for f in all_exes:
if f not in keep:
os.remove(f)
# Use git rm so deletion is staged for commit
try:
subprocess.run(
["git", "rm", "-f", "--quiet", f],
cwd=PROJECT_DIR, creationflags=_flags,
capture_output=True,
)
except Exception:
# Fallback: just delete the file
if os.path.exists(f):
os.remove(f)
removed.append(os.path.basename(f))
if removed:

View File

@@ -240,16 +240,145 @@ class S3Client:
def download_file(self, bucket: str, key: str, local_path: str,
progress_cb=None, status_cb=None) -> bool:
"""Download an S3 object to a local file with retry.
"""Download with resume support using S3 Range GET.
progress_cb(bytes_transferred) — called periodically.
status_cb(message) — called with retry info.
On disconnect, keeps the .s3part file and resumes from where it
stopped. ETag is checked to detect if the remote file changed
(in that case the partial file is discarded and download restarts).
boto3 TransferConfig.num_download_attempts handles part-level retries.
This method adds full-transfer retries with reconnect.
progress_cb(bytes_delta) — called with each chunk size.
status_cb(message) — called with retry / resume info.
"""
if not self._ensure_connected():
return False
# --- 1. HEAD — get size and ETag ---
try:
head = self._client.head_object(Bucket=bucket, Key=key)
total_size = head["ContentLength"]
etag = head.get("ETag", "")
except Exception as exc:
log.error("S3 head_object failed: %s", exc)
return False
# Small files (< 1 MB) — simple download, no resume overhead
if total_size < 1024 * 1024:
return self._download_file_simple(
bucket, key, local_path, progress_cb, status_cb)
# --- 2. Check .s3part (partial download) ---
temp_path = local_path + ".s3part"
meta_path = local_path + ".s3meta"
start_byte = 0
if os.path.exists(temp_path):
saved_etag = ""
if os.path.exists(meta_path):
try:
with open(meta_path, "r") as f:
saved_etag = f.read().strip()
except Exception:
pass
if saved_etag == etag and etag:
start_byte = os.path.getsize(temp_path)
if start_byte >= total_size:
# Already fully downloaded
os.replace(temp_path, local_path)
self._cleanup_meta(meta_path)
self._last_ok = time.time()
return True
log.info("S3 resuming from byte %d / %d", start_byte, total_size)
if status_cb:
mb = start_byte / (1024 * 1024)
status_cb(f"Resuming from {mb:.1f} MB...")
else:
# ETag changed — file was modified on server, start fresh
try:
os.remove(temp_path)
except OSError:
pass
start_byte = 0
# Save ETag for future resume
try:
with open(meta_path, "w") as f:
f.write(etag)
except Exception:
pass
# Report already-downloaded bytes so progress bar is correct
if progress_cb and start_byte > 0:
progress_cb(start_byte)
# --- 3. Download loop with retry ---
chunk_size = _MULTIPART_CHUNKSIZE # 8 MB
for attempt in range(_MAX_RETRIES):
try:
if start_byte >= total_size:
break
range_header = f"bytes={start_byte}-"
resp = self._client.get_object(
Bucket=bucket, Key=key, Range=range_header)
with open(temp_path, "ab") as f:
for chunk in resp["Body"].iter_chunks(chunk_size=chunk_size):
f.write(chunk)
f.flush()
start_byte += len(chunk)
if progress_cb:
progress_cb(len(chunk))
# --- 4. Verify size ---
actual = os.path.getsize(temp_path)
if actual != total_size:
log.warning("S3 size mismatch: got %d, expected %d",
actual, total_size)
# Don't delete — maybe we can resume next attempt
if actual < total_size:
start_byte = actual
continue
# actual > total_size — corrupted, restart
try:
os.remove(temp_path)
except OSError:
pass
start_byte = 0
continue
# --- 5. Atomic rename ---
os.replace(temp_path, local_path)
self._cleanup_meta(meta_path)
self._last_ok = time.time()
log.info("S3 downloaded s3://%s/%s -> %s (%d bytes, resumed)",
bucket, key, local_path, total_size)
return True
except Exception as exc:
# Update start_byte from actual file size
if os.path.exists(temp_path):
start_byte = os.path.getsize(temp_path)
delay = _retry_delay(attempt)
log.warning("S3 download attempt %d/%d failed at byte %d: %s",
attempt + 1, _MAX_RETRIES, start_byte, exc)
if status_cb:
pct = (start_byte / total_size * 100) if total_size else 0
status_cb(f"Retry {attempt+1}/{_MAX_RETRIES} at {pct:.0f}%...")
time.sleep(delay)
self._reconnect()
# Adaptive chunk: reduce on repeated failures
if attempt >= 2 and chunk_size > 1024 * 1024:
chunk_size = 1024 * 1024 # 1 MB
log.info("S3 reducing chunk size to 1 MB")
log.error("S3 download failed after %d attempts: s3://%s/%s -> %s",
_MAX_RETRIES, bucket, key, local_path)
return False
def _download_file_simple(self, bucket: str, key: str, local_path: str,
progress_cb=None, status_cb=None) -> bool:
"""Simple download for small files (no resume overhead)."""
for attempt in range(_MAX_RETRIES):
try:
self._client.download_file(
@@ -257,6 +386,7 @@ class S3Client:
Config=self._transfer_config,
Callback=progress_cb,
)
self._last_ok = time.time()
log.info("S3 downloaded s3://%s/%s -> %s", bucket, key, local_path)
return True
except Exception as exc:
@@ -269,11 +399,18 @@ class S3Client:
if not self._reconnect():
log.error("S3 reconnect failed on attempt %d", attempt + 1)
continue
log.error("S3 download failed after %d attempts: s3://%s/%s -> %s",
_MAX_RETRIES, bucket, key, local_path)
return False
@staticmethod
def _cleanup_meta(meta_path: str):
    """Best-effort removal of the ``.s3meta`` sidecar file.

    Missing or locked files are ignored — the sidecar only exists to
    support resume, so failing to delete it is harmless.
    """
    try:
        os.remove(meta_path)
    except OSError:
        # Already gone or not removable; nothing to do.
        pass
def delete_object(self, bucket: str, key: str) -> bool:
"""Delete an object from S3."""
if not self._ensure_connected():

View File

@@ -9,6 +9,15 @@ from typing import Dict, Optional, Tuple
from core.ssh_client import ShellSession, SFTPSession
_CRITICAL_KEYS = ('ip', 'port', 'username', 'password', 'type',
'access_key', 'secret_key', 'use_ssl')
def _server_changed(old: dict, new: dict) -> bool:
    """Return True when any critical connection field differs.

    Compares the cached server record against the freshly supplied one
    over the fields listed in _CRITICAL_KEYS (host, port, credentials,
    type, S3 keys, SSL flag). Missing keys compare as None on both sides.
    """
    for field in _CRITICAL_KEYS:
        if old.get(field) != new.get(field):
            return True
    return False
class SessionData:
"""Container for session data including the actual sessions and their metadata."""
def __init__(self, alias: str, server: dict, key_path: str):
@@ -70,6 +79,11 @@ class SessionPool:
self._sessions[alias] = session_data
else:
session_data = self._sessions[alias]
# Invalidate if server connection data changed
if _server_changed(session_data.server, server):
session_data.cleanup()
session_data.server = server
session_data.key_path = key_path
# Update access time for LRU
self._update_last_access(alias)
@@ -108,6 +122,11 @@ class SessionPool:
self._sessions[alias] = session_data
else:
session_data = self._sessions[alias]
# Invalidate if server connection data changed
if _server_changed(session_data.server, server):
session_data.cleanup()
session_data.server = server
session_data.key_path = key_path
# Update access time for LRU
self._update_last_access(alias)

View File

@@ -401,12 +401,15 @@ del /f /q "%~f0" >nul 2>&1
return
time.sleep(0.1)
first_run = True
while self._running:
# Check if enough time passed since last check
# On first run after startup, always check regardless of interval
last_check = self.store.get_last_update_check()
now = time.time()
if not last_check or (now - last_check) >= _CHECK_INTERVAL:
if first_run or not last_check or (now - last_check) >= _CHECK_INTERVAL:
first_run = False
info = self.check_now()
if info and self._gui_callback:
mode = self.store.get_update_mode()

View File

@@ -15,10 +15,12 @@ class AboutDialog(ctk.CTkToplevel):
self.geometry("500x480")
self.resizable(False, False)
self.transient(master)
self.grab_set()
self.focus_force()
self.protocol("WM_DELETE_WINDOW", self._on_close)
self._master_ref = master
self._map_bind_id = master.bind("<Map>", self._on_parent_map, add="+")
# ── Header ──
ctk.CTkLabel(
self, text=t("about_title"),
@@ -78,9 +80,20 @@ class AboutDialog(ctk.CTkToplevel):
self, text=t("close"), width=120, command=self._on_close
).pack(pady=(10, 20))
def _on_parent_map(self, event=None):
"""Restore dialog when parent is un-minimized."""
try:
if not self.winfo_exists():
return
self.deiconify()
self.lift()
self.focus_force()
except Exception:
pass
def _on_close(self):
try:
self.grab_release()
self._master_ref.unbind("<Map>", self._map_bind_id)
except Exception:
pass
self.destroy()

View File

@@ -3,6 +3,7 @@ Main application window — sidebar + tabview layout.
"""
import tkinter
import sys
import customtkinter as ctk
from tkinter import messagebox
@@ -118,6 +119,37 @@ class App(ctk.CTk):
# Cleanup on close
self.protocol("WM_DELETE_WINDOW", self._on_close)
# Fix: restore window after Win+D (Show Desktop)
self.bind("<Map>", self._on_map, add="+")
if sys.platform == "win32":
self._setup_win32_restore()
def _on_map(self, event=None):
    """<Map> handler: make sure the window is fully visible after restore.

    Fires when the window is re-mapped (e.g. un-minimized from the
    taskbar or after Win+D); de-iconifies and raises it. Best-effort —
    the window may be mid-destruction.
    """
    try:
        self.deiconify()
        self.lift()
    except Exception:
        # Tk widget may already be destroyed; ignore.
        pass
def _setup_win32_restore(self):
    """Install the Win32 fallback that un-sticks a minimized window.

    Caches the user32 handle and the top-level HWND, then starts the
    periodic _check_minimized() poll. Attribute names (_user32, _hwnd)
    are read by _check_minimized and must not change.
    """
    import ctypes
    self._user32 = ctypes.windll.user32
    # wm_frame() yields the top-level window handle as a hex string.
    self._hwnd = int(self.wm_frame(), 16)
    self._check_minimized()
def _check_minimized(self):
    """Poll for a stuck minimized state and force-restore the window.

    If the window is iconic yet still the foreground window (a state
    Tk sometimes gets wedged in on Windows), ShowWindow(SW_RESTORE) is
    issued. Re-arms itself every 500 ms via Tk's after().
    """
    SW_RESTORE = 9
    try:
        if self._user32.IsIconic(self._hwnd) and \
                self._user32.GetForegroundWindow() == self._hwnd:
            self._user32.ShowWindow(self._hwnd, SW_RESTORE)
    except Exception:
        # user32 calls can fail during shutdown — best effort only.
        pass
    self.after(500, self._check_minimized)
def _build_layout(self):
# PanedWindow — resizable sidebar | main area
self._paned = tkinter.PanedWindow(
@@ -299,9 +331,19 @@ class App(ctk.CTk):
self.sidebar._select(new_alias)
self.session_pool.rename_server(alias, new_alias)
else:
info = self._tab_instances.get("info")
if info and hasattr(info, "refresh"):
info.refresh()
# Data may have changed (IP, port, password) — force reconnect
self._force_reconnect(alias)
def _force_reconnect(self, alias: str):
    """Force every tab to reconnect after the server record was edited.

    Drops the pooled SSH/SFTP sessions for *alias* and clears each
    tab's cached alias so the subsequent selection re-runs
    set_server() with the fresh credentials instead of early-returning.
    """
    # Invalidate cached sessions held by the pool.
    self.session_pool.disconnect_session(alias)
    # Clearing _current_alias defeats the "same server" early return.
    for tab in self._tab_instances.values():
        if getattr(tab, '_current_alias', None) == alias:
            tab._current_alias = None
    # Re-select the server, which pushes it to all tabs again.
    self._on_server_select(alias)
def _delete_server(self, alias: str):
if messagebox.askyesno(t("delete_server"), t("delete_confirm").format(alias=alias)):

View File

@@ -32,7 +32,11 @@ class GroupDialog(ctk.CTkToplevel):
self.geometry("340x200")
self.resizable(False, False)
self.transient(master)
self.grab_set()
self.focus_force()
self.protocol("WM_DELETE_WINDOW", self._on_close)
self._master_ref = master
self._map_bind_id = master.bind("<Map>", self._on_parent_map, add="+")
# ── Name ──
ctk.CTkLabel(self, text=t("group_name"), anchor="w").pack(
@@ -71,7 +75,7 @@ class GroupDialog(ctk.CTkToplevel):
btn_frame.pack(fill="x", padx=20, pady=(15, 10))
ctk.CTkButton(btn_frame, text=t("cancel"), width=80,
fg_color="gray", command=self.destroy).pack(side="left")
fg_color="gray", command=self._on_close).pack(side="left")
ctk.CTkButton(btn_frame, text=t("save"), width=80,
command=self._save).pack(side="right")
@@ -90,6 +94,23 @@ class GroupDialog(ctk.CTkToplevel):
else:
btn.configure(border_color=fg)
def _on_parent_map(self, event=None):
    """Bring the dialog back when the parent window is un-minimized."""
    try:
        if not self.winfo_exists():
            # Dialog was already destroyed; stale binding fired.
            return
        self.deiconify()
        self.lift()
        self.focus_force()
    except Exception:
        # Tk may raise during teardown — restoring is best-effort.
        pass
def _on_close(self):
    """Detach the parent <Map> binding, then destroy the dialog.

    Unbinding prevents _on_parent_map from firing against a dead
    widget after this dialog is gone.
    """
    try:
        self._master_ref.unbind("<Map>", self._map_bind_id)
    except Exception:
        # Parent may already be destroyed; ignore.
        pass
    self.destroy()
def _save(self):
name = self._name_var.get().strip()
if not name:
@@ -107,4 +128,4 @@ class GroupDialog(ctk.CTkToplevel):
group = self.store.add_group(name, self._selected_color)
self.result = group
self.destroy()
self._on_close()

View File

@@ -54,14 +54,14 @@ class ServerDialog(ctk.CTkToplevel):
self.geometry("450x720")
self.resizable(False, False)
# transient BEFORE grab_set — prevents focus lock on minimize
self.transient(master)
self.grab_set()
self.focus_force()
# Release grab on close (prevents stuck app)
self.protocol("WM_DELETE_WINDOW", self._on_close)
# Restore dialog when parent is un-minimized
self._master_ref = master
self._map_bind_id = master.bind("<Map>", self._on_parent_map, add="+")
self._field_frames: dict[str, ctk.CTkFrame] = {}
self._build_ui(server)
@@ -485,10 +485,20 @@ class ServerDialog(ctk.CTkToplevel):
except ValueError as e:
self._show_error(str(e))
def _on_close(self):
"""Release grab and destroy — prevents stuck app on minimize."""
def _on_parent_map(self, event=None):
"""Restore dialog when parent window is un-minimized."""
try:
self.grab_release()
if not self.winfo_exists():
return
self.deiconify()
self.lift()
self.focus_force()
except Exception:
pass
def _on_close(self):
try:
self._master_ref.unbind("<Map>", self._map_bind_id)
except Exception:
pass
self.destroy()

View File

@@ -349,8 +349,9 @@ class S3Tab(ctk.CTkFrame):
def _on_transfer_status(self, message: str):
"""Called from transfer thread with retry/status info."""
# Reset progress on retry (boto3 restarts the transfer)
self._transfer_bytes = 0
# Note: do NOT reset _transfer_bytes here — resumable download
# reports already-downloaded bytes via progress_cb, so resetting
# would break the progress bar on resume.
self.after(0, lambda: self._status_label.configure(text=message))
def _upload_files(self, paths: list[str]):

View File

@@ -83,7 +83,11 @@ class UpdateDialog(ctk.CTkToplevel):
self.geometry("500x420")
self.resizable(False, False)
self.transient(parent)
self.grab_set()
self.focus_force()
self.protocol("WM_DELETE_WINDOW", self._on_close)
self._master_ref = parent
self._map_bind_id = parent.bind("<Map>", self._on_parent_map, add="+")
self._info = info
self._downloaded_path = downloaded_path
@@ -99,6 +103,23 @@ class UpdateDialog(ctk.CTkToplevel):
py = parent.winfo_y() + (parent.winfo_height() - 420) // 2
self.geometry(f"+{px}+{py}")
def _on_parent_map(self, event=None):
    """Restore this dialog whenever the parent window is re-mapped."""
    try:
        if not self.winfo_exists():
            # Stale binding — dialog already destroyed.
            return
        self.deiconify()
        self.lift()
        self.focus_force()
    except Exception:
        # Best-effort: Tk can raise mid-teardown.
        pass
def _on_close(self):
    """Remove the parent <Map> binding and destroy the dialog.

    Without the unbind, _on_parent_map would keep firing on a
    destroyed widget every time the parent is restored.
    """
    try:
        self._master_ref.unbind("<Map>", self._map_bind_id)
    except Exception:
        # Parent window may already be gone.
        pass
    self.destroy()
def _build_ui(self):
from version import __version__
@@ -194,7 +215,7 @@ class UpdateDialog(ctk.CTkToplevel):
width=80, height=34, corner_radius=8,
fg_color="#4b5563", hover_color="#374151",
font=ctk.CTkFont(size=13),
command=self.destroy,
command=self._on_close,
).pack(side="right", padx=(8, 0))
ctk.CTkButton(
@@ -268,4 +289,4 @@ class UpdateDialog(ctk.CTkToplevel):
def _on_skip_click(self):
if self._on_skip:
self._on_skip(self._info["version"])
self.destroy()
self._on_close()

View File

@@ -1,7 +1,7 @@
# Скилл /ssh — управление удалёнными серверами
Ты управляешь удалёнными серверами через универсальную CLI-утилиту.
Поддерживаются: SSH, SQL (MariaDB/MSSQL/PostgreSQL), Redis, S3, Grafana, Prometheus, WinRM (PowerShell/CMD).
Поддерживаются: SSH, SQL (MariaDB/MSSQL/PostgreSQL), Redis, S3/MinIO, Grafana, Prometheus, WinRM (PowerShell/CMD).
## ВАЖНО — Безопасность
@@ -19,33 +19,47 @@
Пользователь передаёт через `$ARGUMENTS`. Разбери и выполни.
## КРИТИЧНО — Команды зависят от типа сервера
## КРИТИЧНО — СНАЧАЛА ПРОВЕРЬ ТИП СЕРВЕРА
`--list` возвращает колонку `Type` для каждого сервера. **Тип определяет какие команды использовать:**
**ПЕРЕД ЛЮБОЙ операцией** с сервером — **ОБЯЗАТЕЛЬНО** выполни `--list` и посмотри колонку `Type`.
**ЗАПРЕЩЕНО** угадывать тип сервера. MinIO/S3 — это НЕ SSH, Redis — это НЕ SSH, MariaDB — это НЕ SSH.
| Тип | Команды |
|-----|---------|
| `ssh` | `ALIAS "command"`, `--upload`, `--download`, `--ping`, `--install-key` |
| `telnet` | `ALIAS "command"` (как ssh, но без SFTP/sudo/ключей) |
| `mariadb` / `mssql` / `postgresql` | `--sql`, `--sql-databases`, `--sql-tables` |
| `redis` | `--redis`, `--redis-info`, `--redis-keys` |
| `s3` | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete` |
| `grafana` | `--grafana-dashboards`, `--grafana-alerts` |
| `prometheus` | `--prom-query`, `--prom-targets`, `--prom-alerts` |
| `winrm` | `--ps`, `--cmd` |
| `rdp` / `vnc` | Только GUI (запуск внешнего клиента), CLI-команд нет |
**Тип сервера определяет КАКИЕ команды использовать. Использование команд не того типа — СЛОМАЕТ операцию.**
**`ALIAS "command"` — ТОЛЬКО для типа `ssh`.** Для Redis — `--redis`, для SQL — `--sql`, для WinRM — `--ps`/`--cmd` и т.д.
| Тип | Команды | НЕ использовать |
|-----|---------|-----------------|
| `ssh` | `ALIAS "command"`, `--upload`, `--download`, `--ping`, `--install-key` | — |
| `telnet` | `ALIAS "command"` (без SFTP/sudo/ключей) | `--upload`, `--download` |
| `mariadb` / `mssql` / `postgresql` | `--sql`, `--sql-databases`, `--sql-tables` | `ALIAS "command"` |
| `redis` | `--redis`, `--redis-info`, `--redis-keys` | `ALIAS "command"` |
| `s3` (MinIO, AWS S3, и др.) | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete`, `--s3-url` | `ALIAS "command"`, `--upload`, `--download` |
| `grafana` | `--grafana-dashboards`, `--grafana-alerts` | `ALIAS "command"` |
| `prometheus` | `--prom-query`, `--prom-targets`, `--prom-alerts` | `ALIAS "command"` |
| `winrm` | `--ps`, `--cmd` | `ALIAS "command"` |
| `rdp` / `vnc` | Только GUI | всё |
**`ALIAS "command"` (shell-команды типа ls, cat, mkdir) — ТОЛЬКО для типов `ssh` и `telnet`.**
```bash
# Тип redis → --redis-info, НЕ ALIAS "INFO"
python ~/.server-connections/ssh.py --redis-info "Reddis main ovh"
# ❌ НЕПРАВИЛЬНО — MinIO/S3 это НЕ SSH, нельзя выполнять shell-команды
python ~/.server-connections/ssh.py "minio-alias" "ls /bucket"
python ~/.server-connections/ssh.py "minio-alias" "mkdir /bucket/folder"
# Тип mariadb → --sql-databases, НЕ ALIAS "SHOW DATABASES"
python ~/.server-connections/ssh.py --sql-databases "Maria Db Connection main ovh"
# ✅ ПРАВИЛЬНО — S3-команды для типа s3
python ~/.server-connections/ssh.py --s3-ls "minio-alias" bucket
python ~/.server-connections/ssh.py --s3-upload "minio-alias" "D:/file.txt" bucket/folder/file.txt
# Тип ssh → ALIAS "command"
python ~/.server-connections/ssh.py investor "uptime"
# ❌ НЕПРАВИЛЬНО — Redis это НЕ SSH
python ~/.server-connections/ssh.py "redis-alias" "INFO"
# ✅ ПРАВИЛЬНО
python ~/.server-connections/ssh.py --redis-info "redis-alias"
# ❌ НЕПРАВИЛЬНО — MariaDB это НЕ SSH
python ~/.server-connections/ssh.py "mariadb-alias" "SHOW DATABASES"
# ✅ ПРАВИЛЬНО
python ~/.server-connections/ssh.py --sql-databases "mariadb-alias"
```
## Общие команды
@@ -159,7 +173,12 @@ python ~/.server-connections/ssh.py --redis-info ALIAS
python ~/.server-connections/ssh.py --redis-keys ALIAS "user:*"
```
## S3-команды (тип: s3)
## S3-команды (тип: s3) — MinIO, AWS S3, любое S3-совместимое хранилище
**MinIO = тип `s3`.** Когда пользователь говорит "MinIO" или "S3" — используй ТОЛЬКО `--s3-*` команды.
**НЕ пытайся** выполнять shell-команды (`ls`, `mkdir`, `cat`) на S3-серверах — это не SSH!
**Папки в S3 не существуют** — это префиксы. "Создать папку" = загрузить файл с префиксом в ключе (например `bucket/folder/file.txt`).
### Список бакетов
```bash
@@ -187,6 +206,25 @@ python ~/.server-connections/ssh.py --s3-download ALIAS bucket/key "D:/local/fil
python ~/.server-connections/ssh.py --s3-delete ALIAS bucket/key
```
### Получить ссылку на файл (presigned URL)
```bash
python ~/.server-connections/ssh.py --s3-url ALIAS bucket/key
python ~/.server-connections/ssh.py --s3-url ALIAS bucket/key 86400
```
По умолчанию ссылка действует 1 час (3600 сек). Второй аргумент — время жизни в секундах (например 86400 = 24 часа).
### Типичный workflow: "создай папку и залей файл"
```bash
# 1. Посмотри бакеты
python ~/.server-connections/ssh.py --s3-buckets ALIAS
# 2. "Создать папку" = просто загрузить файл с нужным путём (prefix)
python ~/.server-connections/ssh.py --s3-upload ALIAS "D:/file.txt" mybucket/newfolder/file.txt
# 3. Проверить
python ~/.server-connections/ssh.py --s3-ls ALIAS mybucket/newfolder/
# 4. Получить ссылку
python ~/.server-connections/ssh.py --s3-url ALIAS mybucket/newfolder/file.txt
```
## Grafana-команды (тип: grafana)
### Список дашбордов

View File

@@ -42,6 +42,7 @@ S3 (type: s3):
python ssh.py --s3-upload ALIAS local bucket/key # upload file
python ssh.py --s3-download ALIAS bucket/key local # download file
python ssh.py --s3-delete ALIAS bucket/key # delete object
python ssh.py --s3-url ALIAS bucket/key [SEC] # presigned URL (default 3600s)
WinRM (type: winrm):
python ssh.py --ps ALIAS "Get-Process" # PowerShell via WinRM
@@ -1459,6 +1460,27 @@ def s3_delete(server: dict, remote_path: str):
sys.exit(1)
def s3_url(server: dict, remote_path: str, expires: int = 3600):
    """Generate a presigned GET URL for an S3 object and print it.

    Args:
        server: server record whose credentials _get_s3_client consumes.
        remote_path: "bucket/key" path; the key may itself contain slashes.
        expires: URL lifetime in seconds (default 3600 = 1 hour).

    Exits with status 1 on a usage error or a boto3 failure.
    """
    client = _get_s3_client(server)
    # Split on the FIRST "/" only: everything after it is the object key.
    # (The old `parts[0] if parts else server.get("bucket")` fallback was
    # dead code — str.split always returns at least one element.)
    bucket, _, key = remote_path.partition("/")
    if not bucket or not key:
        print("ERROR: Usage: --s3-url ALIAS bucket/key [seconds]", file=sys.stderr)
        sys.exit(1)
    try:
        url = client.generate_presigned_url(
            "get_object",
            Params={"Bucket": bucket, "Key": key},
            ExpiresIn=expires,
        )
        print(url)
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
# ── Grafana commands ──────────────────────────────────
def _grafana_request(server: dict, endpoint: str) -> dict:
@@ -1763,6 +1785,12 @@ def main():
alias = _resolve_alias(sys.argv[2], servers)
s3_delete(servers[alias], sys.argv[3])
sys.exit(0)
if cmd == "--s3-url" and len(sys.argv) >= 4:
_, servers = load_servers()
alias = _resolve_alias(sys.argv[2], servers)
expires = int(sys.argv[4]) if len(sys.argv) >= 5 else 3600
s3_url(servers[alias], sys.argv[3], expires)
sys.exit(0)
# ── Grafana commands ──
if cmd == "--grafana-dashboards" and len(sys.argv) >= 3:

View File

@@ -1,6 +1,6 @@
"""Version info for ServerManager."""
__version__ = "1.9.7"
__version__ = "1.9.16"
__app_name__ = "ServerManager"
__author__ = "aibot777"
__description__ = "Desktop GUI for managing remote servers"