v1.9.0: S3 server type — bucket/object browser, drag-and-drop upload, resilient transfers
New server type: S3 (MinIO, AWS, any S3-compatible storage) - core/s3_client.py: boto3 client with auto-reconnect, 10 retries, exponential backoff, multipart upload/download, tcp_keepalive - gui/tabs/s3_tab.py: object browser (Treeview), bucket selector, folder navigation, drag-and-drop upload from Explorer (windnd), progress bar with %, multi-file upload - CLI: --s3-buckets, --s3-ls, --s3-upload, --s3-download, --s3-delete with retry - ServerDialog: access_key, secret_key, bucket fields - Registration: server_store, connection_factory, status_checker, icons, app, i18n (EN/RU/ZH) - Fix: build.py cleanup_old_releases now sorts by semver (was lexicographic, broke v1.8.100+) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -26,6 +26,7 @@ ServerManager — **кроссплатформенное** Desktop GUI (CustomTk
|
||||
| grafana | `grafana_client.py` (requests) | Dashboards, Info, Setup | `--grafana-dashboards`, `--grafana-alerts` |
|
||||
| prometheus | `prometheus_client.py` (requests) | Metrics, Info, Setup | `--prom-query`, `--prom-targets`, `--prom-alerts` |
|
||||
| winrm | `winrm_client.py` (pywinrm) | PowerShell, Info, Setup | `--ps`, `--cmd` |
|
||||
| s3 | `s3_client.py` (boto3) | Objects, Info, Setup | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete` |
|
||||
| rdp/vnc | `remote_desktop.py` | Launch, Info, Setup | — (запуск внешнего клиента) |
|
||||
|
||||
## БЕЗОПАСНОСТЬ
|
||||
@@ -77,6 +78,7 @@ core/ # Бизнес-логика
|
||||
├── grafana_client.py # Grafana REST API
|
||||
├── prometheus_client.py # Prometheus REST API
|
||||
├── telnet_client.py # Telnet (тот же интерфейс что ShellSession)
|
||||
├── s3_client.py # S3/MinIO (boto3)
|
||||
├── winrm_client.py # PowerShell/CMD через WinRM
|
||||
├── remote_desktop.py # RDP/VNC (запуск внешнего клиента)
|
||||
├── connection_factory.py # Фабрика: тип → клиент (lazy imports)
|
||||
@@ -97,6 +99,7 @@ gui/
|
||||
│ ├── query_tab.py # SQL-редактор + Treeview + Export CSV
|
||||
│ ├── redis_tab.py # Redis-консоль + история
|
||||
│ ├── grafana_tab.py # Дашборды + алерты
|
||||
│ ├── s3_tab.py # S3 браузер объектов
|
||||
│ ├── prometheus_tab.py # PromQL + targets
|
||||
│ ├── powershell_tab.py # PS/CMD (WinRM)
|
||||
│ ├── launch_tab.py # RDP/VNC кнопка Connect
|
||||
|
||||
12
build.py
12
build.py
@@ -248,17 +248,25 @@ def publish_gitea_release(exe_path: str):
|
||||
print(f"Gitea asset upload failed: {e}")
|
||||
|
||||
|
||||
def _version_key(path: str):
|
||||
"""Extract (major, minor, patch) tuple for semver sorting."""
|
||||
m = re.search(r'v(\d+)\.(\d+)\.(\d+)', os.path.basename(path))
|
||||
if m:
|
||||
return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
|
||||
return (0, 0, 0)
|
||||
|
||||
|
||||
def cleanup_old_releases():
|
||||
"""Keep the first release (v1.0.0) and the last 5 releases, delete the rest."""
|
||||
import glob
|
||||
|
||||
pattern = os.path.join(RELEASES_DIR, f"{__app_name__}-v*")
|
||||
all_exes = sorted(glob.glob(pattern))
|
||||
all_exes = sorted(glob.glob(pattern), key=_version_key)
|
||||
|
||||
if len(all_exes) <= 6: # first + 5 = 6, nothing to clean
|
||||
return
|
||||
|
||||
# First release is always all_exes[0] (sorted, v1.0.0 < v1.8.x)
|
||||
# First release is always all_exes[0] (sorted by semver, v1.0.0 < v1.8.x)
|
||||
first = all_exes[0]
|
||||
last_5 = all_exes[-5:]
|
||||
keep = set([first] + last_5)
|
||||
|
||||
@@ -37,6 +37,10 @@ def create_connection(server: dict, key_path: str = ""):
|
||||
from core.winrm_client import WinRMClient
|
||||
return WinRMClient(server)
|
||||
|
||||
if server_type == "s3":
|
||||
from core.s3_client import S3Client
|
||||
return S3Client(server)
|
||||
|
||||
if server_type in ("rdp", "vnc"):
|
||||
from core.remote_desktop import RemoteDesktopLauncher
|
||||
return RemoteDesktopLauncher()
|
||||
|
||||
96
core/i18n.py
96
core/i18n.py
@@ -359,6 +359,38 @@ _EN = {
|
||||
"redis_disconnected": "Not connected",
|
||||
"redis_error": "Error: {error}",
|
||||
|
||||
# S3 tab
|
||||
"objects": "Objects",
|
||||
"access_key": "Access Key",
|
||||
"secret_key": "Secret Key",
|
||||
"placeholder_secret_key": "Secret key...",
|
||||
"bucket": "Bucket",
|
||||
"s3_objects": "S3 Objects",
|
||||
"s3_bucket": "Bucket:",
|
||||
"s3_back": "Back",
|
||||
"s3_refresh": "Refresh",
|
||||
"s3_upload": "Upload",
|
||||
"s3_download": "Download",
|
||||
"s3_delete": "Delete",
|
||||
"s3_col_name": "Name",
|
||||
"s3_col_size": "Size",
|
||||
"s3_col_modified": "Modified",
|
||||
"s3_connecting": "Connecting...",
|
||||
"s3_connect_failed": "Connection failed",
|
||||
"s3_loading": "Loading...",
|
||||
"s3_items_count": "{count} items",
|
||||
"s3_uploading": "Uploading...",
|
||||
"s3_upload_failed": "Upload failed",
|
||||
"s3_downloading": "Downloading...",
|
||||
"s3_download_ok": "Download complete",
|
||||
"s3_download_failed": "Download failed",
|
||||
"s3_deleting": "Deleting...",
|
||||
"s3_delete_failed": "Delete failed",
|
||||
"s3_drop_hint": "Drag files here to upload",
|
||||
"s3_uploading_n": "Uploading {count} files...",
|
||||
"s3_uploaded_n": "Uploaded {count} files",
|
||||
"s3_upload_partial": "Uploaded {ok}/{total} files",
|
||||
|
||||
# Grafana tab
|
||||
"grafana_refresh": "Refresh",
|
||||
"grafana_dashboards": "Dashboards",
|
||||
@@ -829,6 +861,38 @@ _RU = {
|
||||
"redis_disconnected": "Не подключено",
|
||||
"redis_error": "Ошибка: {error}",
|
||||
|
||||
# S3 tab
|
||||
"objects": "Объекты",
|
||||
"access_key": "Access Key",
|
||||
"secret_key": "Secret Key",
|
||||
"placeholder_secret_key": "Секретный ключ...",
|
||||
"bucket": "Бакет",
|
||||
"s3_objects": "Объекты S3",
|
||||
"s3_bucket": "Бакет:",
|
||||
"s3_back": "Назад",
|
||||
"s3_refresh": "Обновить",
|
||||
"s3_upload": "Загрузить",
|
||||
"s3_download": "Скачать",
|
||||
"s3_delete": "Удалить",
|
||||
"s3_col_name": "Имя",
|
||||
"s3_col_size": "Размер",
|
||||
"s3_col_modified": "Изменён",
|
||||
"s3_connecting": "Подключение...",
|
||||
"s3_connect_failed": "Ошибка подключения",
|
||||
"s3_loading": "Загрузка...",
|
||||
"s3_items_count": "{count} объектов",
|
||||
"s3_uploading": "Загрузка файла...",
|
||||
"s3_upload_failed": "Ошибка загрузки",
|
||||
"s3_downloading": "Скачивание...",
|
||||
"s3_download_ok": "Скачивание завершено",
|
||||
"s3_download_failed": "Ошибка скачивания",
|
||||
"s3_deleting": "Удаление...",
|
||||
"s3_delete_failed": "Ошибка удаления",
|
||||
"s3_drop_hint": "Перетащите файлы сюда для загрузки",
|
||||
"s3_uploading_n": "Загрузка {count} файлов...",
|
||||
"s3_uploaded_n": "Загружено {count} файлов",
|
||||
"s3_upload_partial": "Загружено {ok}/{total} файлов",
|
||||
|
||||
# Grafana tab
|
||||
"grafana_refresh": "Обновить",
|
||||
"grafana_dashboards": "Дашборды",
|
||||
@@ -1299,6 +1363,38 @@ _ZH = {
|
||||
"redis_disconnected": "未连接",
|
||||
"redis_error": "错误: {error}",
|
||||
|
||||
# S3 tab
|
||||
"objects": "对象",
|
||||
"access_key": "Access Key",
|
||||
"secret_key": "Secret Key",
|
||||
"placeholder_secret_key": "密钥...",
|
||||
"bucket": "存储桶",
|
||||
"s3_objects": "S3 对象",
|
||||
"s3_bucket": "存储桶:",
|
||||
"s3_back": "返回",
|
||||
"s3_refresh": "刷新",
|
||||
"s3_upload": "上传",
|
||||
"s3_download": "下载",
|
||||
"s3_delete": "删除",
|
||||
"s3_col_name": "名称",
|
||||
"s3_col_size": "大小",
|
||||
"s3_col_modified": "修改时间",
|
||||
"s3_connecting": "连接中...",
|
||||
"s3_connect_failed": "连接失败",
|
||||
"s3_loading": "加载中...",
|
||||
"s3_items_count": "{count} 个对象",
|
||||
"s3_uploading": "上传中...",
|
||||
"s3_upload_failed": "上传失败",
|
||||
"s3_downloading": "下载中...",
|
||||
"s3_download_ok": "下载完成",
|
||||
"s3_download_failed": "下载失败",
|
||||
"s3_deleting": "删除中...",
|
||||
"s3_delete_failed": "删除失败",
|
||||
"s3_drop_hint": "拖拽文件到此处上传",
|
||||
"s3_uploading_n": "正在上传 {count} 个文件...",
|
||||
"s3_uploaded_n": "已上传 {count} 个文件",
|
||||
"s3_upload_partial": "已上传 {ok}/{total} 个文件",
|
||||
|
||||
# Grafana tab
|
||||
"grafana_refresh": "刷新",
|
||||
"grafana_dashboards": "仪表盘",
|
||||
|
||||
@@ -57,6 +57,7 @@ ICONS = {
|
||||
"powershell": "\u2328", # ⌨
|
||||
"launch": "\U0001f5a5", # 🖥
|
||||
"totp": "\U0001f510", # 🔐
|
||||
"objects": "\U0001faa3", # 🪣
|
||||
|
||||
# Context menu
|
||||
"connect": "\u25b6", # ▶
|
||||
@@ -91,6 +92,7 @@ TYPE_COLORS = {
|
||||
"redis": "#dc2626",
|
||||
"grafana": "#f97316",
|
||||
"prometheus": "#e11d48",
|
||||
"s3": "#16a34a",
|
||||
}
|
||||
|
||||
# Unicode symbols for each server type (reliable, no PIL needed)
|
||||
@@ -106,6 +108,7 @@ TYPE_SYMBOLS = {
|
||||
"redis": "\u25c6", # ◆
|
||||
"grafana": "\U0001f4ca", # 📊
|
||||
"prometheus": "\U0001f525", # 🔥
|
||||
"s3": "\U0001faa3", # 🪣
|
||||
}
|
||||
|
||||
# Short text labels for sidebar badge
|
||||
@@ -121,6 +124,7 @@ TYPE_LABELS = {
|
||||
"redis": "RDS",
|
||||
"grafana": "GRF",
|
||||
"prometheus": "PRM",
|
||||
"s3": "S3",
|
||||
}
|
||||
|
||||
|
||||
@@ -157,6 +161,7 @@ TAB_ICONS = {
|
||||
"metrics": "metrics",
|
||||
"powershell": "powershell",
|
||||
"launch": "launch",
|
||||
"objects": "objects",
|
||||
}
|
||||
|
||||
# Context menu icon mapping (i18n_key -> icon_name)
|
||||
|
||||
288
core/s3_client.py
Normal file
288
core/s3_client.py
Normal file
@@ -0,0 +1,288 @@
|
||||
"""
|
||||
S3 client wrapper — duck-typed, lazy-imports boto3 module.
|
||||
Works with any S3-compatible storage (AWS, MinIO, etc.).
|
||||
|
||||
Resilience features:
|
||||
- Adaptive retry with exponential backoff (up to 10 attempts)
|
||||
- Multipart upload/download with configurable chunk size
|
||||
- Auto-reconnect on connection loss (network switch, Wi-Fi change)
|
||||
- boto3 TransferConfig tuned for unstable connections
|
||||
"""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from core.logger import log
|
||||
|
||||
# Lazily-imported module handles — populated by _get_boto3() on first use so
# that merely importing this module does not require boto3 to be installed.
_boto3 = None
_botocore = None

# Retry / resilience constants
_MAX_RETRIES = 10        # full-transfer retry attempts in upload/download
_BASE_DELAY = 2.0  # seconds
_MAX_DELAY = 60.0  # seconds
_MULTIPART_THRESHOLD = 8 * 1024 * 1024  # 8 MB — use multipart above this
_MULTIPART_CHUNKSIZE = 8 * 1024 * 1024  # 8 MB chunks
_MAX_CONCURRENCY = 4  # parallel parts (low for unstable links)
|
||||
|
||||
|
||||
def _get_boto3():
    """Return the boto3 module, importing boto3/botocore on first call.

    Keeps the heavy SDK import off the module-import path; subsequent calls
    reuse the cached module objects.
    """
    global _boto3, _botocore
    if _boto3 is None:
        import boto3
        import botocore
        _boto3, _botocore = boto3, botocore
    return _boto3
|
||||
|
||||
|
||||
def _get_transfer_config():
    """Build a boto3 TransferConfig tuned for unreliable connections."""
    from boto3.s3.transfer import TransferConfig

    # Low concurrency + modest chunk size keep retransmission cost small
    # when individual parts fail on a flaky link.
    settings = {
        "multipart_threshold": _MULTIPART_THRESHOLD,
        "multipart_chunksize": _MULTIPART_CHUNKSIZE,
        "max_concurrency": _MAX_CONCURRENCY,
        "num_download_attempts": _MAX_RETRIES,
    }
    return TransferConfig(**settings)
|
||||
|
||||
|
||||
def _retry_delay(attempt: int) -> float:
    """Exponential backoff capped at _MAX_DELAY: 2, 4, 8, 16, 32, 60, 60, ..."""
    return min(_BASE_DELAY * (2 ** attempt), _MAX_DELAY)
|
||||
|
||||
|
||||
class S3Client:
    """Manage a single S3 connection. No ABC — duck typing.

    Wraps a boto3 S3 client for one configured server entry.  All public
    methods swallow exceptions and report failure through their return value
    (bool / empty containers), so GUI/CLI callers never need to catch boto3
    errors themselves.
    """

    def __init__(self, server: dict):
        # server: entry from the server store; keys consumed here:
        #   ip, port, use_ssl, access_key, secret_key, bucket
        self._server = server
        self._endpoint = server.get("ip", "")
        # If endpoint doesn't start with http, add https
        if self._endpoint and not self._endpoint.startswith("http"):
            use_ssl = server.get("use_ssl", True)
            scheme = "https" if use_ssl else "http"
            port = int(server.get("port", 443))
            # Omit the port when it is the scheme default (443/80).
            if (scheme == "https" and port == 443) or (scheme == "http" and port == 80):
                self._endpoint = f"{scheme}://{self._endpoint}"
            else:
                self._endpoint = f"{scheme}://{self._endpoint}:{port}"
        self._access_key = server.get("access_key", "")
        self._secret_key = server.get("secret_key", "")
        self._bucket = server.get("bucket", "")      # default bucket, may be ""
        self._use_ssl = server.get("use_ssl", True)
        self._client = None            # boto3 client; None while disconnected
        self._transfer_config = None   # TransferConfig, set in connect()

    # -- lifecycle --------------------------------------------------------

    def connect(self) -> bool:
        """Create the boto3 client and verify it with a list_buckets() probe.

        Returns True on success; on any failure logs the error, clears the
        client and returns False.
        """
        try:
            b3 = _get_boto3()
            import botocore.config
            import urllib3
            # NOTE(review): TLS verification is disabled below (verify=False),
            # hence the warning suppression here.  Presumably intended for
            # self-hosted MinIO with self-signed certs — confirm this is
            # acceptable for AWS endpoints too.
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            config = botocore.config.Config(
                signature_version="s3v4",
                connect_timeout=15,
                read_timeout=60,
                retries={"max_attempts": 5, "mode": "adaptive"},
                tcp_keepalive=True,
            )
            self._client = b3.client(
                "s3",
                endpoint_url=self._endpoint,
                aws_access_key_id=self._access_key,
                aws_secret_access_key=self._secret_key,
                config=config,
                verify=False,
            )
            self._transfer_config = _get_transfer_config()
            # Test connection
            self._client.list_buckets()
            log.info("S3 connected %s", self._endpoint)
            return True
        except Exception as exc:
            log.error("S3 connect failed: %s", exc)
            self._client = None
            return False

    def _reconnect(self) -> bool:
        """Try to re-establish the S3 connection after a drop."""
        log.warning("S3 reconnecting to %s...", self._endpoint)
        self._client = None
        return self.connect()

    def _ensure_connected(self) -> bool:
        """Check connection, reconnect if needed."""
        if self._client is None:
            return self._reconnect()
        try:
            # Cheap liveness probe; any failure triggers a full reconnect.
            self._client.list_buckets()
            return True
        except Exception:
            return self._reconnect()

    def disconnect(self):
        # Dropping the reference is sufficient; boto3 clients need no close().
        self._client = None
        log.info("S3 disconnected")

    def check_connection(self) -> bool:
        """Passive liveness check — unlike _ensure_connected, never reconnects."""
        try:
            if self._client is None:
                return False
            self._client.list_buckets()
            return True
        except Exception:
            return False

    # -- bucket operations ------------------------------------------------

    def list_buckets(self) -> list[dict]:
        """Return list of {'Name': str, 'CreationDate': datetime}."""
        if not self._ensure_connected():
            return []
        try:
            resp = self._client.list_buckets()
            return resp.get("Buckets", [])
        except Exception as exc:
            log.error("S3 list_buckets failed: %s", exc)
            return []

    # -- object operations ------------------------------------------------

    def list_objects(self, bucket: str = "", prefix: str = "",
                     delimiter: str = "/") -> tuple[list[dict], list[str]]:
        """List objects and common prefixes in a bucket/prefix.

        Returns (objects, prefixes) where:
        - objects: list of {'Key', 'Size', 'LastModified'}
        - prefixes: list of prefix strings (subdirectories)
        """
        if not self._ensure_connected():
            return [], []
        # Fall back to the bucket configured on the server entry.
        bucket = bucket or self._bucket
        if not bucket:
            return [], []
        try:
            objects = []
            prefixes = []
            # Paginate so buckets with more than 1000 keys are fully listed.
            paginator = self._client.get_paginator("list_objects_v2")
            kwargs = {"Bucket": bucket, "Delimiter": delimiter}
            if prefix:
                kwargs["Prefix"] = prefix
            for page in paginator.paginate(**kwargs):
                for obj in page.get("Contents", []):
                    # Skip the prefix itself
                    if obj["Key"] != prefix:
                        objects.append(obj)
                for cp in page.get("CommonPrefixes", []):
                    prefixes.append(cp["Prefix"])
            return objects, prefixes
        except Exception as exc:
            log.error("S3 list_objects failed: %s", exc)
            return [], []

    def upload_file(self, local_path: str, bucket: str, key: str,
                    progress_cb=None, status_cb=None) -> bool:
        """Upload a local file to S3 with retry and resume.

        progress_cb(bytes_transferred) — called periodically for progress bar.
        status_cb(message) — called with status messages (retry info, etc.).

        Uses multipart upload for files > 8 MB.
        On failure, retries up to 10 times with exponential backoff.
        boto3 multipart automatically resumes failed parts.
        """
        if not self._ensure_connected():
            return False
        file_size = os.path.getsize(local_path)
        for attempt in range(_MAX_RETRIES):
            try:
                self._client.upload_file(
                    local_path, bucket, key,
                    Config=self._transfer_config,
                    Callback=progress_cb,
                )
                log.info("S3 uploaded %s -> s3://%s/%s (%d bytes)",
                         local_path, bucket, key, file_size)
                return True
            except Exception as exc:
                delay = _retry_delay(attempt)
                log.warning("S3 upload attempt %d/%d failed: %s (retry in %.0fs)",
                            attempt + 1, _MAX_RETRIES, exc, delay)
                if status_cb:
                    status_cb(f"Retry {attempt + 1}/{_MAX_RETRIES} in {delay:.0f}s...")
                # Reset progress for retry (callback accumulates)
                if progress_cb and attempt < _MAX_RETRIES - 1:
                    # We can't easily reset boto3's internal counter,
                    # but the GUI tracks total bytes itself
                    pass
                time.sleep(delay)
                # Reconnect before retry
                if not self._reconnect():
                    log.error("S3 reconnect failed on attempt %d", attempt + 1)
                    continue

        log.error("S3 upload failed after %d attempts: %s -> s3://%s/%s",
                  _MAX_RETRIES, local_path, bucket, key)
        return False

    def download_file(self, bucket: str, key: str, local_path: str,
                      progress_cb=None, status_cb=None) -> bool:
        """Download an S3 object to a local file with retry.

        progress_cb(bytes_transferred) — called periodically.
        status_cb(message) — called with retry info.

        boto3 TransferConfig.num_download_attempts handles part-level retries.
        This method adds full-transfer retries with reconnect.
        """
        if not self._ensure_connected():
            return False
        for attempt in range(_MAX_RETRIES):
            try:
                self._client.download_file(
                    bucket, key, local_path,
                    Config=self._transfer_config,
                    Callback=progress_cb,
                )
                log.info("S3 downloaded s3://%s/%s -> %s", bucket, key, local_path)
                return True
            except Exception as exc:
                delay = _retry_delay(attempt)
                log.warning("S3 download attempt %d/%d failed: %s (retry in %.0fs)",
                            attempt + 1, _MAX_RETRIES, exc, delay)
                if status_cb:
                    status_cb(f"Retry {attempt + 1}/{_MAX_RETRIES} in {delay:.0f}s...")
                time.sleep(delay)
                if not self._reconnect():
                    log.error("S3 reconnect failed on attempt %d", attempt + 1)
                    continue

        log.error("S3 download failed after %d attempts: s3://%s/%s -> %s",
                  _MAX_RETRIES, bucket, key, local_path)
        return False

    def delete_object(self, bucket: str, key: str) -> bool:
        """Delete an object from S3."""
        if not self._ensure_connected():
            return False
        try:
            self._client.delete_object(Bucket=bucket, Key=key)
            log.info("S3 deleted s3://%s/%s", bucket, key)
            return True
        except Exception as exc:
            log.error("S3 delete failed: %s", exc)
            return False

    def get_object_size(self, bucket: str, key: str) -> int:
        """Get size of an object in bytes."""
        if not self._ensure_connected():
            return 0
        try:
            resp = self._client.head_object(Bucket=bucket, Key=key)
            return resp.get("ContentLength", 0)
        except Exception:
            # Missing object / transient error both report as size 0.
            return 0
|
||||
@@ -26,7 +26,7 @@ BACKUP_DIR = os.path.join(SHARED_DIR, "backups")
|
||||
LOCAL_CONFIG_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "config")
|
||||
EXAMPLE_FILE = os.path.join(LOCAL_CONFIG_DIR, "servers.example.json")
|
||||
|
||||
SERVER_TYPES = ["ssh", "telnet", "rdp", "vnc", "winrm", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus"]
|
||||
SERVER_TYPES = ["ssh", "telnet", "rdp", "vnc", "winrm", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus", "s3"]
|
||||
|
||||
DEFAULT_PORTS = {
|
||||
"ssh": 22,
|
||||
@@ -40,6 +40,7 @@ DEFAULT_PORTS = {
|
||||
"redis": 6379,
|
||||
"grafana": 3000,
|
||||
"prometheus": 9090,
|
||||
"s3": 443,
|
||||
}
|
||||
|
||||
# Auto-backup interval: 10 minutes
|
||||
|
||||
@@ -19,6 +19,7 @@ _SSH_TYPE = {"ssh"}
|
||||
_SQL_TYPES = {"mariadb", "mssql", "postgresql"}
|
||||
_REDIS_TYPE = {"redis"}
|
||||
_HTTP_TYPES = {"grafana", "prometheus", "winrm"}
|
||||
_S3_TYPE = {"s3"}
|
||||
_TCP_TYPES = {"telnet", "rdp", "vnc"}
|
||||
|
||||
|
||||
@@ -60,6 +61,8 @@ class StatusChecker:
|
||||
return self._check_http(server, "/-/healthy")
|
||||
if server_type == "winrm":
|
||||
return self._check_http(server, "/wsman")
|
||||
if server_type in _S3_TYPE:
|
||||
return self._check_s3(server)
|
||||
if server_type in _TCP_TYPES:
|
||||
return self._check_tcp(server)
|
||||
|
||||
@@ -106,6 +109,17 @@ class StatusChecker:
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def _check_s3(self, server: dict) -> bool:
|
||||
"""Check S3 via list_buckets."""
|
||||
try:
|
||||
from core.s3_client import S3Client
|
||||
client = S3Client(server)
|
||||
result = client.connect()
|
||||
client.disconnect()
|
||||
return result
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def _check_http(self, server: dict, path: str) -> bool:
|
||||
"""Check HTTP(S) endpoint."""
|
||||
try:
|
||||
|
||||
@@ -28,6 +28,7 @@ from gui.tabs.grafana_tab import GrafanaTab
|
||||
from gui.tabs.prometheus_tab import PrometheusTab
|
||||
from gui.tabs.powershell_tab import PowershellTab
|
||||
from gui.tabs.launch_tab import LaunchTab
|
||||
from gui.tabs.s3_tab import S3Tab
|
||||
|
||||
# Tab sets per server type — determines which tabs are shown
|
||||
TAB_REGISTRY = {
|
||||
@@ -42,6 +43,7 @@ TAB_REGISTRY = {
|
||||
"prometheus": ["metrics", "info", "setup"],
|
||||
"rdp": ["launch", "info", "setup"],
|
||||
"vnc": ["launch", "info", "setup"],
|
||||
"s3": ["objects", "info", "setup"],
|
||||
}
|
||||
|
||||
# Map tab key → widget class (used as lazy factory)
|
||||
@@ -58,6 +60,7 @@ TAB_CLASSES = {
|
||||
"metrics": PrometheusTab,
|
||||
"powershell": PowershellTab,
|
||||
"launch": LaunchTab,
|
||||
"objects": S3Tab,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ FIELD_MAP = {
|
||||
"prometheus": ["use_ssl"],
|
||||
"rdp": ["user", "password", "rdp_resolution", "rdp_quality", "rdp_clipboard", "rdp_drives", "rdp_printers"],
|
||||
"vnc": ["password"],
|
||||
"s3": ["access_key", "secret_key", "bucket", "use_ssl"],
|
||||
}
|
||||
|
||||
|
||||
@@ -242,6 +243,27 @@ class ServerDialog(ctk.CTkToplevel):
|
||||
ctk.CTkCheckBox(f, text=t("rdp_printers"), variable=self._rdp_printers_var).pack(fill="x", padx=20, pady=(4, 2))
|
||||
self._field_frames["rdp_printers"] = f
|
||||
|
||||
# --- access_key ---
|
||||
f = ctk.CTkFrame(self, fg_color="transparent")
|
||||
ctk.CTkLabel(f, text=t("access_key"), anchor="w").pack(fill="x", **pad)
|
||||
self.access_key_entry = ctk.CTkEntry(f, placeholder_text="AKIAIOSFODNN7EXAMPLE")
|
||||
self.access_key_entry.pack(fill="x", **entry_pad)
|
||||
self._field_frames["access_key"] = f
|
||||
|
||||
# --- secret_key ---
|
||||
f = ctk.CTkFrame(self, fg_color="transparent")
|
||||
ctk.CTkLabel(f, text=t("secret_key"), anchor="w").pack(fill="x", **pad)
|
||||
self.secret_key_entry = ctk.CTkEntry(f, show="*", placeholder_text=t("placeholder_secret_key"))
|
||||
self.secret_key_entry.pack(fill="x", **entry_pad)
|
||||
self._field_frames["secret_key"] = f
|
||||
|
||||
# --- bucket ---
|
||||
f = ctk.CTkFrame(self, fg_color="transparent")
|
||||
ctk.CTkLabel(f, text=t("bucket"), anchor="w").pack(fill="x", **pad)
|
||||
self.bucket_entry = ctk.CTkEntry(f, placeholder_text="my-bucket")
|
||||
self.bucket_entry.pack(fill="x", **entry_pad)
|
||||
self._field_frames["bucket"] = f
|
||||
|
||||
# --- use_ssl ---
|
||||
f = ctk.CTkFrame(self, fg_color="transparent")
|
||||
self.use_ssl_var = ctk.BooleanVar(value=False)
|
||||
@@ -282,6 +304,9 @@ class ServerDialog(ctk.CTkToplevel):
|
||||
self.db_index_entry.insert(0, str(server.get("db_index", "")))
|
||||
self.api_token_entry.insert(0, server.get("api_token", ""))
|
||||
self.use_ssl_var.set(server.get("use_ssl", False))
|
||||
self.access_key_entry.insert(0, server.get("access_key", ""))
|
||||
self.secret_key_entry.insert(0, server.get("secret_key", ""))
|
||||
self.bucket_entry.insert(0, server.get("bucket", ""))
|
||||
|
||||
# RDP settings
|
||||
res_raw = server.get("rdp_resolution", "auto")
|
||||
@@ -411,6 +436,21 @@ class ServerDialog(ctk.CTkToplevel):
|
||||
if token:
|
||||
server_data["api_token"] = token
|
||||
|
||||
if "access_key" in visible:
|
||||
ak = self.access_key_entry.get().strip()
|
||||
if ak:
|
||||
server_data["access_key"] = ak
|
||||
|
||||
if "secret_key" in visible:
|
||||
sk = self.secret_key_entry.get()
|
||||
if sk:
|
||||
server_data["secret_key"] = sk
|
||||
|
||||
if "bucket" in visible:
|
||||
bkt = self.bucket_entry.get().strip()
|
||||
if bkt:
|
||||
server_data["bucket"] = bkt
|
||||
|
||||
if "use_ssl" in visible:
|
||||
if self.use_ssl_var.get():
|
||||
server_data["use_ssl"] = True
|
||||
|
||||
543
gui/tabs/s3_tab.py
Normal file
543
gui/tabs/s3_tab.py
Normal file
@@ -0,0 +1,543 @@
|
||||
"""
|
||||
S3 tab — bucket/object browser with upload, download, delete actions.
|
||||
Supports drag-and-drop from OS file manager for upload.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
from tkinter import ttk, filedialog
|
||||
|
||||
import customtkinter as ctk
|
||||
from core.s3_client import S3Client
|
||||
from core.i18n import t
|
||||
from core.icons import icon_text
|
||||
from gui.tabs.query_tab import apply_dark_scrollbar_style
|
||||
|
||||
|
||||
def _human_size(size_bytes: int) -> str:
|
||||
"""Format bytes to human-readable string."""
|
||||
if size_bytes < 1024:
|
||||
return f"{size_bytes} B"
|
||||
for unit in ("KB", "MB", "GB", "TB"):
|
||||
size_bytes /= 1024
|
||||
if size_bytes < 1024:
|
||||
return f"{size_bytes:.1f} {unit}"
|
||||
return f"{size_bytes:.1f} PB"
|
||||
|
||||
|
||||
def _setup_drop(widget, callback):
    """Hook OS drag-and-drop onto *widget*; return True if a hook succeeded.

    Tries the native Windows path first (windnd hooks WM_DROPFILES), then a
    tkinterdnd2-style API as a cross-platform fallback.  Returns False when
    neither mechanism is available — the caller degrades gracefully.
    """
    if sys.platform == "win32":
        try:
            import windnd
            windnd.hook_dropfiles(widget, func=callback)
            return True
        except Exception:
            pass
    # tkinterdnd2 fallback (works on Linux/macOS if installed)
    try:
        widget.drop_target_register("DND_Files")
        widget.dnd_bind("<<Drop>>", lambda event: callback(_parse_dnd_paths(event.data)))
        return True
    except Exception:
        return False
|
||||
|
||||
|
||||
def _parse_dnd_paths(data: str) -> list[str]:
|
||||
"""Parse tkinterdnd2 drop data into list of file paths."""
|
||||
paths = []
|
||||
# tkinterdnd2 wraps paths with spaces in braces: {C:/path with spaces/file.txt}
|
||||
i = 0
|
||||
while i < len(data):
|
||||
if data[i] == '{':
|
||||
end = data.index('}', i)
|
||||
paths.append(data[i + 1:end])
|
||||
i = end + 2 # skip } and space
|
||||
elif data[i] == ' ':
|
||||
i += 1
|
||||
else:
|
||||
end = data.find(' ', i)
|
||||
if end == -1:
|
||||
end = len(data)
|
||||
paths.append(data[i:end])
|
||||
i = end + 1
|
||||
return paths
|
||||
|
||||
|
||||
class S3Tab(ctk.CTkFrame):
|
||||
def __init__(self, master, store):
    """Create the S3 browser tab.

    master: parent widget; store: server store (credentials lookup).
    """
    super().__init__(master, fg_color="transparent")
    self.store = store
    self._current_alias: str | None = None   # alias of the connected server
    self._client: S3Client | None = None     # live connection; None = disconnected
    self._current_bucket: str = ""           # bucket currently being browsed
    self._current_prefix: str = ""           # "folder" prefix within the bucket
    self._nav_stack: list[str] = []          # visited prefixes — presumably consumed by _go_back (not shown here)
    self._dnd_active = False                 # whether an OS drag-and-drop hook succeeded

    self._build_ui()
|
||||
|
||||
def _build_ui(self):
    """Construct the tab layout top to bottom: header with action buttons,
    bucket selector row, (initially hidden) progress row, object treeview,
    and the deferred drag-and-drop hook."""
    apply_dark_scrollbar_style()

    # ── Header ──
    header = ctk.CTkFrame(self, fg_color="transparent")
    header.pack(fill="x", padx=15, pady=(15, 5))

    self._title_label = ctk.CTkLabel(
        header, text=t("s3_objects"),
        font=ctk.CTkFont(size=18, weight="bold"),
    )
    self._title_label.pack(side="left")

    # Transient status text (connecting / item counts / errors).
    self._status_label = ctk.CTkLabel(
        header, text="", font=ctk.CTkFont(size=12),
        text_color="#9ca3af",
    )
    self._status_label.pack(side="left", padx=(15, 0))

    # Buttons
    btn_frame = ctk.CTkFrame(header, fg_color="transparent")
    btn_frame.pack(side="right")

    # Back starts disabled — enabled once the user enters a prefix.
    self._back_btn = ctk.CTkButton(
        btn_frame, text=icon_text("back", t("s3_back")), width=80,
        command=self._go_back, state="disabled",
    )
    self._back_btn.pack(side="left", padx=(0, 5))

    self._refresh_btn = ctk.CTkButton(
        btn_frame, text=icon_text("refresh", t("s3_refresh")), width=100,
        command=self._refresh,
    )
    self._refresh_btn.pack(side="left", padx=(0, 5))

    self._upload_btn = ctk.CTkButton(
        btn_frame, text=icon_text("upload", t("s3_upload")), width=100,
        command=self._upload,
    )
    self._upload_btn.pack(side="left", padx=(0, 5))

    self._download_btn = ctk.CTkButton(
        btn_frame, text=icon_text("download", t("s3_download")), width=110,
        command=self._download,
    )
    self._download_btn.pack(side="left", padx=(0, 5))

    # Destructive action: styled red to stand apart from the others.
    self._delete_btn = ctk.CTkButton(
        btn_frame, text=icon_text("delete", t("s3_delete")), width=100,
        fg_color="#dc2626", hover_color="#b91c1c",
        command=self._delete,
    )
    self._delete_btn.pack(side="left")

    # ── Bucket selector row ──
    bucket_frame = ctk.CTkFrame(self, fg_color="transparent")
    bucket_frame.pack(fill="x", padx=15, pady=(5, 5))

    ctk.CTkLabel(bucket_frame, text=t("s3_bucket"),
                 font=ctk.CTkFont(size=12, weight="bold")).pack(side="left", padx=(0, 5))

    self._bucket_var = ctk.StringVar(value="")
    self._bucket_menu = ctk.CTkOptionMenu(
        bucket_frame, variable=self._bucket_var, values=[""],
        width=200, command=self._on_bucket_change,
    )
    self._bucket_menu.pack(side="left", padx=(0, 15))

    # Path display
    self._path_label = ctk.CTkLabel(
        bucket_frame, text="/", font=ctk.CTkFont(family="Consolas", size=12),
        text_color="#60a5fa",
    )
    self._path_label.pack(side="left", fill="x", expand=True)

    # ── Progress bar (hidden by default) ──
    self._progress_frame = ctk.CTkFrame(self, fg_color="transparent")
    # Don't pack yet — shown only during transfer

    self._progress_label = ctk.CTkLabel(
        self._progress_frame, text="", font=ctk.CTkFont(size=11),
        text_color="#9ca3af",
    )
    self._progress_label.pack(side="left", padx=(0, 10))

    self._progress_bar = ctk.CTkProgressBar(self._progress_frame, width=300, height=14)
    self._progress_bar.set(0)
    self._progress_bar.pack(side="left", fill="x", expand=True)

    self._progress_pct = ctk.CTkLabel(
        self._progress_frame, text="0%", font=ctk.CTkFont(size=11, weight="bold"),
        text_color="#60a5fa", width=45,
    )
    self._progress_pct.pack(side="left", padx=(10, 0))

    # Transfer accounting used by _on_progress/_update_progress.
    self._transfer_bytes = 0
    self._transfer_total = 0

    # ── Treeview for objects ──
    self._tree_frame = ctk.CTkFrame(self, fg_color="#1e1e1e", corner_radius=8)
    self._tree_frame.pack(fill="both", expand=True, padx=15, pady=(5, 15))

    columns = ("name", "size", "modified")
    self._tree = ttk.Treeview(
        self._tree_frame, columns=columns, show="headings",
        selectmode="browse", style="Dark.Treeview",
    )
    self._tree.heading("name", text=t("s3_col_name"))
    self._tree.heading("size", text=t("s3_col_size"))
    self._tree.heading("modified", text=t("s3_col_modified"))
    self._tree.column("name", width=400, minwidth=200)
    self._tree.column("size", width=100, minwidth=80, anchor="e")
    self._tree.column("modified", width=180, minwidth=120)

    scrollbar = ttk.Scrollbar(self._tree_frame, orient="vertical",
                              command=self._tree.yview,
                              style="Dark.Vertical.TScrollbar")
    self._tree.configure(yscrollcommand=scrollbar.set)

    self._tree.pack(side="left", fill="both", expand=True)
    scrollbar.pack(side="right", fill="y")

    # Double-click to enter prefix (folder) or download file
    self._tree.bind("<Double-1>", self._on_double_click)

    # Dark treeview style
    style = ttk.Style()
    style.configure("Dark.Treeview",
                    background="#2b2b2b", foreground="#e5e5e5",
                    fieldbackground="#2b2b2b", borderwidth=0, rowheight=26)
    style.configure("Dark.Treeview.Heading",
                    background="#333333", foreground="#e5e5e5",
                    borderwidth=0, relief="flat")
    style.map("Dark.Treeview",
              background=[("selected", "#1e3a5f")],
              foreground=[("selected", "#ffffff")])

    # ── Drop zone overlay (shown when no files / as hint) ──
    self._drop_hint = ctk.CTkLabel(
        self._tree_frame,
        text=t("s3_drop_hint"),
        font=ctk.CTkFont(size=14),
        text_color="#6b7280",
    )

    # Setup OS drag-and-drop on the treeview area.
    # Deferred: the native hook needs a realized window handle.
    self.after(200, self._init_dnd)
|
||||
|
||||
def _init_dnd(self):
    """Initialize OS drag-and-drop once the widget hierarchy is mapped."""
    try:
        active = _setup_drop(self._tree, self._on_files_dropped)
        if not active:
            # The treeview refused registration — fall back to its frame.
            active = _setup_drop(self._tree_frame, self._on_files_dropped)
        self._dnd_active = active
    except Exception:
        # DnD is best-effort (windnd is Windows-only); never break the tab.
        self._dnd_active = False
||||
def _on_files_dropped(self, files):
    """Handle files dropped from the OS file manager onto the tree."""
    if not self._client or not self._current_bucket:
        return
    # windnd delivers raw bytes paths on Windows; normalize everything to str.
    decoded = [
        f.decode("utf-8", errors="replace") if isinstance(f, bytes) else str(f)
        for f in files
    ]
    local_files = [p for p in decoded if os.path.isfile(p)]
    if local_files:
        self._upload_files(local_files)
||||
def _show_progress(self, label: str, total_bytes: int):
    """Reset transfer counters and pack the progress bar above the tree."""
    self._transfer_bytes = 0
    # Clamp to >= 1 so the ratio math never divides by zero.
    self._transfer_total = max(total_bytes, 1)
    self._progress_pct.configure(text="0%")
    self._progress_label.configure(text=label)
    self._progress_bar.set(0)
    self._progress_frame.pack(
        fill="x", padx=15, pady=(2, 2), before=self._tree_frame)
||||
def _hide_progress(self):
    """Remove the progress bar from the layout."""
    self._progress_frame.pack_forget()
||||
def _on_progress(self, chunk_bytes: int):
    """Accumulate transferred bytes; called from the transfer thread.

    The visual update is marshalled onto the Tk thread via after().
    NOTE(review): the counter is incremented off the GUI thread without a
    lock — appears safe for a single transfer thread, confirm if multiple
    concurrent transfers are ever allowed.
    """
    self._transfer_bytes += chunk_bytes
    self.after(0, self._update_progress)
||||
def _update_progress(self):
    """Refresh bar, percentage and byte counter (GUI thread only)."""
    total = self._transfer_total
    if total <= 0:
        return
    done = self._transfer_bytes
    ratio = min(done / total, 1.0)
    self._progress_bar.set(ratio)
    self._progress_pct.configure(text=f"{int(ratio * 100)}%")
    # Keep the original label prefix; replace only the size suffix.
    prefix = self._progress_label.cget("text").split(" — ")[0]
    suffix = f"{_human_size(done)} / {_human_size(total)}"
    self._progress_label.configure(text=f"{prefix} — {suffix}")
||||
def _on_transfer_status(self, message: str):
    """Show retry/status info coming from the transfer thread.

    boto3 restarts a failed transfer from scratch, so the byte counter is
    reset whenever a status message arrives.
    """
    self._transfer_bytes = 0
    self.after(0, lambda: self._status_label.configure(text=message))
||||
def _upload_files(self, paths: list[str]):
    """Upload files to the current bucket/prefix on a background thread.

    Shows an aggregate progress bar for the whole batch and reports how
    many files succeeded when done.
    """
    if not self._client or not self._current_bucket:
        return
    # Fix: filter non-files once up front. Previously a directory in the
    # batch was counted in total_files but excluded from total_bytes, and
    # was still attempted by the upload loop — skewing the progress bar
    # and the success count.
    paths = [p for p in paths if os.path.isfile(p)]
    if not paths:
        return
    total_files = len(paths)
    total_bytes = sum(os.path.getsize(p) for p in paths)
    label = (t("s3_uploading_n").format(count=total_files) if total_files > 1
             else t("s3_uploading"))
    self._status_label.configure(text=label)
    self._show_progress(label, total_bytes)

    def _do():
        ok_count = 0
        for path in paths:
            key = self._current_prefix + os.path.basename(path)
            if self._client.upload_file(
                    path, self._current_bucket, key,
                    progress_cb=self._on_progress,
                    status_cb=self._on_transfer_status):
                ok_count += 1
        # ok_count is final here — the lambda runs after the loop ends.
        self.after(0, lambda: self._on_upload_done(ok_count, total_files))

    threading.Thread(target=_do, daemon=True).start()
||||
def _on_upload_done(self, ok_count: int, total: int):
    """Hide the progress bar, report the result, reload the listing."""
    self._hide_progress()
    if ok_count == total:
        msg = t("s3_uploaded_n").format(count=ok_count)
    else:
        msg = t("s3_upload_partial").format(ok=ok_count, total=total)
    self._status_label.configure(text=msg)
    self._refresh()
||||
# -- server switch ----------------------------------------------------
|
||||
|
||||
def set_server(self, alias: str | None):
    """Switch the tab to another server (or clear it when alias is None).

    Connects on a worker thread and loads the bucket list on success.
    """
    if alias == self._current_alias:
        return
    self._current_alias = alias
    if not alias:
        self._client = None
        self._tree.delete(*self._tree.get_children())
        self._status_label.configure(text="")
        return
    # Reset view state before connecting to the new server.
    self._client = None
    self._current_prefix = ""
    self._nav_stack.clear()
    self._tree.delete(*self._tree.get_children())
    self._status_label.configure(text=t("s3_connecting"))

    server = self.store.get_server(alias)
    if not server:
        # Fix: previously the label stayed on "connecting" forever when
        # the alias had no stored record.
        self._status_label.configure(text=t("s3_connect_failed"))
        return

    self._current_bucket = server.get("bucket", "")

    def _connect():
        client = S3Client(server)
        if client.connect():
            self._client = client
            self.after(0, self._load_buckets)
        else:
            self.after(0, lambda: self._status_label.configure(
                text=t("s3_connect_failed")))

    threading.Thread(target=_connect, daemon=True).start()
||||
def _load_buckets(self):
    """Fetch bucket names on a worker thread and populate the selector."""
    client = self._client
    if not client:
        return

    def _fetch():
        # Fix: use the captured client — a concurrent set_server() clears
        # self._client and would crash this worker with AttributeError.
        names = [b["Name"] for b in client.list_buckets()]
        self.after(0, lambda: self._update_buckets(names))

    threading.Thread(target=_fetch, daemon=True).start()
||||
def _update_buckets(self, names: list[str]):
    """Fill the bucket dropdown and select the configured/first bucket."""
    values = names or [""]  # the option menu needs at least one entry
    self._bucket_menu.configure(values=values)
    if self._current_bucket and self._current_bucket in values:
        self._bucket_var.set(self._current_bucket)
    else:
        self._bucket_var.set(values[0])
        self._current_bucket = values[0]
    self._refresh()
||||
def _on_bucket_change(self, value: str):
    """User picked another bucket: reset navigation and reload the list."""
    self._current_bucket = value
    self._current_prefix = ""
    self._nav_stack.clear()
    self._refresh()
||||
# -- navigation -------------------------------------------------------
|
||||
|
||||
def _refresh(self):
    """Reload the object listing for the current bucket/prefix."""
    client = self._client
    bucket = self._current_bucket
    prefix = self._current_prefix
    if not client or not bucket:
        return
    self._status_label.configure(text=t("s3_loading"))
    self._path_label.configure(text=f"/{prefix}" if prefix else "/")

    def _fetch():
        # Fix: captured locals keep the worker consistent even if the user
        # switches server/bucket while this request is in flight (the old
        # code read self._client mid-flight and could hit None).
        objects, prefixes = client.list_objects(bucket, prefix)
        self.after(0, lambda: self._display(objects, prefixes))

    threading.Thread(target=_fetch, daemon=True).start()
||||
def _display(self, objects: list[dict], prefixes: list[str]):
    """Render folders then files into the tree; toggle the drop hint."""
    self._tree.delete(*self._tree.get_children())
    cut = len(self._current_prefix)

    # Folders first, alphabetically. Strip the current prefix and the
    # single trailing delimiter before showing the name.
    for prefix in sorted(prefixes):
        shown = prefix[cut:].removesuffix("/")
        self._tree.insert(
            "", "end",
            values=(f"\U0001f4c1 {shown}/", "", ""),
            tags=("folder",),
        )

    # Then files, sorted by key.
    for obj in sorted(objects, key=lambda o: o["Key"]):
        name = obj["Key"][cut:]
        stamp = obj.get("LastModified")
        modified = stamp.strftime("%Y-%m-%d %H:%M:%S") if stamp else ""
        self._tree.insert(
            "", "end",
            values=(name, _human_size(obj.get("Size", 0)), modified),
            tags=("file",),
        )

    count = len(objects) + len(prefixes)
    self._status_label.configure(text=t("s3_items_count").format(count=count))
    self._back_btn.configure(
        state="normal" if self._current_prefix else "disabled")

    # Empty listing: invite the user to drop files here.
    if count:
        self._drop_hint.place_forget()
    else:
        self._drop_hint.place(relx=0.5, rely=0.5, anchor="center")
||||
def _on_double_click(self, event):
    """Enter the selected folder, or download the selected file."""
    sel = self._tree.selection()
    if not sel:
        return
    values = self._tree.item(sel[0])["values"]
    name = str(values[0]) if values else ""
    if name.endswith("/"):
        # Folder row: strip the icon prefix and descend into it.
        folder = name.replace("\U0001f4c1 ", "").strip()
        self._nav_stack.append(self._current_prefix)
        new_prefix = self._current_prefix + folder
        if not new_prefix.endswith("/"):
            new_prefix += "/"
        self._current_prefix = new_prefix
        self._refresh()
    else:
        self._download()  # double-click on a file downloads it
||||
def _go_back(self):
    """Navigate one level up using the navigation stack."""
    self._current_prefix = self._nav_stack.pop() if self._nav_stack else ""
    self._refresh()
||||
# -- actions ----------------------------------------------------------
|
||||
|
||||
def _upload(self):
    """Pick local files via a dialog and upload them to the current prefix."""
    if not self._client or not self._current_bucket:
        return
    chosen = filedialog.askopenfilenames()
    if chosen:
        self._upload_files(list(chosen))
||||
def _download(self):
    """Download the selected object to a user-chosen local path."""
    # Fix: guard against a missing client/bucket. This method is reachable
    # via double-click, which did not check the connection state and would
    # raise AttributeError on self._client below.
    if not self._client or not self._current_bucket:
        return
    sel = self._tree.selection()
    if not sel:
        return
    values = self._tree.item(sel[0])["values"]
    name = str(values[0]) if values else ""
    if name.endswith("/"):
        return  # folders cannot be downloaded

    key = self._current_prefix + name
    save_path = filedialog.asksaveasfilename(initialfile=name)
    if not save_path:
        return

    # Object size drives the progress percentage.
    total_bytes = self._client.get_object_size(self._current_bucket, key)
    label = t("s3_downloading")
    self._status_label.configure(text=label)
    self._show_progress(label, total_bytes)

    def _do():
        ok = self._client.download_file(
            self._current_bucket, key, save_path,
            progress_cb=self._on_progress,
            status_cb=self._on_transfer_status)
        self.after(0, lambda: self._on_download_done(bool(ok)))

    threading.Thread(target=_do, daemon=True).start()
||||
def _on_download_done(self, success: bool):
    """Hide the progress bar and report download success or failure."""
    self._hide_progress()
    key = "s3_download_ok" if success else "s3_download_failed"
    self._status_label.configure(text=t(key))
||||
def _delete(self):
    """Delete the selected object on a background thread."""
    # Fix: _do() dereferences self._client; bail out early instead of
    # raising AttributeError in the worker thread when not connected.
    if not self._client or not self._current_bucket:
        return
    sel = self._tree.selection()
    if not sel:
        return
    values = self._tree.item(sel[0])["values"]
    name = str(values[0]) if values else ""
    if name.endswith("/"):
        return  # prefixes (folders) are not deletable from the GUI

    key = self._current_prefix + name

    def _do():
        if self._client.delete_object(self._current_bucket, key):
            self.after(0, self._refresh)
        else:
            self.after(0, lambda: self._status_label.configure(
                text=t("s3_delete_failed")))

    self._status_label.configure(text=t("s3_deleting"))
    threading.Thread(target=_do, daemon=True).start()
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -12,3 +12,5 @@ redis>=5.0.0
|
||||
requests>=2.31.0
|
||||
pywinrm>=0.4.3
|
||||
telnetlib3>=2.0.0
|
||||
boto3>=1.28.0
|
||||
windnd>=1.0.7; sys_platform == "win32"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Скилл /ssh — управление удалёнными серверами
|
||||
|
||||
Ты управляешь удалёнными серверами через универсальную CLI-утилиту.
|
||||
Поддерживаются: SSH, SQL (MariaDB/MSSQL/PostgreSQL), Redis, Grafana, Prometheus, WinRM (PowerShell/CMD).
|
||||
Поддерживаются: SSH, SQL (MariaDB/MSSQL/PostgreSQL), Redis, S3, Grafana, Prometheus, WinRM (PowerShell/CMD).
|
||||
|
||||
## ВАЖНО — Безопасность
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
| `telnet` | `ALIAS "command"` (как ssh, но без SFTP/sudo/ключей) |
|
||||
| `mariadb` / `mssql` / `postgresql` | `--sql`, `--sql-databases`, `--sql-tables` |
|
||||
| `redis` | `--redis`, `--redis-info`, `--redis-keys` |
|
||||
| `s3` | `--s3-buckets`, `--s3-ls`, `--s3-upload`, `--s3-download`, `--s3-delete` |
|
||||
| `grafana` | `--grafana-dashboards`, `--grafana-alerts` |
|
||||
| `prometheus` | `--prom-query`, `--prom-targets`, `--prom-alerts` |
|
||||
| `winrm` | `--ps`, `--cmd` |
|
||||
@@ -78,7 +79,7 @@ python ~/.server-connections/ssh.py --remove ALIAS
|
||||
|
||||
### Добавить сервер
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp] [--note "описание"] [--database DB] [--token TOKEN]
|
||||
python ~/.server-connections/ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp|s3] [--note "описание"] [--database DB] [--token TOKEN]
|
||||
```
|
||||
- Автоматически устанавливает SSH-ключ после добавления
|
||||
- Обновляет `~/.ssh/config`
|
||||
@@ -158,6 +159,34 @@ python ~/.server-connections/ssh.py --redis-info ALIAS
|
||||
python ~/.server-connections/ssh.py --redis-keys ALIAS "user:*"
|
||||
```
|
||||
|
||||
## S3-команды (тип: s3)
|
||||
|
||||
### Список бакетов
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --s3-buckets ALIAS
|
||||
```
|
||||
|
||||
### Список объектов
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --s3-ls ALIAS bucket
|
||||
python ~/.server-connections/ssh.py --s3-ls ALIAS bucket/prefix/
|
||||
```
|
||||
|
||||
### Загрузить файл в S3
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --s3-upload ALIAS "D:/local/file" bucket/key
|
||||
```
|
||||
|
||||
### Скачать файл из S3
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --s3-download ALIAS bucket/key "D:/local/file"
|
||||
```
|
||||
|
||||
### Удалить объект
|
||||
```bash
|
||||
python ~/.server-connections/ssh.py --s3-delete ALIAS bucket/key
|
||||
```
|
||||
|
||||
## Grafana-команды (тип: grafana)
|
||||
|
||||
### Список дашбордов
|
||||
|
||||
237
tools/ssh.py
237
tools/ssh.py
@@ -14,7 +14,7 @@ Usage (SSH):
|
||||
python ssh.py --status
|
||||
python ssh.py --info ALIAS # full info (no passwords)
|
||||
python ssh.py --set-note ALIAS "desc" # update server notes
|
||||
python ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp] [--note "desc"] [--database DB] [--token TOKEN]
|
||||
python ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp|s3] [--note "desc"] [--database DB] [--token TOKEN]
|
||||
python ssh.py --remove ALIAS
|
||||
|
||||
SQL (type: mariadb / mssql / postgresql):
|
||||
@@ -36,6 +36,13 @@ Prometheus (type: prometheus):
|
||||
python ssh.py --prom-targets ALIAS # list targets
|
||||
python ssh.py --prom-alerts ALIAS # list alerts
|
||||
|
||||
S3 (type: s3):
|
||||
python ssh.py --s3-buckets ALIAS # list buckets
|
||||
python ssh.py --s3-ls ALIAS [bucket[/prefix]] # list objects
|
||||
python ssh.py --s3-upload ALIAS local bucket/key # upload file
|
||||
python ssh.py --s3-download ALIAS bucket/key local # download file
|
||||
python ssh.py --s3-delete ALIAS bucket/key # delete object
|
||||
|
||||
WinRM (type: winrm):
|
||||
python ssh.py --ps ALIAS "Get-Process" # PowerShell via WinRM
|
||||
python ssh.py --cmd ALIAS "dir" # CMD via WinRM
|
||||
@@ -964,7 +971,7 @@ def add_server(args):
|
||||
i += 1
|
||||
|
||||
# Validate server type
|
||||
valid_types = ["ssh", "telnet", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus", "winrm", "rdp"]
|
||||
valid_types = ["ssh", "telnet", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus", "winrm", "rdp", "s3"]
|
||||
if stype not in valid_types:
|
||||
print(f"ERROR: Invalid server type '{stype}'. Valid types: {', '.join(valid_types)}")
|
||||
sys.exit(1)
|
||||
@@ -991,6 +998,13 @@ def add_server(args):
|
||||
elif stype in ["redis", "grafana", "prometheus"]:
|
||||
if token:
|
||||
new_server["token"] = token
|
||||
elif stype == "s3":
|
||||
# S3: user=access_key, password=secret_key, ip=endpoint
|
||||
new_server["access_key"] = user
|
||||
new_server["secret_key"] = password
|
||||
new_server["use_ssl"] = True
|
||||
if database:
|
||||
new_server["bucket"] = database
|
||||
elif stype in ["winrm", "rdp"]:
|
||||
# WinRM/RDP may have additional auth fields
|
||||
new_server["auth_method"] = "password" # default
|
||||
@@ -1254,6 +1268,197 @@ def redis_keys(server: dict, pattern: str):
|
||||
r.close()
|
||||
|
||||
|
||||
# ── S3 commands ──────────────────────────────────
|
||||
|
||||
def _get_s3_client(server: dict):
    """Build a boto3 S3 client from a server record.

    The record supplies the endpoint (``ip``/``port``/``use_ssl``) and the
    credentials (``access_key``/``secret_key``). Exits the process with an
    error if boto3 is not installed.
    """
    try:
        import boto3
        import botocore.config
        import urllib3
        # verify=False below would otherwise spam InsecureRequestWarning.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    except ImportError:
        print("ERROR: boto3 not installed. Run: pip install boto3", file=sys.stderr)
        sys.exit(1)

    endpoint = server.get("ip", "")
    if endpoint and not endpoint.startswith("http"):
        scheme = "https" if server.get("use_ssl", True) else "http"
        port = int(server.get("port", 443))
        # Omit the port when it is the scheme default.
        is_default = (scheme == "https" and port == 443) or (scheme == "http" and port == 80)
        endpoint = (f"{scheme}://{endpoint}" if is_default
                    else f"{scheme}://{endpoint}:{port}")

    cfg = botocore.config.Config(
        signature_version="s3v4",
        connect_timeout=15,
        read_timeout=60,
        retries={"max_attempts": 5, "mode": "adaptive"},
        tcp_keepalive=True,
    )
    # SECURITY: verify=False disables TLS certificate validation. This is
    # convenient for self-signed MinIO endpoints but exposes connections to
    # MITM — consider making it configurable per server.
    return boto3.client(
        "s3",
        endpoint_url=endpoint,
        aws_access_key_id=server.get("access_key", ""),
        aws_secret_access_key=server.get("secret_key", ""),
        config=cfg,
        verify=False,
    )
||||
def s3_buckets(server: dict):
    """Print name and creation date for every bucket; exit 1 on error."""
    client = _get_s3_client(server)
    try:
        buckets = client.list_buckets().get("Buckets", [])
        if not buckets:
            print("(no buckets)")
            return
        print(f"{'Name':<40} {'Created'}")
        print("-" * 65)
        for bucket in buckets:
            created = bucket.get("CreationDate", "")
            if created:
                created = created.strftime("%Y-%m-%d %H:%M:%S")
            print(f"{bucket['Name']:<40} {created}")
        plural = "s" if len(buckets) != 1 else ""
        print(f"\n({len(buckets)} bucket{plural})")
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
||||
def s3_ls(server: dict, path: str = ""):
    """List objects under ``bucket[/prefix]``; folders print as DIR rows."""
    client = _get_s3_client(server)
    # "bucket/some/prefix" -> bucket + prefix; empty path -> default bucket.
    parts = path.split("/", 1) if path else []
    bucket = parts[0] if parts else server.get("bucket", "")
    prefix = parts[1] if len(parts) > 1 else ""
    if not bucket:
        print("ERROR: No bucket specified. Usage: --s3-ls ALIAS bucket[/prefix]", file=sys.stderr)
        sys.exit(1)
    try:
        if prefix and not prefix.endswith("/"):
            prefix += "/"  # S3 prefixes must end with the delimiter
        kwargs = {"Bucket": bucket, "Delimiter": "/"}
        if prefix:
            kwargs["Prefix"] = prefix
        total = 0
        for page in client.get_paginator("list_objects_v2").paginate(**kwargs):
            for cp in page.get("CommonPrefixes", []):
                shown = cp["Prefix"]
                if prefix:
                    shown = shown[len(prefix):]
                print(f"  DIR {shown}")
                total += 1
            for obj in page.get("Contents", []):
                key = obj["Key"]
                if key == prefix:
                    continue  # skip the prefix placeholder object itself
                name = key[len(prefix):] if prefix else key
                modified = obj.get("LastModified", "")
                if modified:
                    modified = modified.strftime("%Y-%m-%d %H:%M")
                print(f"{obj.get('Size', 0):>10} {modified} {name}")
                total += 1
        print(f"\n({total} item{'s' if total != 1 else ''})")
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
||||
def _s3_transfer_config():
    """Shared TransferConfig: 8 MiB multipart chunks, 4 workers, 10 tries."""
    from boto3.s3.transfer import TransferConfig
    chunk = 8 * 1024 * 1024
    return TransferConfig(
        multipart_threshold=chunk,
        multipart_chunksize=chunk,
        max_concurrency=4,
        num_download_attempts=10,
    )
||||
def s3_upload(server: dict, local_path: str, remote_path: str):
    """Upload a local file to ``s3://bucket/key`` with exponential backoff.

    ``remote_path`` is ``bucket/key``; a bare bucket uses the local file
    name as the key. Exits 1 on validation failure or after 10 attempts.
    """
    if "/" in remote_path:
        bucket, key = remote_path.split("/", 1)
    else:
        bucket = remote_path
        key = os.path.basename(local_path)  # bucket only: key = filename
    if not bucket:
        print("ERROR: No bucket. Usage: --s3-upload ALIAS local bucket/key", file=sys.stderr)
        sys.exit(1)
    if not os.path.isfile(local_path):
        print(f"ERROR: File not found: {local_path}", file=sys.stderr)
        sys.exit(1)

    size = os.path.getsize(local_path)
    config = _s3_transfer_config()
    max_retries = 10
    for attempt in range(max_retries):
        # A fresh client per attempt keeps a broken connection pool from
        # poisoning the retry.
        client = _get_s3_client(server)
        try:
            print(f"Uploading {local_path} -> s3://{bucket}/{key} ({size} bytes)...")
            client.upload_file(local_path, bucket, key, Config=config)
            print("OK")
            return
        except Exception as e:
            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}", file=sys.stderr)
            if attempt == max_retries - 1:
                print(f"ERROR: Upload failed after {max_retries} attempts", file=sys.stderr)
                sys.exit(1)
            delay = min(2 * (2 ** attempt), 60)  # capped exponential backoff
            print(f"Retrying in {delay}s...", file=sys.stderr)
            time.sleep(delay)
||||
def s3_download(server: dict, remote_path: str, local_path: str):
    """Download ``s3://bucket/key`` to a local file with exponential backoff.

    Exits 1 on a malformed remote path or after 10 failed attempts.
    """
    if "/" in remote_path:
        bucket, key = remote_path.split("/", 1)
    else:
        bucket, key = remote_path, ""
    if not bucket or not key:
        print("ERROR: Usage: --s3-download ALIAS bucket/key local_path", file=sys.stderr)
        sys.exit(1)

    config = _s3_transfer_config()
    max_retries = 10
    for attempt in range(max_retries):
        client = _get_s3_client(server)  # fresh client per attempt
        try:
            print(f"Downloading s3://{bucket}/{key} -> {local_path}...")
            client.download_file(bucket, key, local_path, Config=config)
            print(f"OK ({os.path.getsize(local_path)} bytes)")
            return
        except Exception as e:
            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}", file=sys.stderr)
            if attempt == max_retries - 1:
                print(f"ERROR: Download failed after {max_retries} attempts", file=sys.stderr)
                sys.exit(1)
            delay = min(2 * (2 ** attempt), 60)  # capped exponential backoff
            print(f"Retrying in {delay}s...", file=sys.stderr)
            time.sleep(delay)
||||
def s3_delete(server: dict, remote_path: str):
    """Delete a single object at ``s3://bucket/key``; exit 1 on error."""
    client = _get_s3_client(server)
    if "/" in remote_path:
        bucket, key = remote_path.split("/", 1)
    else:
        bucket, key = remote_path, ""
    if not bucket or not key:
        print("ERROR: Usage: --s3-delete ALIAS bucket/key", file=sys.stderr)
        sys.exit(1)
    try:
        client.delete_object(Bucket=bucket, Key=key)
        print(f"Deleted s3://{bucket}/{key}")
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
||||
|
||||
# ── Grafana commands ──────────────────────────────────
|
||||
|
||||
def _grafana_request(server: dict, endpoint: str) -> dict:
|
||||
@@ -1531,6 +1736,34 @@ def main():
|
||||
redis_keys(servers[alias], sys.argv[3])
|
||||
sys.exit(0)
|
||||
|
||||
# ── S3 commands ──
|
||||
if cmd == "--s3-buckets" and len(sys.argv) >= 3:
|
||||
_, servers = load_servers()
|
||||
alias = _resolve_alias(sys.argv[2], servers)
|
||||
s3_buckets(servers[alias])
|
||||
sys.exit(0)
|
||||
if cmd == "--s3-ls" and len(sys.argv) >= 3:
|
||||
_, servers = load_servers()
|
||||
alias = _resolve_alias(sys.argv[2], servers)
|
||||
path = sys.argv[3] if len(sys.argv) >= 4 else ""
|
||||
s3_ls(servers[alias], path)
|
||||
sys.exit(0)
|
||||
if cmd == "--s3-upload" and len(sys.argv) >= 5:
|
||||
_, servers = load_servers()
|
||||
alias = _resolve_alias(sys.argv[2], servers)
|
||||
s3_upload(servers[alias], sys.argv[3], sys.argv[4])
|
||||
sys.exit(0)
|
||||
if cmd == "--s3-download" and len(sys.argv) >= 5:
|
||||
_, servers = load_servers()
|
||||
alias = _resolve_alias(sys.argv[2], servers)
|
||||
s3_download(servers[alias], sys.argv[3], sys.argv[4])
|
||||
sys.exit(0)
|
||||
if cmd == "--s3-delete" and len(sys.argv) >= 4:
|
||||
_, servers = load_servers()
|
||||
alias = _resolve_alias(sys.argv[2], servers)
|
||||
s3_delete(servers[alias], sys.argv[3])
|
||||
sys.exit(0)
|
||||
|
||||
# ── Grafana commands ──
|
||||
if cmd == "--grafana-dashboards" and len(sys.argv) >= 3:
|
||||
_, servers = load_servers()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""Version info for ServerManager."""
|
||||
|
||||
__version__ = "1.8.99"
|
||||
__version__ = "1.9.0"
|
||||
__app_name__ = "ServerManager"
|
||||
__author__ = "aibot777"
|
||||
__description__ = "Desktop GUI for managing remote servers"
|
||||
|
||||
Reference in New Issue
Block a user