v1.9.0: S3 server type — bucket/object browser, drag-and-drop upload, resilient transfers

New server type: S3 (MinIO, AWS, any S3-compatible storage)
- core/s3_client.py: boto3 client with auto-reconnect, 10 retries, exponential backoff, multipart upload/download, tcp_keepalive
- gui/tabs/s3_tab.py: object browser (Treeview), bucket selector, folder navigation, drag-and-drop upload from Explorer via windnd (sketch below), progress bar with %, multi-file upload
- CLI: --s3-buckets, --s3-ls, --s3-upload, --s3-download, --s3-delete, all with retry (end-to-end example after the commit metadata)
- ServerDialog: access_key, secret_key, bucket fields
- Registration: server_store, connection_factory, status_checker, icons, app, i18n (EN/RU/ZH)
- Fix: build.py cleanup_old_releases now sorts releases by semver instead of lexicographically, which mis-ordered everything from v1.8.100 onward (sketch below)
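
The drag-and-drop wiring in gui/tabs/s3_tab.py is outside this excerpt. As a rough sketch, windnd hooks Explorer file drops into a Tk window like the following; on_drop and its print stand in for the real upload handler, which is hypothetical here:

    import tkinter as tk
    import windnd  # Windows-only: hooks WM_DROPFILES for a Tk window

    def on_drop(file_paths):
        # windnd delivers dropped paths as a list of bytes; decode before use
        for raw in file_paths:
            path = raw.decode("utf-8", errors="replace")
            print(f"queued for upload: {path}")  # hand off to the uploader here

    root = tk.Tk()
    windnd.hook_dropfiles(root, func=on_drop)
    root.mainloop()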

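The build.py change itself is not part of the diff shown below; the gist of the fix, with illustrative tag names, is to sort numerically per version component instead of as strings:

    import re

    def semver_key(name: str):
        # "v1.8.100" -> (1, 8, 100); as plain strings "v1.8.100" sorts before "v1.8.9"
        m = re.search(r"(\d+)\.(\d+)\.(\d+)", name)
        return tuple(int(g) for g in m.groups()) if m else (0, 0, 0)

    tags = ["v1.8.9", "v1.8.100", "v1.8.10"]
    print(sorted(tags))                  # ['v1.8.10', 'v1.8.100', 'v1.8.9']  (lexicographic, wrong)
    print(sorted(tags, key=semver_key))  # ['v1.8.9', 'v1.8.10', 'v1.8.100']  (numeric, right)
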
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Contained in: chrome-storm-c442
Date: 2026-03-03 06:32:03 -05:00
Commit: 9b0e4c76a3 (parent: f2dc978c57)
19 changed files with 1277 additions and 8 deletions
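
For reference, registering an S3 endpoint and exercising the new commands end to end might look like this (alias, endpoint, and credentials are made up; per the --add mapping in the diff, USER becomes access_key, PASSWORD becomes secret_key, and --database sets the default bucket):

    python ssh.py --add minio1 192.168.0.50 9000 MYACCESSKEY MYSECRETKEY --type s3 --database backups
    python ssh.py --s3-buckets minio1
    python ssh.py --s3-upload minio1 ./dump.sql backups/db/dump.sql
    python ssh.py --s3-download minio1 backups/db/dump.sql ./restore.sql
    python ssh.py --s3-delete minio1 backups/db/dump.sql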


@@ -14,7 +14,7 @@ Usage (SSH):
     python ssh.py --status
     python ssh.py --info ALIAS              # full info (no passwords)
     python ssh.py --set-note ALIAS "desc"   # update server notes
-    python ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp] [--note "desc"] [--database DB] [--token TOKEN]
+    python ssh.py --add ALIAS IP PORT USER PASSWORD [--type ssh|telnet|mariadb|mssql|postgresql|redis|grafana|prometheus|winrm|rdp|s3] [--note "desc"] [--database DB] [--token TOKEN]
     python ssh.py --remove ALIAS
 SQL (type: mariadb / mssql / postgresql):
@@ -36,6 +36,13 @@ Prometheus (type: prometheus):
     python ssh.py --prom-targets ALIAS      # list targets
     python ssh.py --prom-alerts ALIAS       # list alerts
+S3 (type: s3):
+    python ssh.py --s3-buckets ALIAS                    # list buckets
+    python ssh.py --s3-ls ALIAS [bucket[/prefix]]       # list objects
+    python ssh.py --s3-upload ALIAS local bucket/key    # upload file
+    python ssh.py --s3-download ALIAS bucket/key local  # download file
+    python ssh.py --s3-delete ALIAS bucket/key          # delete object
 WinRM (type: winrm):
     python ssh.py --ps ALIAS "Get-Process"  # PowerShell via WinRM
     python ssh.py --cmd ALIAS "dir"         # CMD via WinRM
@@ -964,7 +971,7 @@ def add_server(args):
         i += 1
     # Validate server type
-    valid_types = ["ssh", "telnet", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus", "winrm", "rdp"]
+    valid_types = ["ssh", "telnet", "mariadb", "mssql", "postgresql", "redis", "grafana", "prometheus", "winrm", "rdp", "s3"]
     if stype not in valid_types:
         print(f"ERROR: Invalid server type '{stype}'. Valid types: {', '.join(valid_types)}")
         sys.exit(1)
@@ -991,6 +998,13 @@ def add_server(args):
     elif stype in ["redis", "grafana", "prometheus"]:
         if token:
             new_server["token"] = token
+    elif stype == "s3":
+        # S3: user=access_key, password=secret_key, ip=endpoint
+        new_server["access_key"] = user
+        new_server["secret_key"] = password
+        new_server["use_ssl"] = True
+        if database:
+            new_server["bucket"] = database
     elif stype in ["winrm", "rdp"]:
         # WinRM/RDP may have additional auth fields
         new_server["auth_method"] = "password"  # default
@@ -1254,6 +1268,197 @@ def redis_keys(server: dict, pattern: str):
     r.close()
+
+# ── S3 commands ──────────────────────────────────
+def _get_s3_client(server: dict):
+    """Create a boto3 S3 client from a server dict."""
+    try:
+        import boto3
+        import botocore.config
+        import urllib3
+        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+    except ImportError:
+        print("ERROR: boto3 not installed. Run: pip install boto3", file=sys.stderr)
+        sys.exit(1)
+    endpoint = server.get("ip", "")
+    if endpoint and not endpoint.startswith("http"):
+        use_ssl = server.get("use_ssl", True)
+        scheme = "https" if use_ssl else "http"
+        port = int(server.get("port", 443))
+        if (scheme == "https" and port == 443) or (scheme == "http" and port == 80):
+            endpoint = f"{scheme}://{endpoint}"
+        else:
+            endpoint = f"{scheme}://{endpoint}:{port}"
+    config = botocore.config.Config(
+        signature_version="s3v4",
+        connect_timeout=15,
+        read_timeout=60,
+        retries={"max_attempts": 5, "mode": "adaptive"},
+        tcp_keepalive=True,
+    )
+    return boto3.client(
+        "s3",
+        endpoint_url=endpoint,
+        aws_access_key_id=server.get("access_key", ""),
+        aws_secret_access_key=server.get("secret_key", ""),
+        config=config,
+        verify=False,  # self-signed certs are common on MinIO; warnings suppressed above
+    )
+
+def s3_buckets(server: dict):
+    """List all S3 buckets."""
+    client = _get_s3_client(server)
+    try:
+        resp = client.list_buckets()
+        buckets = resp.get("Buckets", [])
+        if not buckets:
+            print("(no buckets)")
+            return
+        print(f"{'Name':<40} {'Created'}")
+        print("-" * 65)
+        for b in buckets:
+            created = b.get("CreationDate", "")
+            if created:
+                created = created.strftime("%Y-%m-%d %H:%M:%S")
+            print(f"{b['Name']:<40} {created}")
+        print(f"\n({len(buckets)} bucket{'s' if len(buckets) != 1 else ''})")
+    except Exception as e:
+        print(f"ERROR: {e}", file=sys.stderr)
+        sys.exit(1)
+
+def s3_ls(server: dict, path: str = ""):
+    """List objects in a bucket[/prefix]."""
+    client = _get_s3_client(server)
+    # Parse bucket/prefix from path
+    parts = path.split("/", 1) if path else []
+    bucket = parts[0] if parts else server.get("bucket", "")
+    prefix = parts[1] if len(parts) > 1 else ""
+    if not bucket:
+        print("ERROR: No bucket specified. Usage: --s3-ls ALIAS bucket[/prefix]", file=sys.stderr)
+        sys.exit(1)
+    try:
+        paginator = client.get_paginator("list_objects_v2")
+        kwargs = {"Bucket": bucket, "Delimiter": "/"}
+        if prefix:
+            if not prefix.endswith("/"):
+                prefix += "/"
+            kwargs["Prefix"] = prefix
+        total = 0
+        for page in paginator.paginate(**kwargs):
+            for cp in page.get("CommonPrefixes", []):
+                p = cp["Prefix"]
+                if prefix:
+                    p = p[len(prefix):]
+                print(f" DIR {p}")
+                total += 1
+            for obj in page.get("Contents", []):
+                key = obj["Key"]
+                if key == prefix:
+                    continue  # skip the prefix placeholder object itself
+                name = key[len(prefix):] if prefix else key
+                size = obj.get("Size", 0)
+                modified = obj.get("LastModified", "")
+                if modified:
+                    modified = modified.strftime("%Y-%m-%d %H:%M")
+                print(f"{size:>10} {modified} {name}")
+                total += 1
+        print(f"\n({total} item{'s' if total != 1 else ''})")
+    except Exception as e:
+        print(f"ERROR: {e}", file=sys.stderr)
+        sys.exit(1)
+
+def _s3_transfer_config():
+    from boto3.s3.transfer import TransferConfig
+    return TransferConfig(
+        multipart_threshold=8 * 1024 * 1024,
+        multipart_chunksize=8 * 1024 * 1024,
+        max_concurrency=4,
+        num_download_attempts=10,
+    )
+
+def s3_upload(server: dict, local_path: str, remote_path: str):
+    """Upload a file to S3, retrying with exponential backoff."""
+    # Parse bucket/key
+    parts = remote_path.split("/", 1)
+    bucket = parts[0] if parts else server.get("bucket", "")
+    key = parts[1] if len(parts) > 1 else os.path.basename(local_path)
+    if not bucket:
+        print("ERROR: No bucket. Usage: --s3-upload ALIAS local bucket/key", file=sys.stderr)
+        sys.exit(1)
+    if not os.path.isfile(local_path):
+        print(f"ERROR: File not found: {local_path}", file=sys.stderr)
+        sys.exit(1)
+    size = os.path.getsize(local_path)
+    config = _s3_transfer_config()
+    max_retries = 10
+    for attempt in range(max_retries):
+        client = _get_s3_client(server)  # fresh client per attempt
+        try:
+            print(f"Uploading {local_path} -> s3://{bucket}/{key} ({size} bytes)...")
+            client.upload_file(local_path, bucket, key, Config=config)
+            print("OK")
+            return
+        except Exception as e:
+            delay = min(2 * (2 ** attempt), 60)
+            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}", file=sys.stderr)
+            if attempt < max_retries - 1:
+                print(f"Retrying in {delay}s...", file=sys.stderr)
+                time.sleep(delay)
+            else:
+                print(f"ERROR: Upload failed after {max_retries} attempts", file=sys.stderr)
+                sys.exit(1)
+
+def s3_download(server: dict, remote_path: str, local_path: str):
+    """Download an object from S3 with retry."""
+    parts = remote_path.split("/", 1)
+    bucket = parts[0] if parts else server.get("bucket", "")
+    key = parts[1] if len(parts) > 1 else ""
+    if not bucket or not key:
+        print("ERROR: Usage: --s3-download ALIAS bucket/key local_path", file=sys.stderr)
+        sys.exit(1)
+    config = _s3_transfer_config()
+    max_retries = 10
+    for attempt in range(max_retries):
+        client = _get_s3_client(server)
+        try:
+            print(f"Downloading s3://{bucket}/{key} -> {local_path}...")
+            client.download_file(bucket, key, local_path, Config=config)
+            size = os.path.getsize(local_path)
+            print(f"OK ({size} bytes)")
+            return
+        except Exception as e:
+            delay = min(2 * (2 ** attempt), 60)
+            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}", file=sys.stderr)
+            if attempt < max_retries - 1:
+                print(f"Retrying in {delay}s...", file=sys.stderr)
+                time.sleep(delay)
+            else:
+                print(f"ERROR: Download failed after {max_retries} attempts", file=sys.stderr)
+                sys.exit(1)
+
+def s3_delete(server: dict, remote_path: str):
+    """Delete an object from S3."""
+    client = _get_s3_client(server)
+    parts = remote_path.split("/", 1)
+    bucket = parts[0] if parts else server.get("bucket", "")
+    key = parts[1] if len(parts) > 1 else ""
+    if not bucket or not key:
+        print("ERROR: Usage: --s3-delete ALIAS bucket/key", file=sys.stderr)
+        sys.exit(1)
+    try:
+        client.delete_object(Bucket=bucket, Key=key)
+        print(f"Deleted s3://{bucket}/{key}")
+    except Exception as e:
+        print(f"ERROR: {e}", file=sys.stderr)
+        sys.exit(1)
+
 # ── Grafana commands ──────────────────────────────────
 def _grafana_request(server: dict, endpoint: str) -> dict:
@@ -1531,6 +1736,34 @@ def main():
         redis_keys(servers[alias], sys.argv[3])
         sys.exit(0)
+    # ── S3 commands ──
+    if cmd == "--s3-buckets" and len(sys.argv) >= 3:
+        _, servers = load_servers()
+        alias = _resolve_alias(sys.argv[2], servers)
+        s3_buckets(servers[alias])
+        sys.exit(0)
+    if cmd == "--s3-ls" and len(sys.argv) >= 3:
+        _, servers = load_servers()
+        alias = _resolve_alias(sys.argv[2], servers)
+        path = sys.argv[3] if len(sys.argv) >= 4 else ""
+        s3_ls(servers[alias], path)
+        sys.exit(0)
+    if cmd == "--s3-upload" and len(sys.argv) >= 5:
+        _, servers = load_servers()
+        alias = _resolve_alias(sys.argv[2], servers)
+        s3_upload(servers[alias], sys.argv[3], sys.argv[4])
+        sys.exit(0)
+    if cmd == "--s3-download" and len(sys.argv) >= 5:
+        _, servers = load_servers()
+        alias = _resolve_alias(sys.argv[2], servers)
+        s3_download(servers[alias], sys.argv[3], sys.argv[4])
+        sys.exit(0)
+    if cmd == "--s3-delete" and len(sys.argv) >= 4:
+        _, servers = load_servers()
+        alias = _resolve_alias(sys.argv[2], servers)
+        s3_delete(servers[alias], sys.argv[3])
+        sys.exit(0)
     # ── Grafana commands ──
     if cmd == "--grafana-dashboards" and len(sys.argv) >= 3:
         _, servers = load_servers()