Three things, done in pure Python:

1. Automatically download/update the llama.cpp executables.
2. Automatically pull the Qwen3-4B weights, convert them to GGUF, and quantize.
3. Automatically generate start_server.py: double-click it to bring up an OpenAI-compatible server, and the client just opens http://localhost:8080 in a browser to chat.

The script depends only on the standard library plus requests and huggingface_hub; no conda, no Docker, no CMake, and it behaves the same on Windows/macOS/Linux. Zip up the whole folder and send it to the client; all they have to do is run `python deploy.py`, go grab a coffee, and it's ready when they come back.
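If the client's Python might be missing those two packages, a small self-bootstrap guard pasted at the top of deploy.py preserves the "double-click and wait" promise. A minimal sketch (the pip-via-sys.executable pattern is standard; the package list is just this project's two dependencies):

```python
import importlib.util, subprocess, sys

# Install the two non-stdlib dependencies on first run, so the client
# never has to touch pip themselves.
for pkg in ("requests", "huggingface_hub"):
    if importlib.util.find_spec(pkg) is None:
        subprocess.check_call([sys.executable, "-m", "pip", "install", pkg])
```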
Directory layout (zip this up before sending to the client)

```
qwen3-deploy/
├─ deploy.py          # one-click deployment script (this file)
├─ start_server.py    # launch script, generated automatically by deploy.py
├─ bin/               # llama.cpp executables, downloaded automatically
│   ├─ llama-cli
│   ├─ llama-server
│   └─ llama-quantize
└─ model/             # created by deploy.py: HF weights + quantized GGUF
```
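The zip itself can also be produced from Python with nothing but the standard library, so the whole "send it to the client" flow stays scriptable. A one-line sketch (run from the directory that contains qwen3-deploy/; the archive name is arbitrary):

```python
import shutil

# Produces qwen3-deploy.zip alongside the qwen3-deploy/ folder.
shutil.make_archive("qwen3-deploy", "zip", root_dir=".", base_dir="qwen3-deploy")
```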
deploy.py (ready to copy and run as-is)
```python
#!/usr/bin/env python3
"""One-click deployment of llama.cpp + Qwen3-4B-Q4_K_M.

> python deploy.py
> python start_server.py   # run this after deployment finishes
"""
import platform
import subprocess
import sys
import zipfile
from pathlib import Path

import requests
from huggingface_hub import snapshot_download

URL_BASE = "https://github.com/ggerganov/llama.cpp/releases/download/b3616"  # pinned release
MODEL_ID = "Qwen/Qwen3-4B"
QUANT_TYPE = "Q4_K_M"
CTX = 32768
NGPU_LAYERS = 35  # offload all layers by default; lower this for older GPUs

PWD = Path(__file__).parent.resolve()
BIN_DIR = PWD / "bin"
MODEL_DIR = PWD / "model"
GGUF_FP16 = MODEL_DIR / "Qwen3-4B-F16.gguf"
GGUF_Q = MODEL_DIR / f"Qwen3-4B-{QUANT_TYPE}.gguf"


def download(url: str, dst: Path, desc: str = ""):
    """Download a file with a simple progress readout."""
    # Accept-Encoding: None stops requests from asking for gzip, so
    # content-length matches the number of bytes we actually write.
    resp = requests.get(url, stream=True, headers={"Accept-Encoding": None})
    resp.raise_for_status()
    total = int(resp.headers.get("content-length", 0))
    done = 0
    with open(dst, "wb") as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            f.write(chunk)
            done += len(chunk)
            if total:
                print(f"\r{desc} {done * 100 / total:.1f}%", end="", flush=True)
    print()


def get_bin_name() -> str:
    """Return the release asset name for the current platform."""
    arch = platform.machine().lower()
    sys_name = platform.system().lower()
    if sys_name == "darwin":
        return "llama-b3616-bin-macos-arm64.zip" if arch == "arm64" else "llama-b3616-bin-macos-x64.zip"
    if sys_name == "windows":
        return "llama-b3616-bin-win-avx-x64.zip"
    if "linux" in sys_name:
        return "llama-b3616-bin-ubuntu-x64.zip"
    raise RuntimeError("Unsupported platform")


def prepare_llamacpp():
    """Download and unpack the llama.cpp executables."""
    BIN_DIR.mkdir(exist_ok=True)
    zip_name = get_bin_name()
    zip_path = BIN_DIR / zip_name
    if not zip_path.exists():
        print(">>> Downloading llama.cpp executables ...")
        download(f"{URL_BASE}/{zip_name}", zip_path, "llama.cpp")
    print(">>> Unpacking ...")
    # Since release b3091 the binaries are named llama-cli / llama-server /
    # llama-quantize. Keep shared libraries too: the Windows build needs its DLLs.
    wanted = ("llama-cli", "llama-server", "llama-quantize")
    with zipfile.ZipFile(zip_path) as zf:
        for member in zf.namelist():
            name = Path(member).name
            if name.startswith(wanted) or name.endswith((".dll", ".so", ".dylib", ".metal")):
                target = BIN_DIR / name
                with zf.open(member) as src, open(target, "wb") as dst:
                    dst.write(src.read())
                target.chmod(0o755)  # Linux/macOS need the executable bit
    print("✅ llama.cpp ready")


def convert_to_gguf():
    """Download the HF weights, convert to F16 GGUF, then quantize."""
    MODEL_DIR.mkdir(exist_ok=True)
    if GGUF_Q.exists():
        print("✅ Quantized model already present, skipping conversion")
        return
    print(">>> Downloading HF weights (about 8 GB) ...")
    snapshot_download(repo_id=MODEL_ID, local_dir=str(MODEL_DIR))
    print(">>> Converting to F16 GGUF ...")
    # convert_hf_to_gguf.py comes from the llama.cpp repo (it needs torch and
    # transformers installed); ship a copy of it next to deploy.py.
    subprocess.check_call([
        sys.executable, "convert_hf_to_gguf.py", str(MODEL_DIR),
        "--outfile", str(GGUF_FP16), "--outtype", "f16",
    ], cwd=PWD)
    print(f">>> Quantizing -> {QUANT_TYPE} (about 2 min) ...")
    subprocess.check_call([str(BIN_DIR / "llama-quantize"), str(GGUF_FP16), str(GGUF_Q), QUANT_TYPE])
    GGUF_FP16.unlink(missing_ok=True)  # optional: drop the large intermediate file
    print("✅ Quantization done, size:", GGUF_Q.stat().st_size // 1024 // 1024, "MB")


def gen_start_script():
    """Generate the one-click launch script."""
    pycode = f'''
import subprocess, time, webbrowser
from pathlib import Path

BIN = Path(__file__).with_name("bin")
MODEL = Path(__file__).with_name("model") / "{GGUF_Q.name}"
cmd = [str(BIN / "llama-server"), "-m", str(MODEL),
       "-c", "{CTX}", "-ngl", "{NGPU_LAYERS}",
       "--host", "0.0.0.0", "--port", "8080"]
print(">>> Starting server ...")
print(">>> Open http://localhost:8080 in a browser to chat")
proc = subprocess.Popen(cmd)  # start the server first ...
time.sleep(2)                 # ... give it a moment to come up
webbrowser.open("http://localhost:8080")
proc.wait()
'''
    (PWD / "start_server.py").write_text(pycode.strip(), encoding="utf8")
    print("✅ start_server.py generated; double-click it or run it from a terminal")


def main():
    try:
        prepare_llamacpp()
        convert_to_gguf()
        gen_start_script()
        print("\n🎉 Deployment finished! Run:")
        print("    python start_server.py")
        print("then open http://localhost:8080 in a browser")
    except Exception as e:
        print("❌ Error:", e, file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
```
4. Usage steps (client side)

1. You zip the whole folder and send it over.
2. The client unzips it and double-clicks deploy.py (or runs `python deploy.py` from a terminal). The script automatically downloads the llama.cpp executables (≈ 30 MB), then downloads Qwen3-4B (≈ 8 GB) and converts and quantizes it (the Q4_K_M file ends up around 2.5 GB), printing "Deployment finished" at the end.
3. The client double-clicks start_server.py; a browser window pops open on http://localhost:8080 and they can start chatting.
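The running server also speaks the OpenAI-compatible REST API, so the deployment can be smoke-tested from Python with nothing but requests. A minimal sketch (llama.cpp serves a single model, so the model field is informational; the prompt is arbitrary):

```python
import requests

# Hit the OpenAI-compatible endpoint exposed by llama.cpp's server.
resp = requests.post(
    "http://localhost:8080/v1/chat/completions",
    json={
        "model": "qwen3-4b",
        "messages": [{"role": "user", "content": "Say hi in five words."}],
        "max_tokens": 64,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```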
5. Customization points
| Requirement | What to change |
| --- | --- |
| Different model | `MODEL_ID = "Qwen/Qwen3-1.7B"` |
| Different quantization | `QUANT_TYPE = "Q8_0"` |
| Older GPU with little VRAM | `NGPU_LAYERS = 20` |
| Chinese UI | |
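Concretely, the first three rows are just edits to the constants at the top of deploy.py, made before the client runs it. A sketch of the swapped-in values (taken from the table above; note the caveat in the last comment):

```python
# Alternative constants for the top of deploy.py.
MODEL_ID = "Qwen/Qwen3-1.7B"  # smaller model: faster download and inference
QUANT_TYPE = "Q8_0"           # higher-fidelity quantization, larger file
NGPU_LAYERS = 20              # partial offload for GPUs with little VRAM
# Note: GGUF_FP16 / GGUF_Q in deploy.py hard-code a "Qwen3-4B" prefix in their
# file names; when swapping MODEL_ID, rename those too (cosmetic, but clearer).
```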
6. One-sentence summary

Toss the folder to the client: "double-click → wait → double-click again" and the llama.cpp + Qwen3-4B service is running. Completely command-line-free, handled end to end by one Python script!