diff --git a/README.md b/README.md
index 724fd594d6..1a80a18b7b 100644
--- a/README.md
+++ b/README.md
@@ -293,7 +293,7 @@ Then open **http://127.0.0.1:8088/** for the Console. Config, memory, and skills
 > -v copaw-secrets:/app/working.secret \
 > agentscope/copaw:latest
 > ```
-> Then in CoPaw **Settings → Models → Ollama**, change the Base URL to `http://host.docker.internal:11434` or your corresponding port.
+> Then in CoPaw **Settings → Models**, change the Base URL to `http://host.docker.internal:<port>` — for example, `http://host.docker.internal:11434` for Ollama, or `http://host.docker.internal:1234/v1` for LM Studio.
 >
 > **Option B** — Host networking (Linux only):
 > ```bash
diff --git a/README_zh.md b/README_zh.md
index fed00f3085..b189a5a02e 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -295,7 +295,7 @@ docker run -p 127.0.0.1:8088:8088 \
 > -v copaw-secrets:/app/working.secret \
 > agentscope/copaw:latest
 > ```
-> 然后在 CoPaw **设置 → 模型 → Ollama** 中,将 Base URL 改为 `http://host.docker.internal:11434` 或对应端口。
+> 然后在 CoPaw **设置 → 模型** 中,将 Base URL 改为 `http://host.docker.internal:<端口>` — 例如 Ollama 填 `http://host.docker.internal:11434`,LM Studio 填 `http://host.docker.internal:1234/v1`。
 >
 > **方式 B** — 使用宿主机网络(仅限 Linux):
 > ```bash
diff --git a/console/src/locales/en.json b/console/src/locales/en.json
index 5306816372..052516f182 100644
--- a/console/src/locales/en.json
+++ b/console/src/locales/en.json
@@ -353,6 +353,7 @@
     "azureEndpointHint": "Azure OpenAI endpoint, e.g. https://.openai.azure.com/openai/v1",
     "anthropicEndpointHint": "Anthropic endpoint, e.g. https://api.anthropic.com",
     "ollamaEndpointHint": "Ollama endpoint, e.g. http://localhost:11434",
+    "lmstudioEndpointHint": "LM Studio endpoint, e.g. http://localhost:1234/v1",
     "apiEndpointHint": "API endpoint, e.g. https://api.example.com",
     "pleaseEnterBaseURL": "Please enter the API base URL",
     "pleaseEnterValidURL": "Please enter a valid URL",
diff --git a/console/src/locales/ja.json b/console/src/locales/ja.json
index 69176f2cf1..4a8be4f2d6 100644
--- a/console/src/locales/ja.json
+++ b/console/src/locales/ja.json
@@ -350,6 +350,7 @@
     "azureEndpointHint": "Azure OpenAIエンドポイント、例: https://.openai.azure.com/openai/v1",
     "anthropicEndpointHint": "Anthropicエンドポイント、例: https://api.anthropic.com",
     "ollamaEndpointHint": "Ollamaエンドポイント、例: http://localhost:11434",
+    "lmstudioEndpointHint": "LM Studioエンドポイント、例: http://localhost:1234/v1",
     "apiEndpointHint": "APIエンドポイント、例: https://api.example.com",
     "pleaseEnterBaseURL": "APIベースURLを入力してください",
     "pleaseEnterValidURL": "有効なURLを入力してください",
diff --git a/console/src/locales/ru.json b/console/src/locales/ru.json
index b066b3927b..c89df2c4be 100644
--- a/console/src/locales/ru.json
+++ b/console/src/locales/ru.json
@@ -353,6 +353,7 @@
     "azureEndpointHint": "Azure OpenAI endpoint, например https://.openai.azure.com/openai/v1",
     "anthropicEndpointHint": "Anthropic endpoint, например https://api.anthropic.com",
     "ollamaEndpointHint": "Ollama endpoint, например http://localhost:11434",
+    "lmstudioEndpointHint": "LM Studio endpoint, например http://localhost:1234/v1",
     "apiEndpointHint": "API endpoint, например https://api.example.com",
     "pleaseEnterBaseURL": "Пожалуйста, введите базовый URL API",
     "pleaseEnterValidURL": "Пожалуйста, введите корректный URL",
diff --git a/console/src/locales/zh.json b/console/src/locales/zh.json
index 3a7d6c3de2..e17587c3f5 100644
--- a/console/src/locales/zh.json
+++ b/console/src/locales/zh.json
@@ -353,6 +353,7 @@
     "azureEndpointHint": "Azure OpenAI 端点,例如 https://.openai.azure.com/openai/v1",
     "anthropicEndpointHint": "Anthropic 端点,例如 https://api.anthropic.com",
     "ollamaEndpointHint": "Ollama 端点,例如 http://localhost:11434",
+    "lmstudioEndpointHint": "LM Studio 端点,例如 http://localhost:1234/v1",
     "apiEndpointHint": "API 端点,例如 https://api.example.com",
     "pleaseEnterBaseURL": "请输入 API 基础 URL",
     "pleaseEnterValidURL": "请输入有效的 URL",
diff --git a/console/src/pages/Settings/Models/components/cards/RemoteProviderCard.tsx b/console/src/pages/Settings/Models/components/cards/RemoteProviderCard.tsx
index a9ae620507..4157dbc7e4 100644
--- a/console/src/pages/Settings/Models/components/cards/RemoteProviderCard.tsx
+++ b/console/src/pages/Settings/Models/components/cards/RemoteProviderCard.tsx
@@ -66,7 +66,6 @@ export function RemoteProviderCard({
   } else if (provider.is_custom && provider.base_url) {
     isConfigured = true;
   } else if (provider.require_api_key === false) {
-    // If API key is not required, consider it configured
     isConfigured = true;
   } else if (provider.require_api_key && provider.api_key) {
     isConfigured = true;
diff --git a/console/src/pages/Settings/Models/components/modals/ProviderConfigModal.tsx b/console/src/pages/Settings/Models/components/modals/ProviderConfigModal.tsx
index d0f30f433e..7adac809a0 100644
--- a/console/src/pages/Settings/Models/components/modals/ProviderConfigModal.tsx
+++ b/console/src/pages/Settings/Models/components/modals/ProviderConfigModal.tsx
@@ -328,6 +328,9 @@ export function ProviderConfigModal({
     if (provider.id === "ollama") {
       return t("models.ollamaEndpointHint");
     }
+    if (provider.id === "lmstudio") {
+      return t("models.lmstudioEndpointHint");
+    }
     if (provider.is_custom) {
       return effectiveChatModel === "AnthropicChatModel"
         ? t("models.anthropicEndpointHint")
@@ -352,6 +355,9 @@ export function ProviderConfigModal({
     if (provider.id === "ollama") {
       return "http://localhost:11434";
     }
+    if (provider.id === "lmstudio") {
+      return "http://localhost:1234/v1";
+    }
     if (provider.is_custom && effectiveChatModel === "AnthropicChatModel") {
       return "https://api.anthropic.com";
     }
diff --git a/console/src/pages/Settings/Models/components/sections/ModelsSection.tsx b/console/src/pages/Settings/Models/components/sections/ModelsSection.tsx
index ce8e3ddd0f..875deb41c1 100644
--- a/console/src/pages/Settings/Models/components/sections/ModelsSection.tsx
+++ b/console/src/pages/Settings/Models/components/sections/ModelsSection.tsx
@@ -51,7 +51,7 @@ export function ModelsSection({
       (p.models?.length ?? 0) + (p.extra_models?.length ?? 0) > 0;
     if (!hasModels) return false;
     if (p.is_local) return true;
-    if (p.id === "ollama") return !!p.base_url;
+    if (p.require_api_key === false) return !!p.base_url;
     if (p.is_custom) return !!p.base_url;
     if (p.require_api_key ?? true) return !!p.api_key;
     return true;
diff --git a/src/copaw/providers/lm_studio_provider.py b/src/copaw/providers/lm_studio_provider.py
new file mode 100644
index 0000000000..2e80e31a0f
--- /dev/null
+++ b/src/copaw/providers/lm_studio_provider.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""An LM Studio provider implementation.
+
+LM Studio exposes an OpenAI-compatible local server (default
+http://localhost:1234/v1). This provider auto-discovers loaded models
+on each get_info() call, similar to OllamaProvider."""
+
+from __future__ import annotations
+
+import logging
+
+from copaw.providers.openai_provider import OpenAIProvider
+from copaw.providers.provider import ProviderInfo
+
+logger = logging.getLogger(__name__)
+
+
+class LMStudioProvider(OpenAIProvider):
+    """Provider for LM Studio's OpenAI-compatible local server."""
+
+    async def get_info(self, mock_secret: bool = True) -> ProviderInfo:
+        try:
+            models = await self.fetch_models(timeout=1)
+            self.models = models
+        except Exception as exc:
+            logger.debug("LM Studio model discovery failed: %s", exc)
+        return await super().get_info(mock_secret=mock_secret)
diff --git a/src/copaw/providers/provider_manager.py b/src/copaw/providers/provider_manager.py
index b707d8e208..7545031834 100644
--- a/src/copaw/providers/provider_manager.py
+++ b/src/copaw/providers/provider_manager.py
@@ -19,6 +19,7 @@
     ProviderInfo,
 )
 from copaw.providers.openai_provider import OpenAIProvider
+from copaw.providers.lm_studio_provider import LMStudioProvider
 from copaw.providers.anthropic_provider import AnthropicProvider
 from copaw.providers.ollama_provider import OllamaProvider
 from copaw.constant import SECRET_DIR
@@ -159,6 +160,15 @@
     require_api_key=False,
 )
 
+PROVIDER_LMSTUDIO = LMStudioProvider(
+    id="lmstudio",
+    name="LM Studio",
+    base_url="http://localhost:1234/v1",
+    require_api_key=False,
+    api_key_prefix="",
+    models=[],
+)
+
 
 class ModelSlotConfig(BaseModel):
     provider_id: str = Field(
@@ -216,6 +226,7 @@ def _init_builtins(self):
         self._add_builtin(PROVIDER_AZURE_OPENAI)
         self._add_builtin(PROVIDER_ANTHROPIC)
         self._add_builtin(PROVIDER_OLLAMA)
+        self._add_builtin(PROVIDER_LMSTUDIO)
         self._add_builtin(PROVIDER_LLAMACPP)
         self._add_builtin(PROVIDER_MLX)
 
@@ -419,6 +430,8 @@ def _provider_from_data(self, data: Dict) -> Provider:
             return AnthropicProvider.model_validate(data)
         if provider_id == "ollama" or chat_model == "OllamaChatModel":
             return OllamaProvider.model_validate(data)
+        if provider_id == "lmstudio":
+            return LMStudioProvider.model_validate(data)
         if data.get("is_local", False):
             return DefaultProvider.model_validate(data)
         return OpenAIProvider.model_validate(data)
diff --git a/website/public/docs/config.en.md b/website/public/docs/config.en.md
index 7cde01e2c2..8c5c973a66 100644
--- a/website/public/docs/config.en.md
+++ b/website/public/docs/config.en.md
@@ -288,11 +288,17 @@ CoPaw needs an LLM provider to work. You can set it up in three ways:
 
 ### Built-in providers
 
-| Provider   | ID           | Default Base URL                                     | API Key Prefix |
-| ---------- | ------------ | ---------------------------------------------------- | -------------- |
-| ModelScope | `modelscope` | `https://api-inference.modelscope.cn/v1`             | `ms`           |
-| DashScope  | `dashscope`  | `https://dashscope.aliyuncs.com/compatible-mode/v1`  | `sk`           |
-| Custom     | `custom`     | _(you set it)_                                        | _(any)_        |
+| Provider           | ID                  | Default Base URL                                     | API Key Prefix |
+| ------------------ | ------------------- | ---------------------------------------------------- | -------------- |
+| ModelScope         | `modelscope`        | `https://api-inference.modelscope.cn/v1`             | `ms`           |
+| DashScope          | `dashscope`         | `https://dashscope.aliyuncs.com/compatible-mode/v1`  | `sk`           |
+| Aliyun Coding Plan | `aliyun-codingplan` | `https://coding.dashscope.aliyuncs.com/v1`           | `sk-sp`        |
+| OpenAI             | `openai`            | `https://api.openai.com/v1`                           | _(any)_        |
+| Azure OpenAI       | `azure-openai`      | _(you set it)_                                        | _(any)_        |
+| Anthropic          | `anthropic`         | `https://api.anthropic.com`                            | _(any)_        |
+| Ollama             | `ollama`            | `http://localhost:11434`                               | _(none)_       |
+| LM Studio          | `lmstudio`          | `http://localhost:1234/v1`                             | _(none)_       |
+| Custom             | `custom`            | _(you set it)_                                        | _(any)_        |
 
 For each provider you need to set:
 
diff --git a/website/public/docs/config.zh.md b/website/public/docs/config.zh.md
index 5dfd95ee48..82e56c4df0 100644
--- a/website/public/docs/config.zh.md
+++ b/website/public/docs/config.zh.md
@@ -278,11 +278,17 @@ CoPaw 需要 LLM 提供商才能运行。有三种设置方式:
 
 ### 内置提供商
 
-| 提供商             | ID           | 默认 Base URL                                        | API Key 前缀 |
-| ------------------ | ------------ | ---------------------------------------------------- | ------------ |
-| ModelScope(魔搭) | `modelscope` | `https://api-inference.modelscope.cn/v1`             | `ms`         |
-| DashScope(灵积)  | `dashscope`  | `https://dashscope.aliyuncs.com/compatible-mode/v1`  | `sk`         |
-| 自定义             | `custom`     | _(你自己填)_                                          | _(任意)_     |
+| 提供商                 | ID                  | 默认 Base URL                                        | API Key 前缀 |
+| ---------------------- | ------------------- | ---------------------------------------------------- | ------------ |
+| ModelScope(魔搭)     | `modelscope`        | `https://api-inference.modelscope.cn/v1`             | `ms`         |
+| DashScope(灵积)      | `dashscope`         | `https://dashscope.aliyuncs.com/compatible-mode/v1`  | `sk`         |
+| 阿里云百炼 Coding Plan | `aliyun-codingplan` | `https://coding.dashscope.aliyuncs.com/v1`           | `sk-sp`      |
+| OpenAI                 | `openai`            | `https://api.openai.com/v1`                           | _(任意)_     |
+| Azure OpenAI           | `azure-openai`      | _(你自己填)_                                          | _(任意)_     |
+| Anthropic              | `anthropic`         | `https://api.anthropic.com`                            | _(任意)_     |
+| Ollama                 | `ollama`            | `http://localhost:11434`                               | _(无需)_     |
+| LM Studio              | `lmstudio`          | `http://localhost:1234/v1`                             | _(无需)_     |
+| 自定义                 | `custom`            | _(你自己填)_                                          | _(任意)_     |
 
 每个提供商需要设置:
 
diff --git a/website/public/docs/console.en.md b/website/public/docs/console.en.md
index 72378662fb..5e5ebcaef4 100644
--- a/website/public/docs/console.en.md
+++ b/website/public/docs/console.en.md
@@ -327,6 +327,28 @@ automatically when models are added/removed via Ollama CLI or Console.
 > `copaw models ollama-list`, `copaw models ollama-remove`. See
 > [CLI](./cli#ollama-models).
 
+### LM Studio provider
+
+The LM Studio provider connects to the LM Studio desktop application's
+OpenAI-compatible local server to discover and use loaded models.
+
+**Prerequisites:**
+
+- Install LM Studio from [lmstudio.ai](https://lmstudio.ai)
+- Load a model and start the local server in LM Studio (default: `http://localhost:1234`)
+
+**Configure:**
+
+1. Click **Settings** on the LM Studio provider card.
+2. The default Base URL is `http://localhost:1234/v1`. Adjust if needed, then
+   click **Save**.
+3. Click **Manage Models** to see models loaded in LM Studio. You can also
+   manually add model IDs.
+4. Select **LM Studio** in the **Provider** dropdown and pick a model.
+
+> LM Studio does not require an API key by default. Models must be loaded
+> in LM Studio before they appear in CoPaw.
+
 ### Choose the active model
 
 1. In the **LLM Config** section, select a **Provider** from the dropdown
@@ -375,18 +397,18 @@ Select rows → click **Delete** in the toolbar → confirm.
 
 ## Quick Reference
 
-| Page                  | Sidebar path                     | What you can do                                      |
-| --------------------- | -------------------------------- | ---------------------------------------------------- |
-| Chat                  | Chat → Chat                      | Talk with CoPaw, manage sessions                     |
-| Channels              | Control → Channels               | Enable/disable channels, configure credentials       |
-| Sessions              | Control → Sessions               | Filter, rename, delete sessions                      |
-| Cron Jobs             | Control → Cron Jobs              | Create/edit/delete jobs, run immediately             |
-| Workspace             | Agent → Workspace                | Edit persona files, view memory, upload/download     |
-| Skills                | Agent → Skills                   | Enable/disable/create/delete skills                  |
-| MCP                   | Agent → MCP                      | Enable/disable/create/delete MCP clients             |
-| Runtime Config        | Agent → Runtime Config           | Modify runtime configuration                         |
-| Models                | Settings → Models                | Configure providers, manage local/Ollama, pick model |
-| Environment Variables | Settings → Environment Variables | Add/edit/delete environment variables                |
+| Page                  | Sidebar path                     | What you can do                                                 |
+| --------------------- | -------------------------------- | --------------------------------------------------------------- |
+| Chat                  | Chat → Chat                      | Talk with CoPaw, manage sessions                                |
+| Channels              | Control → Channels               | Enable/disable channels, configure credentials                  |
+| Sessions              | Control → Sessions               | Filter, rename, delete sessions                                 |
+| Cron Jobs             | Control → Cron Jobs              | Create/edit/delete jobs, run immediately                        |
+| Workspace             | Agent → Workspace                | Edit persona files, view memory, upload/download                |
+| Skills                | Agent → Skills                   | Enable/disable/create/delete skills                             |
+| MCP                   | Agent → MCP                      | Enable/disable/create/delete MCP clients                        |
+| Runtime Config        | Agent → Runtime Config           | Modify runtime configuration                                    |
+| Models                | Settings → Models                | Configure providers, manage local/Ollama/LM Studio, pick model  |
+| Environment Variables | Settings → Environment Variables | Add/edit/delete environment variables                           |
 
 ---
 
diff --git a/website/public/docs/console.zh.md b/website/public/docs/console.zh.md
index c48bda9429..0dcd9d7932 100644
--- a/website/public/docs/console.zh.md
+++ b/website/public/docs/console.zh.md
@@ -309,6 +309,24 @@ Ollama 提供商集成本地 Ollama 守护进程,动态加载其中的模型
 >
 > 也可以通过 CLI 管理 Ollama 模型:`copaw models ollama-pull`、`copaw models ollama-list`、`copaw models ollama-remove`。详见 [CLI](./cli#ollama-模型)。
 
+### LM Studio 提供商
+
+LM Studio 提供商连接 LM Studio 桌面应用内置的 OpenAI 兼容本地服务器,自动发现并使用已加载的模型。
+
+**前置条件:**
+
+- 从 [lmstudio.ai](https://lmstudio.ai) 安装 LM Studio
+- 在 LM Studio 中加载模型并启动本地服务器(默认地址:`http://localhost:1234`)
+
+**配置步骤:**
+
+1. 点击 LM Studio 提供商卡片的 **设置** 按钮。
+2. 默认 Base URL 为 `http://localhost:1234/v1`,如有需要可修改,点击 **保存**。
+3. 点击 **模型** 查看 LM Studio 中当前已加载的模型,也可手动添加模型 ID。
+4. 在 **提供商** 下拉菜单中选择 LM Studio,选择模型后点击 **保存**。
+
+> LM Studio 默认不需要 API Key。模型必须在 LM Studio 中加载后才会在 CoPaw 中显示。
+
 ### 选择活跃模型
 
 1. 在顶部**LLM配置**的**提供商**下拉菜单中选择一个提供商(只显示已授权或
@@ -354,18 +372,18 @@ Ollama 提供商集成本地 Ollama 守护进程,动态加载其中的模型
 
 ## 快速索引
 
-| 页面     | 侧边栏路径        | 你能做什么                                         |
-| -------- | ----------------- | -------------------------------------------------- |
-| 聊天     | 聊天 → 聊天       | 和 CoPaw 对话、管理会话                            |
-| 频道     | 控制 → 频道       | 启用/禁用频道、填入凭据                            |
-| 会话     | 控制 → 会话       | 筛选、重命名、删除会话                             |
-| 定时任务 | 控制 → 定时任务   | 创建/编辑/删除任务、立即执行                       |
-| 工作区   | 智能体 → 工作区   | 编辑人设文件、查看记忆、上传/下载                  |
-| 技能     | 智能体 → 技能     | 启用/禁用/创建/删除技能                            |
-| MCP      | 智能体 → MCP      | 启用/禁用/创建/删除MCP                             |
-| 运行配置 | 智能体 → 运行配置 | 修改运行配置                                       |
-| 模型     | 设置 → 模型       | 配置提供商 API Key、管理本地/Ollama 模型、选择模型 |
-| 环境变量 | 设置 → 环境变量   | 添加/编辑/删除环境变量                             |
+| 页面     | 侧边栏路径        | 你能做什么                                                   |
+| -------- | ----------------- | ------------------------------------------------------------ |
+| 聊天     | 聊天 → 聊天       | 和 CoPaw 对话、管理会话                                      |
+| 频道     | 控制 → 频道       | 启用/禁用频道、填入凭据                                      |
+| 会话     | 控制 → 会话       | 筛选、重命名、删除会话                                       |
+| 定时任务 | 控制 → 定时任务   | 创建/编辑/删除任务、立即执行                                 |
+| 工作区   | 智能体 → 工作区   | 编辑人设文件、查看记忆、上传/下载                            |
+| 技能     | 智能体 → 技能     | 启用/禁用/创建/删除技能                                      |
+| MCP      | 智能体 → MCP      | 启用/禁用/创建/删除MCP                                       |
+| 运行配置 | 智能体 → 运行配置 | 修改运行配置                                                 |
+| 模型     | 设置 → 模型       | 配置提供商 API Key、管理本地/Ollama/LM Studio 模型、选择模型 |
+| 环境变量 | 设置 → 环境变量   | 添加/编辑/删除环境变量                                       |
 
 ---
 
diff --git a/website/public/docs/models.en.md b/website/public/docs/models.en.md
index 971174d9dd..702167f01f 100644
--- a/website/public/docs/models.en.md
+++ b/website/public/docs/models.en.md
@@ -4,7 +4,7 @@ You need to configure a model before chatting with CoPaw. You can do this under
 
 ![Console models](https://img.alicdn.com/imgextra/i1/O1CN01zHAE1Z26w6jXl2xbr_!!6000000007725-2-tps-3802-1968.png)
 
-CoPaw supports multiple LLM providers: **cloud providers** (require API Key), **local providers** (llama.cpp / MLX), and **Ollama provider**, and you can add **custom providers**. This page explains how to configure each type.
+CoPaw supports multiple LLM providers: **cloud providers** (require API Key), **local providers** (llama.cpp / MLX), **Ollama provider**, **LM Studio provider**, and you can add **custom providers**. This page explains how to configure each type.
 
 ---
 
@@ -115,6 +115,31 @@ The Ollama provider uses the **Ollama daemon** installed on your machine. Models
 > If you see `Ollama SDK not installed. Install with: pip install 'copaw[ollama]'`, make sure Ollama is installed from ollama.com and you’ve run `pip install 'copaw[ollama]'` in CoPaw’s environment. To remove a model, click **Models** on the Ollama card, then the **trash icon** next to the model and confirm.
 >
 > ![delete](https://img.alicdn.com/imgextra/i1/O1CN01OvNNu21shXVzD14go_!!6000000005798-2-tps-3802-1968.png)
+>
+> **Docker users:** If CoPaw runs inside a Docker container, `localhost` refers to the container — not your host machine. Change the Ollama Base URL to `http://host.docker.internal:11434` (and add `--add-host=host.docker.internal:host-gateway` to your `docker run` command). See the [Docker section in the README](https://github.com/agentscope-ai/CoPaw#using-docker) for details.
+
+## LM Studio provider
+
+The LM Studio provider connects to the **LM Studio** desktop application's built-in OpenAI-compatible server. Models are managed in the LM Studio GUI; CoPaw discovers loaded models automatically via the `/v1/models` endpoint.
+
+**Prerequisites:**
+
+- Install LM Studio from [lmstudio.ai](https://lmstudio.ai).
+- In LM Studio, load a model and start the local server (default: `http://localhost:1234`).
+
+1. On the Models page you'll see the LM Studio provider card.
+
+2. Click **Settings** at the bottom right. The default Base URL is `http://localhost:1234/v1`. Adjust if you changed the port in LM Studio. Click **Save**.
+
+3. Click **Models** to view models currently loaded in LM Studio. You can also manually add a model ID if needed.
+
+4. In **LLM Configuration** at the top, select **LM Studio** in the **Provider** dropdown and your model in the **Model** dropdown, then click **Save**.
+
+> **Tip:** LM Studio does not require an API key by default. If you have enabled authentication in LM Studio, enter the key in the **API Key** field. Models must be loaded in LM Studio's GUI before they appear in CoPaw.
+>
+> **Important — Context Length:** LM Studio loads models with a small default context length (often 2048 or 4096 tokens). CoPaw's system prompt (AGENTS.md + SOUL.md + PROFILE.md) can easily exceed this limit, causing an error like _"The number of tokens to keep from the initial prompt is greater than the context length"_. To fix this, **unload the model in LM Studio and reload it with a larger context length** (16384 or above is recommended). You can do this in the LM Studio GUI (Model Settings → Context Length) or via the CLI: `lms unload --all && lms load -c 16384`.
+>
+> **Docker users:** If CoPaw runs inside a Docker container, `localhost` refers to the container — not your host machine. Change the LM Studio Base URL to `http://host.docker.internal:1234/v1` (and add `--add-host=host.docker.internal:host-gateway` to your `docker run` command). See the [Docker section in the README](https://github.com/agentscope-ai/CoPaw#using-docker) for details.
 
 ## Add custom provider
 
diff --git a/website/public/docs/models.zh.md b/website/public/docs/models.zh.md
index c7780e1749..cfce57af68 100644
--- a/website/public/docs/models.zh.md
+++ b/website/public/docs/models.zh.md
@@ -4,7 +4,7 @@
 
 ![控制台模型](https://img.alicdn.com/imgextra/i4/O1CN01XnOPPQ1c99vox3I88_!!6000000003557-2-tps-3786-1980.png)
 
-CoPaw 支持多种 LLM 提供商:**云提供商**(需 API Key)、**本地提供商**(llama.cpp / MLX)和 **Ollama 提供商**,且支持添加自定义 **提供商**。本文介绍这几类提供商的配置方式。
+CoPaw 支持多种 LLM 提供商:**云提供商**(需 API Key)、**本地提供商**(llama.cpp / MLX)、**Ollama 提供商**、**LM Studio 提供商**,且支持添加自定义 **提供商**。本文介绍这几类提供商的配置方式。
 
 ---
 
@@ -114,8 +114,33 @@ Ollama 提供商对接本机安装的 **Ollama 守护进程**,使用其中的
 >
 > 如果在过程中遇到 `Ollama SDK not installed. Install with: pip install 'copaw[ollama]'`的提示,请先确认是否已经在 ollama.com 下载 Ollama,并在 CoPaw所在虚拟环境中执行过 `pip install 'copaw[ollama]'`。如果想删除某个模型,点击 Ollama 卡片右下角的 **模型**,在模型列表中,点击想要删除的模型右侧的 **垃圾桶按钮**,二次确认后即可删除。
 >
+> **Docker 用户:** 如果 CoPaw 运行在 Docker 容器中,`localhost` 指向的是容器自身而非宿主机。请将 Ollama 的 Base URL 改为 `http://host.docker.internal:11434`(并在 `docker run` 命令中添加 `--add-host=host.docker.internal:host-gateway`)。详见 [README 的 Docker 章节](https://github.com/agentscope-ai/CoPaw#使用-docker)。
+>
 > ![delete](https://img.alicdn.com/imgextra/i2/O1CN01p2o85m1Ul9rkY87PS_!!6000000002557-2-tps-3802-1968.png)
 
+## LM Studio 提供商
+
+LM Studio 提供商连接 **LM Studio** 桌面应用内置的 OpenAI 兼容服务器。模型在 LM Studio 的图形界面中管理,CoPaw 通过 `/v1/models` 端点自动发现已加载的模型。
+
+**前置条件:**
+
+- 从 [lmstudio.ai](https://lmstudio.ai) 安装 LM Studio。
+- 在 LM Studio 中加载模型并启动本地服务器(默认地址:`http://localhost:1234`)。
+
+1. 在控制台的模型页面中,可以看到 LM Studio 提供商对应的卡片。
+
+2. 点击右下角 **设置**,默认 Base URL 为 `http://localhost:1234/v1`。如果你在 LM Studio 中修改了端口,请相应调整。点击 **保存**。
+
+3. 点击 **模型** 查看 LM Studio 中当前已加载的模型。如有需要,也可以手动添加模型 ID。
+
+4. 在上方的 **LLM 配置** 中,**提供商** 对应的下拉菜单中选择 LM Studio,**模型** 对应的下拉菜单中选择想使用的模型。点击 **保存**。
+
+> **提示:** LM Studio 默认不需要 API Key。如果你在 LM Studio 中启用了认证功能,请在 **API Key** 字段中填入对应的密钥。模型必须在 LM Studio 的图形界面中加载后才会在 CoPaw 中显示。
+>
+> **重要 — 上下文长度:** LM Studio 加载模型时默认的上下文长度较小(通常为 2048 或 4096 tokens)。CoPaw 的系统提示词(AGENTS.md + SOUL.md + PROFILE.md)可能会超过此限制,导致报错 _"The number of tokens to keep from the initial prompt is greater than the context length"_。解决方法:**在 LM Studio 中卸载模型,然后以更大的上下文长度重新加载**(建议 16384 及以上)。可以在 LM Studio 图形界面中调整(模型设置 → Context Length),也可以通过 CLI 操作:`lms unload --all && lms load -c 16384`。
+>
+> **Docker 用户:** 如果 CoPaw 运行在 Docker 容器中,`localhost` 指向的是容器自身而非宿主机。请将 LM Studio 的 Base URL 改为 `http://host.docker.internal:1234/v1`(并在 `docker run` 命令中添加 `--add-host=host.docker.internal:host-gateway`)。详见 [README 的 Docker 章节](https://github.com/agentscope-ai/CoPaw#使用-docker)。
+
 ## 添加自定义提供商
 
 1. 在控制台的模型页面点击 **添加提供商**。
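
The documentation added by this patch leans on one mechanism throughout: LM Studio's OpenAI-compatible local server and its `/v1/models` endpoint, which `LMStudioProvider.get_info()` polls through `fetch_models()` to auto-discover loaded models. The snippet below is a minimal standalone sketch of that discovery request, useful for checking the server before pointing CoPaw at it. It is illustrative only and not part of the patch; it assumes the default base URL `http://localhost:1234/v1` and uses the Python standard library rather than the provider's own HTTP client.

```python
# Hedged sketch: query LM Studio's OpenAI-compatible model listing directly.
# Assumes LM Studio is running locally on its default port; adjust BASE_URL
# (e.g. to http://host.docker.internal:1234/v1 from inside Docker) as needed.
import json
from urllib.request import urlopen

BASE_URL = "http://localhost:1234/v1"

with urlopen(f"{BASE_URL}/models", timeout=5) as resp:
    payload = json.load(resp)

# The response follows the OpenAI "list models" shape:
# {"object": "list", "data": [{"id": "<model-id>", ...}, ...]}.
# Per the docs above, a model must be loaded in LM Studio to show up here.
for model in payload.get("data", []):
    print(model["id"])
```

If the list is empty or the request times out, the fixes described in the docs apply: load a model in LM Studio, confirm the server port, and from inside Docker use `host.docker.internal` instead of `localhost`.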