diff --git a/apps/cli/src/lib/utils/context-window.ts b/apps/cli/src/lib/utils/context-window.ts
index 5cd58b55a8f..0c7f6053ced 100644
--- a/apps/cli/src/lib/utils/context-window.ts
+++ b/apps/cli/src/lib/utils/context-window.ts
@@ -42,6 +42,8 @@ function getModelIdForProvider(config: ProviderSettings): string | undefined {
 			return config.ollamaModelId
 		case "lmstudio":
 			return config.lmStudioModelId
+		case "atomic-chat":
+			return config.atomicChatModelId
 		case "openai":
 			return config.openAiModelId
 		case "requesty":
diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts
index ac9471f2c67..610ee5f9bcb 100644
--- a/packages/types/src/global-settings.ts
+++ b/packages/types/src/global-settings.ts
@@ -255,6 +255,7 @@ export const SECRET_STATE_KEYS = [
 	"awsSessionToken",
 	"openAiApiKey",
 	"ollamaApiKey",
+	"atomicChatApiKey",
 	"geminiApiKey",
 	"openAiNativeApiKey",
 	"deepSeekApiKey",
diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index 1b1da702ffc..284d1dc1a77 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -47,7 +47,7 @@ export const isDynamicProvider = (key: string): key is DynamicProvider =>
  * Local providers require localhost API calls in order to get the model list.
  */
-export const localProviders = ["ollama", "lmstudio"] as const
+export const localProviders = ["ollama", "lmstudio", "atomic-chat"] as const
 
 export type LocalProvider = (typeof localProviders)[number]
@@ -274,6 +274,12 @@ const lmStudioSchema = baseProviderSettingsSchema.extend({
 	lmStudioSpeculativeDecodingEnabled: z.boolean().optional(),
 })
 
+const atomicChatSchema = baseProviderSettingsSchema.extend({
+	atomicChatModelId: z.string().optional(),
+	atomicChatBaseUrl: z.string().optional(),
+	atomicChatApiKey: z.string().optional(),
+})
+
 const geminiSchema = apiModelIdProviderModelSchema.extend({
 	geminiApiKey: z.string().optional(),
 	googleGeminiBaseUrl: z.string().optional(),
@@ -394,6 +400,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	ollamaSchema.merge(z.object({ apiProvider: z.literal("ollama") })),
 	vsCodeLmSchema.merge(z.object({ apiProvider: z.literal("vscode-lm") })),
 	lmStudioSchema.merge(z.object({ apiProvider: z.literal("lmstudio") })),
+	atomicChatSchema.merge(z.object({ apiProvider: z.literal("atomic-chat") })),
 	geminiSchema.merge(z.object({ apiProvider: z.literal("gemini") })),
 	geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })),
 	openAiCodexSchema.merge(z.object({ apiProvider: z.literal("openai-codex") })),
@@ -427,6 +434,7 @@ export const providerSettingsSchema = z.object({
 	...ollamaSchema.shape,
 	...vsCodeLmSchema.shape,
 	...lmStudioSchema.shape,
+	...atomicChatSchema.shape,
 	...geminiSchema.shape,
 	...geminiCliSchema.shape,
 	...openAiCodexSchema.shape,
@@ -473,6 +481,7 @@ export const modelIdKeys = [
 	"ollamaModelId",
 	"lmStudioModelId",
 	"lmStudioDraftModelId",
+	"atomicChatModelId",
 	"requestyModelId",
 	"unboundModelId",
 	"litellmModelId",
@@ -504,6 +513,7 @@ export const modelIdKeysByProvider: Record = {
 	"openai-native": "openAiModelId",
 	ollama: "ollamaModelId",
 	lmstudio: "lmStudioModelId",
+	"atomic-chat": "atomicChatModelId",
 	gemini: "apiModelId",
 	"gemini-cli": "apiModelId",
 	mistral: "apiModelId",
@@ -636,4 +646,5 @@ export const MODELS_BY_PROVIDER: Record<
 	// Local providers; models discovered from localhost endpoints.
 	lmstudio: { id: "lmstudio", label: "LM Studio", models: [] },
 	ollama: { id: "ollama", label: "Ollama", models: [] },
+	"atomic-chat": { id: "atomic-chat", label: "Atomic Chat", models: [] },
 }
diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts
index 243ecd8d4ed..07744fc04e3 100644
--- a/packages/types/src/providers/index.ts
+++ b/packages/types/src/providers/index.ts
@@ -97,6 +97,8 @@ export function getProviderDefaultModelId(
 			return "" // Ollama uses dynamic model selection
 		case "lmstudio":
 			return "" // LMStudio uses dynamic model selection
+		case "atomic-chat":
+			return "" // Atomic Chat uses dynamic model selection
 		case "vscode-lm":
 			return vscodeLlmDefaultModelId
 		case "sambanova":
diff --git a/packages/types/src/vscode-extension-host.ts b/packages/types/src/vscode-extension-host.ts
index a4ef802efbc..e71f31b1dcf 100644
--- a/packages/types/src/vscode-extension-host.ts
+++ b/packages/types/src/vscode-extension-host.ts
@@ -39,6 +39,7 @@ export interface ExtensionMessage {
 		| "openAiModels"
 		| "ollamaModels"
 		| "lmStudioModels"
+		| "atomicChatModels"
 		| "vsCodeLmModels"
 		| "vsCodeLmApiAvailable"
 		| "updatePrompt"
@@ -126,6 +127,7 @@ export interface ExtensionMessage {
 	openAiModels?: string[]
 	ollamaModels?: ModelRecord
 	lmStudioModels?: ModelRecord
+	atomicChatModels?: ModelRecord
 	vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
 	mcpServers?: McpServer[]
 	commits?: GitCommit[]
@@ -402,6 +404,7 @@ export interface WebviewMessage {
 		| "requestOpenAiModels"
 		| "requestOllamaModels"
 		| "requestLmStudioModels"
+		| "requestAtomicChatModels"
 		| "requestVsCodeLmModels"
 		| "openImage"
 		| "saveImage"
diff --git a/src/api/index.ts b/src/api/index.ts
index 40ba31f39af..10aea494789 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -15,6 +15,7 @@ import {
 	OpenAiHandler,
 	OpenAiCodexHandler,
 	LmStudioHandler,
+	AtomicChatHandler,
 	GeminiHandler,
 	OpenAiNativeHandler,
 	DeepSeekHandler,
@@ -137,6 +138,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new NativeOllamaHandler(options)
 		case "lmstudio":
 			return new LmStudioHandler(options)
+		case "atomic-chat":
+			return new AtomicChatHandler(options)
 		case "gemini":
 			return new GeminiHandler(options)
 		case "openai-codex":
diff --git a/src/api/providers/atomic-chat.ts b/src/api/providers/atomic-chat.ts
new file mode 100644
index 00000000000..f7fcb5ba48d
--- /dev/null
+++ b/src/api/providers/atomic-chat.ts
@@ -0,0 +1,207 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+
+import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { NativeToolCallParser } from "../../core/assistant-message/NativeToolCallParser"
+import { TagMatcher } from "../../utils/tag-matcher"
+
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { ApiStream } from "../transform/stream"
+
+import { BaseProvider } from "./base-provider"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { getModelsFromCache } from "./fetchers/modelCache"
+import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
+import { DEFAULT_HEADERS } from "./constants"
+
+/**
+ * Atomic Chat — local OpenAI-compatible API (default http://127.0.0.1:1337/v1).
+ * @see https://github.com/AtomicBot-ai/Atomic-Chat
+ */
+export class AtomicChatHandler extends BaseProvider implements SingleCompletionHandler {
+	protected options: ApiHandlerOptions
+	private client: OpenAI
+	private readonly providerName = "Atomic Chat"
+
+	constructor(options: ApiHandlerOptions) {
+		super()
+		this.options = options
+
+		const baseRoot = (this.options.atomicChatBaseUrl || "http://127.0.0.1:1337").replace(/\/+$/, "")
+		const apiKey = this.options.atomicChatApiKey?.trim() || "noop"
+
+		this.client = new OpenAI({
+			baseURL: `${baseRoot}/v1`,
+			apiKey,
+			timeout: getApiRequestTimeout(),
+			defaultHeaders: {
+				...DEFAULT_HEADERS,
+			},
+		})
+	}
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		const toContentBlocks = (
+			blocks: Anthropic.Messages.MessageParam[] | string,
+		): Anthropic.Messages.ContentBlockParam[] => {
+			if (typeof blocks === "string") {
+				return [{ type: "text", text: blocks }]
+			}
+
+			const result: Anthropic.Messages.ContentBlockParam[] = []
+			for (const msg of blocks) {
+				if (typeof msg.content === "string") {
+					result.push({ type: "text", text: msg.content })
+				} else if (Array.isArray(msg.content)) {
+					for (const part of msg.content) {
+						if (part.type === "text") {
+							result.push({ type: "text", text: part.text })
+						}
+					}
+				}
+			}
+			return result
+		}
+
+		let inputTokens = 0
+		try {
+			inputTokens = await this.countTokens([{ type: "text", text: systemPrompt }, ...toContentBlocks(messages)])
+		} catch (err) {
+			console.error("[AtomicChat] Failed to count input tokens:", err)
+			inputTokens = 0
+		}
+
+		let assistantText = ""
+
+		try {
+			const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming = {
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
+				stream: true,
+				tools: this.convertToolsForOpenAI(metadata?.tools),
+				tool_choice: metadata?.tool_choice,
+				parallel_tool_calls: metadata?.parallelToolCalls ?? true,
+			}
+
+			let results
+			try {
+				results = await this.client.chat.completions.create(params)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			const matcher = new TagMatcher(
+				"think",
+				(chunk) =>
+					({
+						type: chunk.matched ? "reasoning" : "text",
+						text: chunk.data,
+					}) as const,
+			)
+
+			for await (const chunk of results) {
+				const delta = chunk.choices[0]?.delta
+				const finishReason = chunk.choices[0]?.finish_reason
+
+				if (delta?.content) {
+					assistantText += delta.content
+					for (const processedChunk of matcher.update(delta.content)) {
+						yield processedChunk
+					}
+				}
+
+				if (delta?.tool_calls) {
+					for (const toolCall of delta.tool_calls) {
+						yield {
+							type: "tool_call_partial",
+							index: toolCall.index,
+							id: toolCall.id,
+							name: toolCall.function?.name,
+							arguments: toolCall.function?.arguments,
+						}
+					}
+				}
+
+				if (finishReason) {
+					const endEvents = NativeToolCallParser.processFinishReason(finishReason)
+					for (const event of endEvents) {
+						yield event
+					}
+				}
+			}
+
+			for (const processedChunk of matcher.final()) {
+				yield processedChunk
+			}
+
+			let outputTokens = 0
+			try {
+				outputTokens = await this.countTokens([{ type: "text", text: assistantText }])
+			} catch (err) {
+				console.error("[AtomicChat] Failed to count output tokens:", err)
+				outputTokens = 0
+			}
+
+			yield {
+				type: "usage",
+				inputTokens,
+				outputTokens,
+			} as const
+		} catch {
+			throw new Error(
+				"Atomic Chat request failed. Ensure the app is running, the local API server is enabled, and the model is loaded with enough context for Roo Code.",
+			)
+		}
+	}
+
+	override getModel(): { id: string; info: ModelInfo } {
+		const models = getModelsFromCache("atomic-chat")
+		if (models && this.options.atomicChatModelId && models[this.options.atomicChatModelId]) {
+			return {
+				id: this.options.atomicChatModelId,
+				info: models[this.options.atomicChatModelId],
+			}
+		}
+		return {
+			id: this.options.atomicChatModelId || "",
+			info: openAiModelInfoSaneDefaults,
+		}
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		try {
+			const params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming = {
+				model: this.getModel().id,
+				messages: [{ role: "user", content: prompt }],
+				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
+				stream: false,
+			}
+
+			let response
+			try {
+				response = await this.client.chat.completions.create(params)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+			return response.choices[0]?.message.content || ""
+		} catch {
+			throw new Error(
+				"Atomic Chat request failed. Ensure the app is running and the local API server is reachable.",
+			)
+		}
+	}
+}
diff --git a/src/api/providers/fetchers/atomic-chat.ts b/src/api/providers/fetchers/atomic-chat.ts
new file mode 100644
index 00000000000..5b1ed3ca60e
--- /dev/null
+++ b/src/api/providers/fetchers/atomic-chat.ts
@@ -0,0 +1,39 @@
+import axios from "axios"
+import type { ModelInfo, ModelRecord } from "@roo-code/types"
+import { openAiModelInfoSaneDefaults } from "@roo-code/types"
+
+/**
+ * Fetches model IDs from Atomic Chat's OpenAI-compatible API.
+ * @see https://github.com/AtomicBot-ai/Atomic-Chat
+ */
+export async function getAtomicChatModels(baseUrl = "http://127.0.0.1:1337", apiKey?: string): Promise<ModelRecord> {
+	const models: ModelRecord = {}
+	const root = baseUrl === "" ? "http://127.0.0.1:1337" : baseUrl.replace(/\/+$/, "")
+
+	try {
+		if (!URL.canParse(root)) {
+			return models
+		}
+
+		const headers: Record<string, string> = {}
+		if (apiKey?.trim()) {
+			headers.Authorization = `Bearer ${apiKey.trim()}`
+		}
+
+		const response = await axios.get<{ data?: Array<{ id: string }> }>(`${root}/v1/models`, {
+			headers,
+			timeout: 10_000,
+		})
+
+		const list = response.data?.data ?? []
+		for (const entry of list) {
+			if (entry?.id) {
+				models[entry.id] = { ...openAiModelInfoSaneDefaults }
+			}
+		}
+
+		return models
+	} catch {
+		return models
+	}
+}
diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts
index 2f0b23c6beb..b4500030e3e 100644
--- a/src/api/providers/fetchers/modelCache.ts
+++ b/src/api/providers/fetchers/modelCache.ts
@@ -23,6 +23,7 @@ import { getLiteLLMModels } from "./litellm"
 import { GetModelsOptions } from "../../../shared/api"
 import { getOllamaModels } from "./ollama"
 import { getLMStudioModels } from "./lmstudio"
+import { getAtomicChatModels } from "./atomic-chat"
 import { getPoeModels } from "./poe"
 
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
@@ -81,6 +82,9 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise 0) {
+					provider.postMessageToWebview({
+						type: "atomicChatModels",
+						atomicChatModels: atomicChatModels,
+					})
+				}
+			} catch (error) {
+				console.debug("Atomic Chat models fetch failed:", error)
+			}
+			break
+		}
 		case "requestOpenAiModels":
 			if (message?.values?.baseUrl && message?.values?.apiKey) {
 				const openAiModels = await getOpenAiModels(
diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts
index 7246a90177a..8f4e4acce52 100644
--- a/src/shared/ProfileValidator.ts
+++ b/src/shared/ProfileValidator.ts
@@ -68,6 +68,8 @@ export class ProfileValidator {
 				return profile.litellmModelId
 			case "lmstudio":
 				return profile.lmStudioModelId
+			case "atomic-chat":
+				return profile.atomicChatModelId
 			case "vscode-lm":
 				// We probably need something more flexible for this one, if we need to really support it here.
 				return profile.vsCodeLmModelSelector?.id
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 66ed4f25ad3..3f5d89def79 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -176,6 +176,7 @@ const dynamicProviderExtras = {
 	unbound: {} as { apiKey?: string },
 	ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	"atomic-chat": {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	poe: {} as { apiKey?: string; baseUrl?: string },
 } as const satisfies Record
diff --git a/src/shared/checkExistApiConfig.ts b/src/shared/checkExistApiConfig.ts
index 09e29fe0531..6dcf6ece2c0 100644
--- a/src/shared/checkExistApiConfig.ts
+++ b/src/shared/checkExistApiConfig.ts
@@ -21,6 +21,7 @@ export function checkExistKey(config: ProviderSettings | undefined) {
 			config.vertexProjectId,
 			config.ollamaModelId,
 			config.lmStudioModelId,
+			config.atomicChatModelId,
 			config.vsCodeLmModelSelector,
 		].some((value) => value !== undefined)
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index d23bc6fa85a..d58e16ed9f5 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -72,6 +72,7 @@ import {
 	DeepSeek,
 	Gemini,
 	LMStudio,
+	AtomicChat,
 	LiteLLM,
 	Mistral,
 	Moonshot,
@@ -235,6 +236,8 @@ const ApiOptions = ({
 			vscode.postMessage({ type: "requestOllamaModels" })
 		} else if (selectedProvider === "lmstudio") {
 			vscode.postMessage({ type: "requestLmStudioModels" })
+		} else if (selectedProvider === "atomic-chat") {
+			vscode.postMessage({ type: "requestAtomicChatModels" })
 		} else if (selectedProvider === "vscode-lm") {
 			vscode.postMessage({ type: "requestVsCodeLmModels" })
 		} else if (selectedProvider === "litellm" || selectedProvider === "poe") {
@@ -249,6 +252,8 @@ const ApiOptions = ({
 		apiConfiguration?.openAiApiKey,
 		apiConfiguration?.ollamaBaseUrl,
 		apiConfiguration?.lmStudioBaseUrl,
+		apiConfiguration?.atomicChatBaseUrl,
+		apiConfiguration?.atomicChatApiKey,
 		apiConfiguration?.litellmBaseUrl,
 		apiConfiguration?.litellmApiKey,
 		apiConfiguration?.poeApiKey,
@@ -362,6 +367,7 @@ const ApiOptions = ({
 			openai: { field: "openAiModelId" },
 			ollama: { field: "ollamaModelId" },
 			lmstudio: { field: "lmStudioModelId" },
+			"atomic-chat": { field: "atomicChatModelId" },
 		}
 
 		const config = PROVIDER_MODEL_CONFIG[value]
@@ -393,6 +399,7 @@ const ApiOptions = ({
 		const slugs: Record = {
 			"openai-native": "openai",
 			openai: "openai-compatible",
+			"atomic-chat": "openai-compatible",
 		}
 
 		const slug = slugs[selectedProvider] || selectedProvider
@@ -604,6 +611,13 @@ const ApiOptions = ({
 				/>
 			)}
 
+			{selectedProvider === "atomic-chat" && (
+				<AtomicChat
+					apiConfiguration={apiConfiguration}
+					setApiConfigurationField={setApiConfigurationField}
+				/>
+			)}
+
 			{selectedProvider === "deepseek" && (
diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts
index 87fe828ce83..cb09fa0ad23 100644
--- a/webview-ui/src/components/settings/constants.ts
+++ b/webview-ui/src/components/settings/constants.ts
@@ -53,6 +53,7 @@ export const PROVIDERS = [
 	{ value: "vscode-lm", label: "VS Code LM API", proxy: false },
 	{ value: "mistral", label: "Mistral", proxy: false },
 	{ value: "lmstudio", label: "LM Studio", proxy: true },
+	{ value: "atomic-chat", label: "Atomic Chat", proxy: true },
 	{ value: "ollama", label: "Ollama", proxy: true },
 	{ value: "requesty", label: "Requesty", proxy: false },
 	{ value: "xai", label: "xAI (Grok)", proxy: false },
diff --git a/webview-ui/src/components/settings/providers/AtomicChat.tsx b/webview-ui/src/components/settings/providers/AtomicChat.tsx
new file mode 100644
index 00000000000..6019e1710dc
--- /dev/null
+++ b/webview-ui/src/components/settings/providers/AtomicChat.tsx
@@ -0,0 +1,119 @@
+import { useCallback, useState, useMemo, useEffect } from "react"
+import { useEvent } from "react-use"
+import { Trans } from "react-i18next"
+import { VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
+
+import type { ProviderSettings, ExtensionMessage, ModelRecord } from "@roo-code/types"
+
+import { useAppTranslation } from "@src/i18n/TranslationContext"
+import { useRouterModels } from "@src/components/ui/hooks/useRouterModels"
+import { vscode } from "@src/utils/vscode"
+
+import { inputEventTransform } from "../transforms"
+import { ModelPicker } from "../ModelPicker"
+
+type AtomicChatProps = {
+	apiConfiguration: ProviderSettings
+	setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void
+}
+
+export const AtomicChat = ({ apiConfiguration, setApiConfigurationField }: AtomicChatProps) => {
+	const { t } = useAppTranslation()
+
+	const [atomicChatModels, setAtomicChatModels] = useState<ModelRecord>({})
+	const routerModels = useRouterModels()
+
+	const handleInputChange = useCallback(
+		<K extends keyof ProviderSettings, E>(
+			field: K,
+			transform: (event: E) => ProviderSettings[K] = inputEventTransform,
+		) =>
+			(event: E | Event) => {
+				setApiConfigurationField(field, transform(event as E))
+			},
+		[setApiConfigurationField],
+	)
+
+	const onMessage = useCallback((event: MessageEvent) => {
+		const message: ExtensionMessage = event.data
+
+		switch (message.type) {
+			case "atomicChatModels":
+				setAtomicChatModels(message.atomicChatModels ?? {})
+				break
+		}
+	}, [])
+
+	useEvent("message", onMessage)
+
+	useEffect(() => {
+		vscode.postMessage({ type: "requestAtomicChatModels" })
+	}, [])
+
+	const modelNotAvailableError = useMemo(() => {
+		const selectedModel = apiConfiguration?.atomicChatModelId
+		if (!selectedModel) return undefined
+
+		if (Object.keys(atomicChatModels).length > 0 && selectedModel in atomicChatModels) {
+			return undefined
+		}
+
+		if (routerModels.data?.["atomic-chat"]) {
+			const availableModels = Object.keys(routerModels.data["atomic-chat"])
+			if (!availableModels.includes(selectedModel)) {
+				return t("settings:validation.modelAvailability", { modelId: selectedModel })
+			}
+		}
+
+		return undefined
+	}, [apiConfiguration?.atomicChatModelId, routerModels.data, atomicChatModels, t])
+
+	return (
+		<>
+
+
+
+
+
+
+					{t("settings:providers.atomicChat.apiKeyHelp")}
+
+
+
+						,
+						b: ,
+						span: (
+
+								Note:
+
+						),
+					}}
+				/>
+
+		</>
+	)
+}
diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts
index 7badb543115..9d6df630795 100644
--- a/webview-ui/src/components/settings/providers/index.ts
+++ b/webview-ui/src/components/settings/providers/index.ts
@@ -3,6 +3,7 @@ export { Bedrock } from "./Bedrock"
 export { DeepSeek } from "./DeepSeek"
 export { Gemini } from "./Gemini"
 export { LMStudio } from "./LMStudio"
+export { AtomicChat } from "./AtomicChat"
 export { Mistral } from "./Mistral"
 export { Moonshot } from "./Moonshot"
 export { Ollama } from "./Ollama"
diff --git a/webview-ui/src/components/settings/utils/providerModelConfig.ts b/webview-ui/src/components/settings/utils/providerModelConfig.ts
index 8631c5571d4..adc149a30fe 100644
--- a/webview-ui/src/components/settings/utils/providerModelConfig.ts
+++ b/webview-ui/src/components/settings/utils/providerModelConfig.ts
@@ -43,6 +43,7 @@ export const PROVIDER_SERVICE_CONFIG: Partial
+	new Promise((resolve, reject) => {
+		const cleanup = () => {
+			window.removeEventListener("message", handler)
+		}
+
+		const timeout = setTimeout(() => {
+			cleanup()
+			reject(new Error("Atomic Chat models request timed out"))
+		}, 10_000)
+
+		const handler = (event: MessageEvent) => {
+			const message: ExtensionMessage = event.data
+
+			if (message.type === "atomicChatModels") {
+				clearTimeout(timeout)
+				cleanup()
+
+				if (message.atomicChatModels) {
+					resolve(message.atomicChatModels)
+				} else {
+					reject(new Error("No Atomic Chat models in response"))
+				}
+			}
+		}
+
+		window.addEventListener("message", handler)
+		vscode.postMessage({ type: "requestAtomicChatModels" })
+	})
+
+export const useAtomicChatModels = (modelId?: string) =>
+	useQuery({ queryKey: ["atomicChatModels"], queryFn: () => (modelId ? getAtomicChatModels() : {}) })
diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
index bf78236b824..10cbbf84e3f 100644
--- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts
+++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
@@ -36,6 +36,7 @@ import {
 import { useRouterModels } from "./useRouterModels"
 import { useOpenRouterModelProviders } from "./useOpenRouterModelProviders"
 import { useLmStudioModels } from "./useLmStudioModels"
+import { useAtomicChatModels } from "./useAtomicChatModels"
 import { useOllamaModels } from "./useOllamaModels"
 
 /**
@@ -56,6 +57,7 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 	const dynamicProvider = activeProvider && isDynamicProvider(activeProvider) ? activeProvider : undefined
 	const openRouterModelId = activeProvider === "openrouter" ? apiConfiguration?.openRouterModelId : undefined
 	const lmStudioModelId = activeProvider === "lmstudio" ? apiConfiguration?.lmStudioModelId : undefined
+	const atomicChatModelId = activeProvider === "atomic-chat" ? apiConfiguration?.atomicChatModelId : undefined
 	const ollamaModelId = activeProvider === "ollama" ? apiConfiguration?.ollamaModelId : undefined
 
 	// Only fetch router models for dynamic providers
@@ -67,12 +69,14 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 	const openRouterModelProviders = useOpenRouterModelProviders(openRouterModelId)
 	const lmStudioModels = useLmStudioModels(lmStudioModelId)
+	const atomicChatModels = useAtomicChatModels(atomicChatModelId)
 	const ollamaModels = useOllamaModels(ollamaModelId)
 
 	// Compute readiness only for the data actually needed for the selected provider
 	const needRouterModels = shouldFetchRouterModels
 	const needOpenRouterProviders = activeProvider === "openrouter"
 	const needLmStudio = typeof lmStudioModelId !== "undefined"
+	const needAtomicChat = typeof atomicChatModelId !== "undefined"
 	const needOllama = typeof ollamaModelId !== "undefined"
 
 	const hasValidRouterData =
@@ -85,6 +89,7 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 	const isReady =
 		(!needLmStudio || typeof lmStudioModels.data !== "undefined") &&
+		(!needAtomicChat || typeof atomicChatModels.data !== "undefined") &&
 		(!needOllama || typeof ollamaModels.data !== "undefined") &&
 		hasValidRouterData &&
 		(!needOpenRouterProviders || typeof openRouterModelProviders.data !== "undefined")
@@ -97,6 +102,7 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 				routerModels: (routerModels.data || {}) as RouterModels,
 				openRouterModelProviders: (openRouterModelProviders.data || {}) as Record,
 				lmStudioModels: (lmStudioModels.data || undefined) as ModelRecord | undefined,
+				atomicChatModels: (atomicChatModels.data || undefined) as ModelRecord | undefined,
 				ollamaModels: (ollamaModels.data || undefined) as ModelRecord | undefined,
 			})
 		: { id: getProviderDefaultModelId(activeProvider ?? "openrouter"), info: undefined }
@@ -109,11 +115,13 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 			(needRouterModels && routerModels.isLoading) ||
 			(needOpenRouterProviders && openRouterModelProviders.isLoading) ||
 			(needLmStudio && lmStudioModels!.isLoading) ||
+			(needAtomicChat && atomicChatModels!.isLoading) ||
 			(needOllama && ollamaModels!.isLoading),
 		isError:
 			(needRouterModels && routerModels.isError) ||
 			(needOpenRouterProviders && openRouterModelProviders.isError) ||
 			(needLmStudio && lmStudioModels!.isError) ||
+			(needAtomicChat && atomicChatModels!.isError) ||
 			(needOllama && ollamaModels!.isError),
 	}
 }
@@ -124,6 +132,7 @@ function getSelectedModel({
 	routerModels,
 	openRouterModelProviders,
 	lmStudioModels,
+	atomicChatModels,
 	ollamaModels,
 }: {
 	provider: ProviderName
@@ -131,6 +140,7 @@ function getSelectedModel({
 	routerModels: RouterModels
 	openRouterModelProviders: Record
 	lmStudioModels: ModelRecord | undefined
+	atomicChatModels: ModelRecord | undefined
 	ollamaModels: ModelRecord | undefined
 }): { id: string; info: ModelInfo | undefined } {
 	// the `undefined` case are used to show the invalid selection to prevent
@@ -294,6 +304,14 @@ function getSelectedModel({
 				info: modelInfo ? { ...lMStudioDefaultModelInfo, ...modelInfo } : undefined,
 			}
 		}
+		case "atomic-chat": {
+			const id = apiConfiguration.atomicChatModelId ?? ""
+			const modelInfo = atomicChatModels && atomicChatModels[apiConfiguration.atomicChatModelId!]
+			return {
+				id,
+				info: modelInfo ? { ...lMStudioDefaultModelInfo, ...modelInfo } : undefined,
+			}
+		}
 		case "vscode-lm": {
 			const id = apiConfiguration?.vsCodeLmModelSelector
 				? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index 01b6cd3e918..f4a8a590b32 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -515,6 +515,13 @@
 			"noModelsFound": "No draft models found. Please ensure LM Studio is running with Server Mode enabled.",
 			"description": "LM Studio allows you to run models locally on your computer. For instructions on how to get started, see their quickstart guide. You will also need to start LM Studio's local server feature to use it with this extension. Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected."
 		},
+		"atomicChat": {
+			"baseUrl": "Base URL (optional)",
+			"apiKey": "API key (optional)",
+			"apiKeyPlaceholder": "Leave empty for default local server",
+			"apiKeyHelp": "Only needed if your Atomic Chat server requires Bearer authentication.",
+			"description": "Atomic Chat runs local models and exposes an OpenAI-compatible API. Enable the local API server in Atomic Chat (default http://127.0.0.1:1337/v1), then pick a model below. Note: Roo Code uses complex prompts and works best with capable models; smaller local models may hit context limits."
+		},
 		"ollama": {
 			"baseUrl": "Base URL (optional)",
 			"modelId": "Model ID",
@@ -957,6 +964,7 @@
 		"modelId": {
 			"lmStudio": "e.g. meta-llama-3.1-8b-instruct",
 			"lmStudioDraft": "e.g. lmstudio-community/llama-3.2-1b-instruct",
+			"atomicChat": "e.g. gemma-3n-E4B-it-IQ4_XS",
 			"ollama": "e.g. llama3.1"
 		},
 		"numbers": {
@@ -970,6 +978,7 @@
 		"defaults": {
 			"ollamaUrl": "Default: http://localhost:11434",
 			"lmStudioUrl": "Default: http://localhost:1234",
+			"atomicChatUrl": "Default: http://127.0.0.1:1337",
 			"geminiUrl": "Default: https://generativelanguage.googleapis.com"
 		},
 		"labels": {
diff --git a/webview-ui/src/utils/__tests__/validate.spec.ts b/webview-ui/src/utils/__tests__/validate.spec.ts
index e0b13fd49fa..c54ad7a405f 100644
--- a/webview-ui/src/utils/__tests__/validate.spec.ts
+++ b/webview-ui/src/utils/__tests__/validate.spec.ts
@@ -43,6 +43,7 @@ describe("Model Validation Functions", () => {
 			litellm: {},
 			ollama: {},
 			lmstudio: {},
+			"atomic-chat": {},
 			"vercel-ai-gateway": {},
 			poe: {},
 		}
diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts
index f506171acce..938a026d635 100644
--- a/webview-ui/src/utils/validate.ts
+++ b/webview-ui/src/utils/validate.ts
@@ -103,6 +103,11 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri
 				return i18next.t("settings:validation.modelId")
 			}
 			break
+		case "atomic-chat":
+			if (!apiConfiguration.atomicChatModelId) {
+				return i18next.t("settings:validation.modelId")
+			}
+			break
 		case "vscode-lm":
 			if (!apiConfiguration.vsCodeLmModelSelector) {
 				return i18next.t("settings:validation.modelSelector")
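
For reference, a minimal sketch of the model-discovery call that getAtomicChatModels() in src/api/providers/fetchers/atomic-chat.ts performs. It assumes only an OpenAI-compatible GET {base}/v1/models endpoint with an optional Bearer header, exactly what the fetcher consumes; the base URL and key values here are illustrative, not part of the patch.

// Sketch only: mirrors the fetcher above using plain fetch instead of axios.
// "http://127.0.0.1:1337" is the default local server; the API key is optional.
async function listAtomicChatModelIds(base = "http://127.0.0.1:1337", apiKey?: string): Promise<string[]> {
	const headers: Record<string, string> = {}
	if (apiKey?.trim()) {
		headers.Authorization = `Bearer ${apiKey.trim()}`
	}
	const res = await fetch(`${base.replace(/\/+$/, "")}/v1/models`, { headers })
	const body = (await res.json()) as { data?: Array<{ id: string }> }
	// Each returned id becomes a ModelRecord entry seeded with openAiModelInfoSaneDefaults.
	return (body.data ?? []).map((m) => m.id)
}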
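
And a hedged sketch of exercising the new handler directly, the same way buildApiHandler() does for the "atomic-chat" case. It assumes the remaining ApiHandlerOptions fields are optional; the import path and model id are placeholders, and completePrompt() issues a single non-streaming chat completion against the local server.

// Sketch only: constructs AtomicChatHandler with the options added by this patch.
import { AtomicChatHandler } from "./src/api/providers/atomic-chat" // path is illustrative

async function main() {
	const handler = new AtomicChatHandler({
		atomicChatBaseUrl: "http://127.0.0.1:1337", // the handler appends "/v1" itself
		atomicChatApiKey: "", // optional; "noop" is sent when empty
		atomicChatModelId: "my-local-model", // hypothetical id; real ids come from GET /v1/models
	})

	// Single non-streaming completion; createMessage() is the streaming, tool-aware path.
	const reply = await handler.completePrompt("Reply with the word: ready")
	console.log(reply)
}

main().catch(console.error)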