diff --git a/index.ts b/index.ts index c878a1e0..4fe60501 100644 --- a/index.ts +++ b/index.ts @@ -332,6 +332,37 @@ function resolveProviderApiFromRuntimeConfig( return typeof api === "string" && api.trim() ? api.trim() : undefined; } +/** Resolve full provider config from runtime config if available. */ +function resolveProviderConfigFromRuntimeConfig( + runtimeConfig: unknown, + provider: string, +): Record<string, unknown> | undefined { + if (!isRecord(runtimeConfig)) { + return undefined; + } + const providers = (runtimeConfig as { models?: { providers?: Record<string, unknown> } }).models + ?.providers; + if (!providers || !isRecord(providers)) { + return undefined; + } + const value = findProviderConfigValue(providers, provider); + return isRecord(value) ? value : undefined; +} + +/** Pi-ai expects Ollama through the OpenAI-compatible /v1 surface. */ +function normalizeOllamaBaseUrl(baseUrl: string | undefined): string | undefined { + const trimmed = baseUrl?.trim(); + if (!trimmed) { + return undefined; + } + + const withoutTrailingSlash = trimmed.replace(/\/+$/, ""); + if (/\/v1$/i.test(withoutTrailingSlash)) { + return withoutTrailingSlash; + } + return `${withoutTrailingSlash}/v1`; +} + /** Resolve runtime.modelAuth from plugin runtime when available. 
*/ function getRuntimeModelAuth(api: OpenClawPluginApi): RuntimeModelAuth | undefined { const runtime = api.runtime as OpenClawPluginApi["runtime"] & { @@ -897,11 +928,54 @@ function createLcmDependencies(api: OpenClawPluginApi): LcmDependencies { } } + const loadedRuntimeConfig = (() => { + try { + return api.runtime.config.loadConfig(); + } catch { + return undefined; + } + })(); + + const providerConfig = + resolveProviderConfigFromRuntimeConfig(effectiveRuntimeConfig, providerId) || + resolveProviderConfigFromRuntimeConfig(api.config, providerId) || + resolveProviderConfigFromRuntimeConfig(loadedRuntimeConfig, providerId); + let effectiveProviderApi = + providerApi?.trim() || + resolveProviderApiFromRuntimeConfig(effectiveRuntimeConfig, providerId) || + resolveProviderApiFromRuntimeConfig(api.config, providerId) || + resolveProviderApiFromRuntimeConfig(loadedRuntimeConfig, providerId); + let providerLevelConfig: Record<string, unknown> = providerConfig ? { ...providerConfig } : {}; + let preferredApiKey = apiKey?.trim(); + + if ( + normalizeProviderId(providerId) === "ollama" && + normalizeProviderId(effectiveProviderApi ?? "ollama") === "ollama" + ) { + effectiveProviderApi = "openai-completions"; + const normalizedBaseUrl = normalizeOllamaBaseUrl( + typeof providerLevelConfig.baseUrl === "string" ? providerLevelConfig.baseUrl : undefined, + ); + providerLevelConfig = { + ...providerLevelConfig, + api: "openai-completions", + ...(normalizedBaseUrl ? { baseUrl: normalizedBaseUrl } : {}), + }; + if (!preferredApiKey && typeof providerConfig?.apiKey === "string" && providerConfig.apiKey.trim()) { + preferredApiKey = providerConfig.apiKey.trim(); + } + if (!preferredApiKey) { + preferredApiKey = "ollama-local"; + } + } + const knownModel = typeof mod.getModel === "function" ? 
mod.getModel(providerId, modelId) : undefined; const fallbackApi = - providerApi?.trim() || - resolveProviderApiFromRuntimeConfig(effectiveRuntimeConfig, providerId) || + effectiveProviderApi?.trim() || + (typeof providerLevelConfig.api === "string" && providerLevelConfig.api.trim() + ? providerLevelConfig.api.trim() + : undefined) || (() => { if (typeof mod.getModels !== "function") { return undefined; @@ -915,20 +989,12 @@ function createLcmDependencies(api: OpenClawPluginApi): LcmDependencies { })() || inferApiFromProvider(providerId); - // Resolve provider-level config (baseUrl, headers, etc.) from runtime config. - // Custom/proxy providers (e.g. bailian, local proxies) store their baseUrl and - // apiKey under models.providers.<provider> in openclaw.json. Without this - // lookup the resolved model object lacks baseUrl, which crashes pi-ai's - // detectCompat() ("Cannot read properties of undefined (reading 'includes')"), - // and the apiKey is unresolvable, causing 401 errors. See #19. - const providerLevelConfig: Record<string, unknown> = (() => { - if (!isRecord(effectiveRuntimeConfig)) return {}; - const providers = (effectiveRuntimeConfig as { models?: { providers?: Record<string, unknown> } }) - .models?.providers; - if (!providers) return {}; - const cfg = findProviderConfigValue(providers, providerId); - return isRecord(cfg) ? cfg : {}; - })(); + const resolvedKnownModelApi = + effectiveProviderApi?.trim() || + (typeof providerLevelConfig.api === "string" && providerLevelConfig.api.trim() + ? providerLevelConfig.api.trim() + : undefined) || + knownModel?.api; const resolvedModel = isRecord(knownModel) && @@ -939,7 +1005,7 @@ function createLcmDependencies(api: OpenClawPluginApi): LcmDependencies { ...knownModel, id: knownModel.id, provider: knownModel.provider, - api: knownModel.api, + api: resolvedKnownModelApi, // Merge baseUrl/headers from provider config if not already on the model. // Always set baseUrl to a string — pi-ai's detectCompat() crashes when // baseUrl is undefined. 
@@ -978,7 +1044,7 @@ function createLcmDependencies(api: OpenClawPluginApi): LcmDependencies { : {}), }; - let resolvedApiKey = apiKey?.trim(); + let resolvedApiKey = preferredApiKey; if (!resolvedApiKey && modelAuth) { try { resolvedApiKey = resolveApiKeyFromAuthResult( diff --git a/src/summarize.ts b/src/summarize.ts index 6c4f60c6..18261517 100644 --- a/src/summarize.ts +++ b/src/summarize.ts @@ -37,16 +37,23 @@ function normalizeProviderId(provider: string): string { return provider.trim().toLowerCase(); } -/** - * Resolve provider API override from legacy OpenClaw config. - * - * When model ids are custom/forward-compat, this hint allows deps.complete to - * construct a valid pi-ai Model object even if getModel(provider, model) misses. - */ -function resolveProviderApiFromLegacyConfig( +type LegacyProviderConfig = Record<string, unknown> & { + api?: unknown; + baseUrl?: unknown; + apiKey?: unknown; +}; + +type SummarizerRuntimeProviderAdaptation = { + providerApi?: string; + runtimeConfig: unknown; + apiKey?: string; +}; + +/** Resolve full provider config from legacy OpenClaw runtime config. 
*/ +function resolveProviderConfigFromLegacyConfig( config: unknown, provider: string, -): string | undefined { +): LegacyProviderConfig | undefined { if (!config || typeof config !== "object") { return undefined; } @@ -58,10 +65,7 @@ function resolveProviderApiFromLegacyConfig( const direct = providers[provider]; if (direct && typeof direct === "object") { - const api = (direct as { api?: unknown }).api; - if (typeof api === "string" && api.trim()) { - return api.trim(); - } + return direct as LegacyProviderConfig; } const normalizedProvider = normalizeProviderId(provider); @@ -72,14 +76,103 @@ function resolveProviderApiFromLegacyConfig( if (!value || typeof value !== "object") { continue; } - const api = (value as { api?: unknown }).api; - if (typeof api === "string" && api.trim()) { - return api.trim(); - } + return value as LegacyProviderConfig; } return undefined; } +/** Copy legacy runtime config while overriding a single provider entry. */ +function overrideLegacyProviderConfig( + runtimeConfig: unknown, + provider: string, + override: LegacyProviderConfig, +): unknown { + if (!isRecord(runtimeConfig)) { + return runtimeConfig; + } + + const models = isRecord(runtimeConfig.models) ? runtimeConfig.models : {}; + const providers = isRecord(models.providers) ? models.providers : {}; + const nextProviders: Record<string, unknown> = { ...providers }; + + let providerKey = provider; + const normalizedProvider = normalizeProviderId(provider); + for (const key of Object.keys(providers)) { + if (normalizeProviderId(key) === normalizedProvider) { + providerKey = key; + break; + } + } + + nextProviders[providerKey] = override; + + return { + ...runtimeConfig, + models: { + ...models, + providers: nextProviders, + }, + }; +} + +/** Pi-ai expects Ollama through the OpenAI-compatible `/v1` surface. 
*/ +function normalizeOllamaBaseUrl(baseUrl: string | undefined): string | undefined { + const trimmed = baseUrl?.trim(); + if (!trimmed) { + return undefined; + } + + const withoutTrailingSlash = trimmed.replace(/\/+$/, ""); + if (/\/v1$/i.test(withoutTrailingSlash)) { + return withoutTrailingSlash; + } + return `${withoutTrailingSlash}/v1`; +} + +/** Adapt native OpenClaw Ollama provider config into pi-ai compatible inputs. */ +function adaptSummarizerRuntimeProvider(params: { + runtimeConfig: unknown; + provider: string; + apiKey?: string; +}): SummarizerRuntimeProviderAdaptation { + const providerConfig = resolveProviderConfigFromLegacyConfig(params.runtimeConfig, params.provider); + const providerApi = + typeof providerConfig?.api === "string" && providerConfig.api.trim() + ? providerConfig.api.trim() + : undefined; + + if (!providerApi || normalizeProviderId(providerApi) !== "ollama") { + return { + providerApi, + runtimeConfig: params.runtimeConfig, + apiKey: params.apiKey, + }; + } + + const normalizedBaseUrl = normalizeOllamaBaseUrl( + typeof providerConfig.baseUrl === "string" ? providerConfig.baseUrl : undefined, + ); + const adaptedProviderConfig: LegacyProviderConfig = { + ...providerConfig, + api: "openai-completions", + ...(normalizedBaseUrl ? { baseUrl: normalizedBaseUrl } : {}), + }; + const configApiKey = + typeof providerConfig.apiKey === "string" && providerConfig.apiKey.trim() + ? providerConfig.apiKey.trim() + : undefined; + + return { + providerApi: "openai-completions", + runtimeConfig: overrideLegacyProviderConfig( + params.runtimeConfig, + params.provider, + adaptedProviderConfig, + ), + apiKey: params.apiKey ?? configApiKey ?? "ollama-local", + }; +} + /** Approximate token estimate used for target-sizing prompts. 
*/ function estimateTokens(text: string): number { return Math.ceil(text.length / 4); @@ -670,7 +763,6 @@ export async function createLcmSummarizeFromLegacyParams(params: { typeof params.legacyParams.agentDir === "string" && params.legacyParams.agentDir.trim() ? params.legacyParams.agentDir.trim() : undefined; - const providerApi = resolveProviderApiFromLegacyConfig(params.legacyParams.config, provider); const condensedTargetTokens = Number.isFinite(params.deps.config.condensedTargetTokens) && @@ -689,9 +781,14 @@ export async function createLcmSummarizeFromLegacyParams(params: { const mode: SummaryMode = aggressive ? "aggressive" : "normal"; const isCondensed = options?.isCondensed === true; - const apiKey = await params.deps.getApiKey(provider, model, { + const resolvedApiKey = await params.deps.getApiKey(provider, model, { profileId: authProfileId, }); + const runtimeProvider = adaptSummarizerRuntimeProvider({ + runtimeConfig: params.legacyParams.config, + provider, + apiKey: resolvedApiKey, + }); const targetTokens = resolveTargetTokens({ inputTokens: estimateTokens(text), mode, @@ -720,11 +817,11 @@ export async function createLcmSummarizeFromLegacyParams(params: { const result = await params.deps.complete({ provider, model, - apiKey, - providerApi, + apiKey: runtimeProvider.apiKey, + providerApi: runtimeProvider.providerApi, authProfileId, agentDir, - runtimeConfig: params.legacyParams.config, + runtimeConfig: runtimeProvider.runtimeConfig, system: LCM_SUMMARIZER_SYSTEM_PROMPT, messages: [ { @@ -778,11 +875,11 @@ export async function createLcmSummarizeFromLegacyParams(params: { const retryResult = await params.deps.complete({ provider, model, - apiKey, - providerApi, + apiKey: runtimeProvider.apiKey, + providerApi: runtimeProvider.providerApi, authProfileId, agentDir, - runtimeConfig: params.legacyParams.config, + runtimeConfig: runtimeProvider.runtimeConfig, system: LCM_SUMMARIZER_SYSTEM_PROMPT, messages: [ { diff --git 
a/test/index-complete-provider-config.test.ts b/test/index-complete-provider-config.test.ts index 85bdf2d4..f2b0c2a5 100644 --- a/test/index-complete-provider-config.test.ts +++ b/test/index-complete-provider-config.test.ts @@ -156,7 +156,7 @@ describe("createLcmDependencies.complete provider config resolution", () => { model: "unit-model", }); - expect(loadConfig).toHaveBeenCalledTimes(1); + expect(loadConfig).toHaveBeenCalled(); expect(piAiMock.completeSimple).toHaveBeenCalledTimes(1); expect(piAiMock.completeSimple).toHaveBeenCalledWith( expect.objectContaining({ @@ -259,4 +259,91 @@ describe("createLcmDependencies.complete provider config resolution", () => { expect.any(Object), ); }); + + it("adapts native ollama known models onto the openai-compatible local /v1 lane", async () => { + piAiMock.getModel.mockReturnValue({ + id: "qwen2.5:14b", + provider: "ollama", + api: "ollama", + name: "Qwen 2.5 14B", + }); + + await callComplete({ + loadConfigResult: { + models: { + providers: { + ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434", + }, + }, + }, + }, + provider: "ollama", + model: "qwen2.5:14b", + runtimeConfig: { + models: { + providers: { + ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434", + }, + }, + }, + }, + }); + + expect(piAiMock.completeSimple).toHaveBeenCalledWith( + expect.objectContaining({ + id: "qwen2.5:14b", + provider: "ollama", + api: "openai-completions", + baseUrl: "http://127.0.0.1:11434/v1", + }), + expect.any(Object), + expect.objectContaining({ + apiKey: "ollama-local", + maxTokens: 256, + }), + ); + }); + + it("falls back to api.config provider details when runtimeConfig omits them", async () => { + piAiMock.getModel.mockReturnValue({ + id: "qwen2.5:14b", + provider: "ollama", + api: "ollama", + name: "Qwen 2.5 14B", + }); + + await callComplete({ + loadConfigResult: { + models: { + providers: { + ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434", + apiKey: "ollama-local", + }, + }, + }, + }, + 
provider: "ollama", + model: "qwen2.5:14b", + runtimeConfig: {}, + }); + + expect(piAiMock.completeSimple).toHaveBeenCalledWith( + expect.objectContaining({ + id: "qwen2.5:14b", + provider: "ollama", + api: "openai-completions", + baseUrl: "http://127.0.0.1:11434/v1", + }), + expect.any(Object), + expect.objectContaining({ + apiKey: "ollama-local", + }), + ); + }); }); diff --git a/test/summarize.test.ts b/test/summarize.test.ts index 27d987b9..d8d35382 100644 --- a/test/summarize.test.ts +++ b/test/summarize.test.ts @@ -152,6 +152,96 @@ describe("createLcmSummarizeFromLegacyParams", () => { expect(completeMock.mock.calls[0]?.[0]?.apiKey).toBe("resolved-api-key"); }); + it("adapts native Ollama provider config for pi-ai with normalized /v1 baseUrl", async () => { + const deps = makeDeps({ + resolveModel: vi.fn(() => ({ + provider: "ollama", + model: "llama3.2", + })), + getApiKey: vi.fn(async () => undefined), + }); + + const summarize = await createLcmSummarizeFromLegacyParams({ + deps, + legacyParams: { + provider: "ollama", + model: "llama3.2", + config: { + models: { + providers: { + ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434/", + }, + }, + }, + }, + }, + }); + + await summarize!("Summary input"); + + const request = vi.mocked(deps.complete).mock.calls[0]?.[0]; + expect(request?.providerApi).toBe("openai-completions"); + expect(request?.apiKey).toBe("ollama-local"); + expect(request?.runtimeConfig).toMatchObject({ + models: { + providers: { + ollama: { + api: "openai-completions", + baseUrl: "http://127.0.0.1:11434/v1", + }, + }, + }, + }); + }); + + it("uses config-backed apiKey for native Ollama when runtime auth lookup returns nothing", async () => { + const deps = makeDeps({ + resolveModel: vi.fn(() => ({ + provider: "ollama", + model: "llama3.2", + })), + getApiKey: vi.fn(async () => undefined), + }); + + const summarize = await createLcmSummarizeFromLegacyParams({ + deps, + legacyParams: { + provider: "ollama", + model: "llama3.2", + 
config: { + models: { + providers: { + Ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434/v1", + apiKey: "config-ollama-key", + }, + }, + }, + }, + }, + }); + + await summarize!("Summary input"); + + const request = vi.mocked(deps.complete).mock.calls[0]?.[0]; + expect(request?.providerApi).toBe("openai-completions"); + expect(request?.apiKey).toBe("config-ollama-key"); + expect(request?.runtimeConfig).toMatchObject({ + models: { + providers: { + Ollama: { + api: "openai-completions", + baseUrl: "http://127.0.0.1:11434/v1", + apiKey: "config-ollama-key", + }, + }, + }, + }); + }); + it("falls back deterministically when model returns empty summary output after retry", async () => { const deps = makeDeps({ complete: vi.fn(async () => ({