mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-28 20:46:57 +02:00
fix(nvidia): align NIM provider metadata
Persist the NVIDIA_API_KEY marker in generated catalog output and mark bundled NVIDIA Chat Completions models as string-content compatible.

Fixes #73013.
Fixes #50107.
Refs #73014.
This commit is contained in:
@@ -12,6 +12,7 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
### Fixes
|
||||
|
||||
- NVIDIA/NIM: persist the `NVIDIA_API_KEY` provider marker and mark bundled NVIDIA Chat Completions models as string-content compatible, so NIM models load from `models.json` and OpenAI-compatible subagent calls send plain text content. Fixes #73013 and #50107; refs #73014. Thanks @bautrey, @iot2edge, @ifearghal, and @futhgar.
|
||||
- CLI/plugins: use plugin metadata snapshots for install slot selection and add opt-in plugin lifecycle timing traces, so plugin install avoids runtime-loading the plugin registry for metadata-only decisions. Thanks @shakkernerd.
|
||||
- fix(plugins): restrict bundled plugin dir resolution to trusted package roots. (#73275) Thanks @pgondhi987.
|
||||
- fix(security): prevent workspace PATH injection via service env and trash helpers. (#73264) Thanks @pgondhi987.
|
||||
|
||||
@@ -29,6 +29,9 @@
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"compat": {
|
||||
"requiresStringContent": true
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -42,6 +45,9 @@
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"compat": {
|
||||
"requiresStringContent": true
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -55,6 +61,9 @@
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"compat": {
|
||||
"requiresStringContent": true
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -68,6 +77,9 @@
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"compat": {
|
||||
"requiresStringContent": true
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -7,11 +7,15 @@ describe("nvidia provider catalog", () => {
|
||||
|
||||
expect(provider.baseUrl).toBe("https://integrate.api.nvidia.com/v1");
|
||||
expect(provider.api).toBe("openai-completions");
|
||||
expect(provider.apiKey).toBe("NVIDIA_API_KEY");
|
||||
expect(provider.models.map((model) => model.id)).toEqual([
|
||||
"nvidia/nemotron-3-super-120b-a12b",
|
||||
"moonshotai/kimi-k2.5",
|
||||
"minimaxai/minimax-m2.5",
|
||||
"z-ai/glm5",
|
||||
]);
|
||||
expect(provider.models.every((model) => model.compat?.requiresStringContent === true)).toBe(
|
||||
true,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -3,8 +3,11 @@ import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-sha
|
||||
import manifest from "./openclaw.plugin.json" with { type: "json" };
|
||||
|
||||
export function buildNvidiaProvider(): ModelProviderConfig {
|
||||
return buildManifestModelProviderConfig({
|
||||
providerId: "nvidia",
|
||||
catalog: manifest.modelCatalog.providers.nvidia,
|
||||
});
|
||||
return {
|
||||
...buildManifestModelProviderConfig({
|
||||
providerId: "nvidia",
|
||||
catalog: manifest.modelCatalog.providers.nvidia,
|
||||
}),
|
||||
apiKey: "NVIDIA_API_KEY",
|
||||
};
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user