From e3ca2315d337535fd76c39d12e40cf4611b1e497 Mon Sep 17 00:00:00 2001 From: Argenis Date: Mon, 16 Feb 2026 23:23:02 -0500 Subject: [PATCH] feat(nvidia): add NVIDIA NIM provider using NVIDIA_API_KEY - Adds NVIDIA NIM (`nvidia`, `nvidia-nim`, `build.nvidia.com`) as a selectable provider in the onboarding wizard and the provider factory - Reads the API key from the `NVIDIA_API_KEY` environment variable, matching NVIDIA's official documentation - Adds model suggestions for NVIDIA NIM provider in the onboarding wizard Co-Authored-By: Claude Opus 4.6 --- src/onboard/wizard.rs | 14 +++++++++++++- src/providers/mod.rs | 12 ++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/onboard/wizard.rs b/src/onboard/wizard.rs index cf35181..c6bd6ae 100644 --- a/src/onboard/wizard.rs +++ b/src/onboard/wizard.rs @@ -1276,7 +1276,7 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String)> { // ── Tier selection ── let tiers = vec![ "⭐ Recommended (OpenRouter, Venice, Anthropic, OpenAI, Gemini)", - "⚡ Fast inference (Groq, Fireworks, Together AI)", + "⚡ Fast inference (Groq, Fireworks, Together AI, NVIDIA NIM)", "🌐 Gateway / proxy (Vercel AI, Cloudflare AI, Amazon Bedrock)", "🔬 Specialized (Moonshot/Kimi, GLM/Zhipu, MiniMax, Qianfan, Z.AI, Synthetic, OpenCode Zen, Cohere)", "🏠 Local / private (Ollama — no API key needed)", @@ -1311,6 +1311,7 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String)> { ("groq", "Groq — ultra-fast LPU inference"), ("fireworks", "Fireworks AI — fast open-source inference"), ("together-ai", "Together AI — open-source model hosting"), + ("nvidia", "NVIDIA NIM — DeepSeek, Llama, & more"), ], 2 => vec![ ("vercel", "Vercel AI Gateway"), @@ -1452,6 +1453,7 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String)> { "minimax" => "https://www.minimaxi.com/user-center/basic-information", "vercel" => "https://vercel.com/account/tokens", "cloudflare" => "https://dash.cloudflare.com/profile/api-tokens", + "nvidia" | "nvidia-nim" | "build.nvidia.com" => "https://build.nvidia.com/", "bedrock" => 
"https://console.aws.amazon.com/iam", "gemini" => "https://aistudio.google.com/app/apikey", _ => "", @@ -1573,6 +1575,12 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String)> { ), ("mistralai/Mixtral-8x22B-Instruct-v0.1", "Mixtral 8x22B"), ], + "nvidia" | "nvidia-nim" | "build.nvidia.com" => vec![ + ("deepseek-ai/deepseek-r1", "DeepSeek R1 (reasoning)"), + ("meta/llama-3.1-70b-instruct", "Llama 3.1 70B Instruct"), + ("mistralai/mistral-7b-instruct-v0.3", "Mistral 7B Instruct"), + ("meta/llama-3.1-405b-instruct", "Llama 3.1 405B Instruct"), + ], "cohere" => vec![ ("command-r-plus", "Command R+ (flagship)"), ("command-r", "Command R (fast)"), @@ -1796,6 +1804,7 @@ fn provider_env_var(name: &str) -> &'static str { "cloudflare" | "cloudflare-ai" => "CLOUDFLARE_API_KEY", "bedrock" | "aws-bedrock" => "AWS_ACCESS_KEY_ID", "gemini" => "GEMINI_API_KEY", + "nvidia" | "nvidia-nim" | "build.nvidia.com" => "NVIDIA_API_KEY", _ => "API_KEY", } } @@ -4460,6 +4469,9 @@ mod tests { assert_eq!(provider_env_var("google"), "GEMINI_API_KEY"); // alias assert_eq!(provider_env_var("google-gemini"), "GEMINI_API_KEY"); // alias assert_eq!(provider_env_var("gemini"), "GEMINI_API_KEY"); + assert_eq!(provider_env_var("nvidia"), "NVIDIA_API_KEY"); + assert_eq!(provider_env_var("nvidia-nim"), "NVIDIA_API_KEY"); // alias + assert_eq!(provider_env_var("build.nvidia.com"), "NVIDIA_API_KEY"); // alias } #[test] diff --git a/src/providers/mod.rs b/src/providers/mod.rs index 5e91e40..86517d6 100644 --- a/src/providers/mod.rs +++ b/src/providers/mod.rs @@ -130,6 +130,7 @@ fn resolve_api_key(name: &str, api_key: Option<&str>) -> Option { vec!["DASHSCOPE_API_KEY"] } "zai" | "z.ai" => vec!["ZAI_API_KEY"], + "nvidia" | "nvidia-nim" | "build.nvidia.com" => vec!["NVIDIA_API_KEY"], "synthetic" => vec!["SYNTHETIC_API_KEY"], "opencode" | "opencode-zen" => vec!["OPENCODE_API_KEY"], "vercel" | "vercel-ai" => vec!["VERCEL_API_KEY"], @@ -279,6 +280,9 @@ pub fn create_provider(name: &str, 
api_key: Option<&str>) -> anyhow::Result Ok(Box::new(OpenAiCompatibleProvider::new( "GitHub Copilot", "https://api.githubcopilot.com", key, AuthStyle::Bearer, ))), + "nvidia" | "nvidia-nim" | "build.nvidia.com" => Ok(Box::new(OpenAiCompatibleProvider::new( + "NVIDIA NIM", "https://integrate.api.nvidia.com/v1", key, AuthStyle::Bearer, + ))), // ── Bring Your Own Provider (custom URL) ─────────── // Format: "custom:https://your-api.com" or "custom:http://localhost:1234" @@ -603,6 +607,13 @@ mod tests { assert!(create_provider("github-copilot", Some("key")).is_ok()); } + #[test] + fn factory_nvidia() { + assert!(create_provider("nvidia", Some("nvapi-test")).is_ok()); + assert!(create_provider("nvidia-nim", Some("nvapi-test")).is_ok()); + assert!(create_provider("build.nvidia.com", Some("nvapi-test")).is_ok()); + } + // ── Custom / BYOP provider ───────────────────────────── #[test] @@ -792,6 +803,7 @@ mod tests { "perplexity", "cohere", "copilot", + "nvidia", ]; for name in providers { assert!(