fix(onboard): refresh MiniMax defaults and endpoint (#299)

This commit is contained in:
Chummy 2026-02-16 23:40:44 +08:00
parent a85fcf43c3
commit fac1b780cd
6 changed files with 168 additions and 7 deletions

View file

@ -186,7 +186,7 @@ async fn process_channel_message(ctx: Arc<ChannelRuntimeContext>, msg: traits::C
&mut history,
ctx.tools_registry.as_ref(),
ctx.observer.as_ref(),
ctx.provider_name.as_str(),
"channels",
ctx.model.as_str(),
ctx.temperature,
),

View file

@ -919,8 +919,7 @@ mod tests {
#[test]
fn telegram_split_at_newline() {
let line = "Line of text\n";
let text_block = line.repeat(TELEGRAM_MAX_MESSAGE_LENGTH / line.len() + 1);
let text_block = "Line of text\n".repeat(TELEGRAM_MAX_MESSAGE_LENGTH / 13 + 1);
let chunks = split_message_for_telegram(&text_block);
assert!(chunks.len() >= 2);
for chunk in chunks {

View file

@ -428,11 +428,20 @@ fn canonical_provider_name(provider_name: &str) -> &str {
}
/// Pick a sensible default model for the given provider.
/// Curated MiniMax models offered during onboarding, as `(model_id, human label)` pairs.
/// The first entry is the recommended default — `default_model_for_provider("minimax")`
/// returns the same id, and tests assert the two stay in sync.
const MINIMAX_ONBOARD_MODELS: [(&str, &str); 5] = [
("MiniMax-M2.5", "MiniMax M2.5 (latest, recommended)"),
("MiniMax-M2.5-highspeed", "MiniMax M2.5 High-Speed (faster)"),
("MiniMax-M2.1", "MiniMax M2.1 (stable)"),
("MiniMax-M2.1-highspeed", "MiniMax M2.1 High-Speed (faster)"),
("MiniMax-M2", "MiniMax M2 (legacy)"),
];
fn default_model_for_provider(provider: &str) -> String {
match canonical_provider_name(provider) {
"anthropic" => "claude-sonnet-4-20250514".into(),
"openai" => "gpt-5.2".into(),
"glm" | "zhipu" | "zai" | "z.ai" => "glm-5".into(),
"minimax" => "MiniMax-M2.5".into(),
"ollama" => "llama3.2".into(),
"groq" => "llama-3.3-70b-versatile".into(),
"deepseek" => "deepseek-chat".into(),
@ -1454,7 +1463,131 @@ fn setup_provider(workspace_dir: &Path) -> Result<(String, String, String)> {
};
// ── Model selection ──
let mut model_options = curated_models_for_provider(provider_name);
let models: Vec<(&str, &str)> = match provider_name {
"openrouter" => vec![
(
"anthropic/claude-sonnet-4",
"Claude Sonnet 4 (balanced, recommended)",
),
(
"anthropic/claude-3.5-sonnet",
"Claude 3.5 Sonnet (fast, affordable)",
),
("openai/gpt-4o", "GPT-4o (OpenAI flagship)"),
("openai/gpt-4o-mini", "GPT-4o Mini (fast, cheap)"),
(
"google/gemini-2.0-flash-001",
"Gemini 2.0 Flash (Google, fast)",
),
(
"meta-llama/llama-3.3-70b-instruct",
"Llama 3.3 70B (open source)",
),
("deepseek/deepseek-chat", "DeepSeek Chat (affordable)"),
],
"anthropic" => vec![
(
"claude-sonnet-4-20250514",
"Claude Sonnet 4 (balanced, recommended)",
),
("claude-3-5-sonnet-20241022", "Claude 3.5 Sonnet (fast)"),
(
"claude-3-5-haiku-20241022",
"Claude 3.5 Haiku (fastest, cheapest)",
),
],
"openai" => vec![
("gpt-4o", "GPT-4o (flagship)"),
("gpt-4o-mini", "GPT-4o Mini (fast, cheap)"),
("o1-mini", "o1-mini (reasoning)"),
],
"venice" => vec![
("llama-3.3-70b", "Llama 3.3 70B (default, fast)"),
("claude-opus-45", "Claude Opus 4.5 via Venice (strongest)"),
("llama-3.1-405b", "Llama 3.1 405B (largest open source)"),
],
"groq" => vec![
(
"llama-3.3-70b-versatile",
"Llama 3.3 70B (fast, recommended)",
),
("llama-3.1-8b-instant", "Llama 3.1 8B (instant)"),
("mixtral-8x7b-32768", "Mixtral 8x7B (32K context)"),
],
"mistral" => vec![
("mistral-large-latest", "Mistral Large (flagship)"),
("codestral-latest", "Codestral (code-focused)"),
("mistral-small-latest", "Mistral Small (fast, cheap)"),
],
"deepseek" => vec![
("deepseek-chat", "DeepSeek Chat (V3, recommended)"),
("deepseek-reasoner", "DeepSeek Reasoner (R1)"),
],
"xai" => vec![
("grok-3", "Grok 3 (flagship)"),
("grok-3-mini", "Grok 3 Mini (fast)"),
],
"perplexity" => vec![
("sonar-pro", "Sonar Pro (search + reasoning)"),
("sonar", "Sonar (search, fast)"),
],
"fireworks" => vec![
(
"accounts/fireworks/models/llama-v3p3-70b-instruct",
"Llama 3.3 70B",
),
(
"accounts/fireworks/models/mixtral-8x22b-instruct",
"Mixtral 8x22B",
),
],
"together" => vec![
(
"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"Llama 3.1 70B Turbo",
),
(
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
"Llama 3.1 8B Turbo",
),
("mistralai/Mixtral-8x22B-Instruct-v0.1", "Mixtral 8x22B"),
],
"cohere" => vec![
("command-r-plus", "Command R+ (flagship)"),
("command-r", "Command R (fast)"),
],
"moonshot" => vec![
("moonshot-v1-128k", "Moonshot V1 128K"),
("moonshot-v1-32k", "Moonshot V1 32K"),
],
"glm" | "zhipu" | "zai" | "z.ai" => vec![
("glm-5", "GLM-5 (latest)"),
("glm-4-plus", "GLM-4 Plus (flagship)"),
("glm-4-flash", "GLM-4 Flash (fast)"),
],
"minimax" => MINIMAX_ONBOARD_MODELS.to_vec(),
"ollama" => vec![
("llama3.2", "Llama 3.2 (recommended local)"),
("mistral", "Mistral 7B"),
("codellama", "Code Llama"),
("phi3", "Phi-3 (small, fast)"),
],
"gemini" | "google" | "google-gemini" => vec![
("gemini-2.0-flash", "Gemini 2.0 Flash (fast, recommended)"),
(
"gemini-2.0-flash-lite",
"Gemini 2.0 Flash Lite (fastest, cheapest)",
),
("gemini-1.5-pro", "Gemini 1.5 Pro (best quality)"),
("gemini-1.5-flash", "Gemini 1.5 Flash (balanced)"),
],
_ => vec![("default", "Default model")],
};
let mut model_options: Vec<(String, String)> = models
.into_iter()
.map(|(model_id, label)| (model_id.to_string(), label.to_string()))
.collect();
let mut live_options: Option<Vec<(String, String)>> = None;
if supports_live_model_fetch(provider_name) {
@ -4206,4 +4339,20 @@ mod tests {
fn provider_env_var_unknown_falls_back() {
    // A provider we have no mapping for should resolve to the generic key.
    let resolved = provider_env_var("some-new-provider");
    assert_eq!(resolved, "API_KEY");
}
#[test]
fn default_model_for_minimax_is_m2_5() {
    // MiniMax onboarding should pick the newest M2.5 model by default.
    let picked = default_model_for_provider("minimax");
    assert_eq!(picked, "MiniMax-M2.5");
}
#[test]
fn minimax_onboard_models_include_m2_variants() {
    // Pull just the model ids out of the curated (id, label) pairs.
    let ids: Vec<&str> = MINIMAX_ONBOARD_MODELS.iter().map(|&(id, _)| id).collect();
    // The recommended default must come first.
    assert_eq!(ids.first().copied(), Some("MiniMax-M2.5"));
    // The stable M2.1 tier (including the high-speed variant) must remain listed.
    for expected in ["MiniMax-M2.1", "MiniMax-M2.1-highspeed"] {
        assert!(ids.contains(&expected));
    }
}
}

View file

@ -584,7 +584,7 @@ mod tests {
make_provider("Venice", "https://api.venice.ai", None),
make_provider("Moonshot", "https://api.moonshot.cn", None),
make_provider("GLM", "https://open.bigmodel.cn", None),
make_provider("MiniMax", "https://api.minimax.chat", None),
make_provider("MiniMax", "https://api.minimaxi.com/v1", None),
make_provider("Groq", "https://api.groq.com/openai", None),
make_provider("Mistral", "https://api.mistral.ai", None),
make_provider("xAI", "https://api.x.ai", None),
@ -793,6 +793,16 @@ mod tests {
);
}
#[test]
fn chat_completions_url_minimax() {
    // The MiniMax OpenAI-compatible base URL carries the /v1 path segment;
    // the chat completions route must be appended after it, not replace it.
    let provider = make_provider("minimax", "https://api.minimaxi.com/v1", None);
    let url = provider.chat_completions_url();
    assert_eq!(url, "https://api.minimaxi.com/v1/chat/completions");
}
#[test]
fn chat_completions_url_glm() {
// GLM (BigModel) uses /api/paas/v4 base path

View file

@ -221,7 +221,10 @@ pub fn create_provider(name: &str, api_key: Option<&str>) -> anyhow::Result<Box<
"GLM", "https://open.bigmodel.cn/api/paas/v4", key, AuthStyle::Bearer,
))),
"minimax" => Ok(Box::new(OpenAiCompatibleProvider::new(
"MiniMax", "https://api.minimax.chat", key, AuthStyle::Bearer,
"MiniMax",
"https://api.minimaxi.com/v1",
key,
AuthStyle::Bearer,
))),
"bedrock" | "aws-bedrock" => Ok(Box::new(OpenAiCompatibleProvider::new(
"Amazon Bedrock",

View file

@ -558,7 +558,7 @@ mod tests {
use std::path::Path;
use tempfile::TempDir;
fn test_tool(dir: &Path) -> GitOperationsTool {
fn test_tool(dir: &std::path::Path) -> GitOperationsTool {
let security = Arc::new(SecurityPolicy {
autonomy: AutonomyLevel::Supervised,
..SecurityPolicy::default()