fix(channel): store raw user message and skip memory recall with history
Two fixes for conversation history quality: 1. Store the raw msg.content in ConversationHistoryMap instead of enriched_message — memory context is ephemeral per request, and persisting it pollutes future turns. 2. Skip memory recall when conversation history already exists — prior turns provide the needed context, and memory recall only adds noise that can mislead the model (e.g. stale 'seen' memory entries overshadowing a code variable named 'seen' in the current conversation).
This commit is contained in:
parent
8cafeb02e8
commit
8c826e581c
1 changed file with 17 additions and 15 deletions
|
|
@ -1111,10 +1111,6 @@ async fn process_channel_message(
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let memory_context =
|
|
||||||
build_memory_context(ctx.memory.as_ref(), &msg.content, ctx.min_relevance_score).await;
|
|
||||||
|
|
||||||
if ctx.auto_save_memory && msg.content.chars().count() >= AUTOSAVE_MIN_MESSAGE_CHARS {
|
if ctx.auto_save_memory && msg.content.chars().count() >= AUTOSAVE_MIN_MESSAGE_CHARS {
|
||||||
let autosave_key = conversation_memory_key(&msg);
|
let autosave_key = conversation_memory_key(&msg);
|
||||||
let _ = ctx
|
let _ = ctx
|
||||||
|
|
@ -1128,15 +1124,16 @@ async fn process_channel_message(
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let enriched_message = if memory_context.is_empty() {
|
|
||||||
msg.content.clone()
|
|
||||||
} else {
|
|
||||||
format!("{memory_context}{}", msg.content)
|
|
||||||
};
|
|
||||||
|
|
||||||
println!(" ⏳ Processing message...");
|
println!(" ⏳ Processing message...");
|
||||||
let started_at = Instant::now();
|
let started_at = Instant::now();
|
||||||
|
|
||||||
|
let had_prior_history = ctx
|
||||||
|
.conversation_histories
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|e| e.into_inner())
|
||||||
|
.get(&history_key)
|
||||||
|
.is_some_and(|turns| !turns.is_empty());
|
||||||
|
|
||||||
// Preserve user turn before the LLM call so interrupted requests keep context.
|
// Preserve user turn before the LLM call so interrupted requests keep context.
|
||||||
append_sender_turn(ctx.as_ref(), &history_key, ChatMessage::user(&msg.content));
|
append_sender_turn(ctx.as_ref(), &history_key, ChatMessage::user(&msg.content));
|
||||||
|
|
||||||
|
|
@ -1149,11 +1146,16 @@ async fn process_channel_message(
|
||||||
.cloned()
|
.cloned()
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
let mut prior_turns = normalize_cached_channel_turns(prior_turns_raw);
|
let mut prior_turns = normalize_cached_channel_turns(prior_turns_raw);
|
||||||
// Keep persisted history clean (raw user text), but inject memory context
|
|
||||||
// for the current provider call by enriching the newest user turn only.
|
// Only enrich with memory context when there is no prior conversation
|
||||||
|
// history. Follow-up turns already include context from previous messages.
|
||||||
|
if !had_prior_history {
|
||||||
|
let memory_context =
|
||||||
|
build_memory_context(ctx.memory.as_ref(), &msg.content, ctx.min_relevance_score).await;
|
||||||
if let Some(last_turn) = prior_turns.last_mut() {
|
if let Some(last_turn) = prior_turns.last_mut() {
|
||||||
if last_turn.role == "user" {
|
if last_turn.role == "user" && !memory_context.is_empty() {
|
||||||
last_turn.content = enriched_message.clone();
|
last_turn.content = format!("{memory_context}{}", msg.content);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue