Re-add tests, remove markdown files

This commit is contained in:
Alex Gorevski 2026-02-17 16:08:53 -08:00 committed by Chummy
parent e2634c72c2
commit 9a6fa76825
17 changed files with 1352 additions and 0 deletions

View file

@@ -348,4 +348,76 @@ mod tests {
let result = cache.get(&key).unwrap();
assert_eq!(result.as_deref(), Some("はい、Rustは素晴らしい"));
}
// ── §4.4 Cache eviction under pressure tests ─────────────
#[test]
fn lru_eviction_keeps_most_recent() {
    let dir = TempDir::new().unwrap();
    let cache = ResponseCache::new(dir.path(), 60, 3).unwrap();

    // Fill the cache exactly to its capacity of three entries.
    for idx in 0..3 {
        let k = ResponseCache::cache_key("gpt-4", None, &format!("prompt {idx}"));
        cache
            .put(&k, "gpt-4", &format!("response {idx}"), 10)
            .unwrap();
    }

    // Touch the oldest entry so it becomes the most recently used.
    let first_key = ResponseCache::cache_key("gpt-4", None, "prompt 0");
    let _ = cache.get(&first_key).unwrap();

    // A fourth insert pushes the cache past capacity and triggers eviction.
    let fourth_key = ResponseCache::cache_key("gpt-4", None, "prompt 3");
    cache.put(&fourth_key, "gpt-4", "response 3", 10).unwrap();

    let (count, _, _) = cache.stats().unwrap();
    assert!(count <= 3, "cache must not exceed max_entries");

    // The freshly touched entry must not have been the eviction victim.
    let survivor = cache.get(&first_key).unwrap();
    assert!(
        survivor.is_some(),
        "recently accessed entry should survive LRU eviction"
    );
}
#[test]
fn cache_handles_zero_max_entries() {
    let dir = TempDir::new().unwrap();
    // A capacity of zero is degenerate but must still be a legal configuration.
    let cache = ResponseCache::new(dir.path(), 60, 0).unwrap();
    let k = ResponseCache::cache_key("gpt-4", None, "test");

    // Inserting into a zero-capacity cache must be a safe no-op, not a panic.
    cache.put(&k, "gpt-4", "response", 10).unwrap();

    let (count, _, _) = cache.stats().unwrap();
    assert_eq!(count, 0, "cache with max_entries=0 should evict everything");
}
#[test]
fn cache_concurrent_reads_no_panic() {
    let dir = TempDir::new().unwrap();
    let cache = std::sync::Arc::new(ResponseCache::new(dir.path(), 60, 100).unwrap());
    let k = ResponseCache::cache_key("gpt-4", None, "concurrent");
    cache.put(&k, "gpt-4", "response", 10).unwrap();

    // Ten reader threads all fetch the same key in parallel.
    let readers: Vec<_> = (0..10)
        .map(|_| {
            let cache = std::sync::Arc::clone(&cache);
            let k = k.clone();
            std::thread::spawn(move || {
                let _ = cache.get(&k).unwrap();
            })
        })
        .collect();
    for reader in readers {
        reader.join().unwrap();
    }

    // Every read targeted an existing entry, so each one counts as a hit.
    let (_, hits, _) = cache.stats().unwrap();
    assert_eq!(hits, 10, "all concurrent reads should register as hits");
}
}

View file

@@ -1666,4 +1666,117 @@ mod tests {
assert_eq!(results[0].session_id.as_deref(), Some("sess-x"));
}
}
// ── §4.1 Concurrent write contention tests ──────────────
// Run on a multi-thread runtime: the default current-thread `#[tokio::test]`
// runtime only interleaves spawned tasks at await points, so the "concurrent"
// writers below would never actually race each other and the test would not
// exercise real write contention.
// NOTE(review): `flavor = "multi_thread"` requires tokio's `rt-multi-thread`
// feature (enabled by the `full` feature set) — confirm the crate enables it.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn sqlite_concurrent_writes_no_data_loss() {
    let (_tmp, mem) = temp_sqlite();
    let mem = std::sync::Arc::new(mem);

    // Fire 10 writers at distinct keys simultaneously.
    let mut handles = Vec::with_capacity(10);
    for i in 0..10 {
        let mem = std::sync::Arc::clone(&mem);
        handles.push(tokio::spawn(async move {
            mem.store(
                &format!("concurrent_key_{i}"),
                &format!("value_{i}"),
                MemoryCategory::Core,
                None,
            )
            .await
            .unwrap();
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }

    // Every write targeted a unique key, so all 10 rows must be present.
    let count = mem.count().await.unwrap();
    assert_eq!(
        count, 10,
        "all 10 concurrent writes must succeed without data loss"
    );
}
// Multi-thread runtime so readers and writers can genuinely run in parallel;
// the default current-thread test runtime would serialize them except at
// await points, weakening the mixed read/write contention this test targets.
// NOTE(review): `flavor = "multi_thread"` requires tokio's `rt-multi-thread`
// feature (enabled by the `full` feature set) — confirm the crate enables it.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn sqlite_concurrent_read_write_no_panic() {
    let (_tmp, mem) = temp_sqlite();
    let mem = std::sync::Arc::new(mem);

    // Pre-populate one row for the readers to hammer.
    mem.store("shared_key", "initial", MemoryCategory::Core, None)
        .await
        .unwrap();

    let mut handles = Vec::new();
    // 5 concurrent readers of the pre-existing key.
    for _ in 0..5 {
        let mem = std::sync::Arc::clone(&mem);
        handles.push(tokio::spawn(async move {
            let _ = mem.get("shared_key").await.unwrap();
        }));
    }
    // 5 concurrent writers to distinct keys.
    for i in 0..5 {
        let mem = std::sync::Arc::clone(&mem);
        handles.push(tokio::spawn(async move {
            mem.store(
                &format!("key_{i}"),
                &format!("val_{i}"),
                MemoryCategory::Core,
                None,
            )
            .await
            .unwrap();
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }

    // 1 pre-existing entry + 5 newly written = 6 total.
    assert_eq!(mem.count().await.unwrap(), 6);
}
// ── §4.2 Reindex / corruption recovery tests ────────────
#[tokio::test]
async fn sqlite_reindex_preserves_data() {
    let (_tmp, mem) = temp_sqlite();

    // Seed two entries before rebuilding the index.
    mem.store("a", "Rust is fast", MemoryCategory::Core, None)
        .await
        .unwrap();
    mem.store("b", "Python is interpreted", MemoryCategory::Core, None)
        .await
        .unwrap();

    mem.reindex().await.unwrap();

    // Nothing may be dropped, and content must round-trip intact.
    let count = mem.count().await.unwrap();
    assert_eq!(count, 2, "reindex must preserve all entries");
    let fetched = mem.get("a").await.unwrap();
    assert!(fetched.is_some());
    assert_eq!(fetched.unwrap().content, "Rust is fast");
}
#[tokio::test]
async fn sqlite_reindex_idempotent() {
    let (_tmp, mem) = temp_sqlite();
    mem.store("x", "test data", MemoryCategory::Core, None)
        .await
        .unwrap();

    // Rebuilding the index repeatedly must be safe and change nothing
    // after the first pass.
    for _ in 0..3 {
        mem.reindex().await.unwrap();
    }

    assert_eq!(mem.count().await.unwrap(), 1);
}
}