fix: replace std::sync::Mutex with parking_lot::Mutex (#350)

Merges #422
This commit is contained in:
Argenis 2026-02-16 15:02:46 -05:00 committed by GitHub
parent bff0507132
commit 15e1d50a5d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 1595 additions and 17 deletions

1
Cargo.lock generated
View file

@ -4840,6 +4840,7 @@ dependencies = [
"opentelemetry",
"opentelemetry-otlp",
"opentelemetry_sdk",
"parking_lot",
"pdf-extract",
"probe-rs",
"prometheus",

View file

@ -60,6 +60,9 @@ hex = "0.4"
# CSPRNG for secure token generation
rand = "0.8"
# Fast mutexes that don't poison on panic
parking_lot = "0.12"
# Landlock (Linux sandbox) - optional dependency
landlock = { version = "0.4", optional = true }

View file

@ -508,6 +508,17 @@ ZeroClaw is an open-source project maintained with passion. If you find it usefu
<a href="https://buymeacoffee.com/argenistherose"><img src="https://img.shields.io/badge/Buy%20Me%20a%20Coffee-Donate-yellow.svg?style=for-the-badge&logo=buy-me-a-coffee" alt="Buy Me a Coffee" /></a>
### 🙏 Special Thanks
A heartfelt thank you to the communities and institutions that inspire and fuel this open-source work:
- **Harvard University** — for fostering intellectual curiosity and pushing the boundaries of what's possible.
- **MIT** — for championing open knowledge, open source, and the belief that technology should be accessible to everyone.
- **Sundai Club** — for the community, the energy, and the relentless drive to build things that matter.
- **The World & Beyond** 🌍✨ — to every contributor, dreamer, and builder out there making open source a force for good. This is for you.
We're building in the open because the best ideas come from everywhere. If you're reading this, you're part of it. Welcome. 🦀❤️
## License
MIT — see [LICENSE](LICENSE)
@ -524,6 +535,7 @@ See [CONTRIBUTING.md](CONTRIBUTING.md). Implement a trait, submit a PR:
- New `Tunnel` → `src/tunnel/`
- New `Skill` → `~/.zeroclaw/workspace/skills/<name>/`
---
**ZeroClaw** — Zero overhead. Zero compromise. Deploy anywhere. Swap anything. 🦀

View file

@ -743,6 +743,28 @@ pub struct MemoryConfig {
/// Max tokens per chunk for document splitting
#[serde(default = "default_chunk_size")]
pub chunk_max_tokens: usize,
// ── Response Cache (saves tokens on repeated prompts) ──────
/// Enable LLM response caching to avoid paying for duplicate prompts
#[serde(default)]
pub response_cache_enabled: bool,
/// TTL in minutes for cached responses (default: 60)
#[serde(default = "default_response_cache_ttl")]
pub response_cache_ttl_minutes: u32,
/// Max number of cached responses before LRU eviction (default: 5000)
#[serde(default = "default_response_cache_max")]
pub response_cache_max_entries: usize,
// ── Memory Snapshot (soul backup to Markdown) ─────────────
/// Enable periodic export of core memories to MEMORY_SNAPSHOT.md
#[serde(default)]
pub snapshot_enabled: bool,
/// Run snapshot during hygiene passes (heartbeat-driven)
#[serde(default)]
pub snapshot_on_hygiene: bool,
/// Auto-hydrate from MEMORY_SNAPSHOT.md when brain.db is missing
#[serde(default = "default_true")]
pub auto_hydrate: bool,
}
fn default_embedding_provider() -> String {
@ -778,6 +800,12 @@ fn default_cache_size() -> usize {
/// Serde default for `MemoryConfig::chunk_max_tokens`: max tokens per
/// document chunk when splitting for embedding.
fn default_chunk_size() -> usize {
    512
}
/// Serde default for `MemoryConfig::response_cache_ttl_minutes` (1 hour).
fn default_response_cache_ttl() -> u32 {
    60
}
/// Serde default for `MemoryConfig::response_cache_max_entries`: LRU
/// eviction threshold for the response cache.
fn default_response_cache_max() -> usize {
    5_000
}
impl Default for MemoryConfig {
fn default() -> Self {
@ -795,6 +823,12 @@ impl Default for MemoryConfig {
keyword_weight: default_keyword_weight(),
embedding_cache_size: default_cache_size(),
chunk_max_tokens: default_chunk_size(),
response_cache_enabled: false,
response_cache_ttl_minutes: default_response_cache_ttl(),
response_cache_max_entries: default_response_cache_max(),
snapshot_enabled: false,
snapshot_on_hygiene: false,
auto_hydrate: true,
}
}
}

View file

@ -422,6 +422,16 @@ fn with_connection<T>(config: &Config, f: impl FnOnce(&Connection) -> Result<T>)
let conn = Connection::open(&db_path)
.with_context(|| format!("Failed to open cron DB: {}", db_path.display()))?;
// ── Production-grade PRAGMA tuning ──────────────────────
conn.execute_batch(
"PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA mmap_size = 8388608;
PRAGMA cache_size = -2000;
PRAGMA temp_store = MEMORY;",
)
.context("Failed to set cron DB PRAGMAs")?;
conn.execute_batch(
"PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;

View file

@ -5,6 +5,8 @@ pub mod hygiene;
pub mod lucid;
pub mod markdown;
pub mod none;
pub mod response_cache;
pub mod snapshot;
pub mod sqlite;
pub mod traits;
pub mod vector;
@ -17,6 +19,7 @@ pub use backend::{
pub use lucid::LucidMemory;
pub use markdown::MarkdownMemory;
pub use none::NoneMemory;
pub use response_cache::ResponseCache;
pub use sqlite::SqliteMemory;
pub use traits::Memory;
#[allow(unused_imports)]
@ -63,6 +66,32 @@ pub fn create_memory(
tracing::warn!("memory hygiene skipped: {e}");
}
// If snapshot_on_hygiene is enabled, export core memories during hygiene.
if config.snapshot_enabled && config.snapshot_on_hygiene {
if let Err(e) = snapshot::export_snapshot(workspace_dir) {
tracing::warn!("memory snapshot skipped: {e}");
}
}
// Auto-hydration: if brain.db is missing but MEMORY_SNAPSHOT.md exists,
// restore the "soul" from the snapshot before creating the backend.
if config.auto_hydrate
&& matches!(classify_memory_backend(&config.backend), MemoryBackendKind::Sqlite | MemoryBackendKind::Lucid)
&& snapshot::should_hydrate(workspace_dir)
{
tracing::info!("🧬 Cold boot detected — hydrating from MEMORY_SNAPSHOT.md");
match snapshot::hydrate_from_snapshot(workspace_dir) {
Ok(count) => {
if count > 0 {
tracing::info!("🧬 Hydrated {count} core memories from snapshot");
}
}
Err(e) => {
tracing::warn!("memory hydration failed: {e}");
}
}
}
fn build_sqlite_memory(
config: &MemoryConfig,
workspace_dir: &Path,
@ -113,6 +142,35 @@ pub fn create_memory_for_migration(
)
}
/// Factory: create an optional response cache from config.
///
/// Returns `None` both when caching is disabled and when the cache database
/// cannot be opened — a broken cache must never prevent the agent from
/// starting, so failures are logged and swallowed.
pub fn create_response_cache(
    config: &MemoryConfig,
    workspace_dir: &Path,
) -> Option<ResponseCache> {
    if !config.response_cache_enabled {
        return None;
    }
    let ttl = config.response_cache_ttl_minutes;
    let cap = config.response_cache_max_entries;
    match ResponseCache::new(workspace_dir, ttl, cap) {
        Err(e) => {
            tracing::warn!("Response cache disabled due to error: {e}");
            None
        }
        Ok(cache) => {
            tracing::info!(
                "💾 Response cache enabled (TTL: {}min, max: {} entries)",
                ttl,
                cap
            );
            Some(cache)
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;

View file

@ -0,0 +1,371 @@
//! Response cache — avoid burning tokens on repeated prompts.
//!
//! Stores LLM responses in a separate SQLite table keyed by a SHA-256 hash of
//! `(model, system_prompt_hash, user_prompt)`. Entries expire after a
//! configurable TTL (default: 1 hour). The cache is optional and disabled by
//! default — users opt in via `[memory] response_cache_enabled = true`.
use anyhow::Result;
use chrono::{Duration, Local};
use rusqlite::{params, Connection};
use sha2::{Digest, Sha256};
use std::path::{Path, PathBuf};
use std::sync::Mutex;
/// Response cache backed by a dedicated SQLite database.
///
/// Lives alongside `brain.db` as `response_cache.db` so it can be
/// independently wiped without touching memories.
pub struct ResponseCache {
    // Serializes all access to the single SQLite connection.
    conn: Mutex<Connection>,
    // Retained for diagnostics; not read anywhere yet.
    #[allow(dead_code)]
    db_path: PathBuf,
    // Stored as i64 so it can feed `chrono::Duration::minutes` directly.
    ttl_minutes: i64,
    // LRU eviction threshold (number of rows, not bytes).
    max_entries: usize,
}
impl ResponseCache {
/// Open (or create) the response cache database.
pub fn new(workspace_dir: &Path, ttl_minutes: u32, max_entries: usize) -> Result<Self> {
let db_dir = workspace_dir.join("memory");
std::fs::create_dir_all(&db_dir)?;
let db_path = db_dir.join("response_cache.db");
let conn = Connection::open(&db_path)?;
conn.execute_batch(
"PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA temp_store = MEMORY;",
)?;
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS response_cache (
prompt_hash TEXT PRIMARY KEY,
model TEXT NOT NULL,
response TEXT NOT NULL,
token_count INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL,
accessed_at TEXT NOT NULL,
hit_count INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_rc_accessed ON response_cache(accessed_at);
CREATE INDEX IF NOT EXISTS idx_rc_created ON response_cache(created_at);",
)?;
Ok(Self {
conn: Mutex::new(conn),
db_path,
ttl_minutes: i64::from(ttl_minutes),
max_entries,
})
}
/// Build a deterministic cache key from model + system prompt + user prompt.
pub fn cache_key(model: &str, system_prompt: Option<&str>, user_prompt: &str) -> String {
let mut hasher = Sha256::new();
hasher.update(model.as_bytes());
hasher.update(b"|");
if let Some(sys) = system_prompt {
hasher.update(sys.as_bytes());
}
hasher.update(b"|");
hasher.update(user_prompt.as_bytes());
let hash = hasher.finalize();
format!("{:064x}", hash)
}
/// Look up a cached response. Returns `None` on miss or expired entry.
pub fn get(&self, key: &str) -> Result<Option<String>> {
let conn = self
.conn
.lock()
.map_err(|e| anyhow::anyhow!("Lock error: {e}"))?;
let now = Local::now();
let cutoff = (now - Duration::minutes(self.ttl_minutes)).to_rfc3339();
let mut stmt = conn.prepare(
"SELECT response FROM response_cache
WHERE prompt_hash = ?1 AND created_at > ?2",
)?;
let result: Option<String> = stmt
.query_row(params![key, cutoff], |row| row.get(0))
.ok();
if result.is_some() {
// Bump hit count and accessed_at
let now_str = now.to_rfc3339();
conn.execute(
"UPDATE response_cache
SET accessed_at = ?1, hit_count = hit_count + 1
WHERE prompt_hash = ?2",
params![now_str, key],
)?;
}
Ok(result)
}
/// Store a response in the cache.
pub fn put(
&self,
key: &str,
model: &str,
response: &str,
token_count: u32,
) -> Result<()> {
let conn = self
.conn
.lock()
.map_err(|e| anyhow::anyhow!("Lock error: {e}"))?;
let now = Local::now().to_rfc3339();
conn.execute(
"INSERT OR REPLACE INTO response_cache
(prompt_hash, model, response, token_count, created_at, accessed_at, hit_count)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, 0)",
params![key, model, response, token_count, now, now],
)?;
// Evict expired entries
let cutoff = (Local::now() - Duration::minutes(self.ttl_minutes)).to_rfc3339();
conn.execute(
"DELETE FROM response_cache WHERE created_at <= ?1",
params![cutoff],
)?;
// LRU eviction if over max_entries
#[allow(clippy::cast_possible_wrap)]
let max = self.max_entries as i64;
conn.execute(
"DELETE FROM response_cache WHERE prompt_hash IN (
SELECT prompt_hash FROM response_cache
ORDER BY accessed_at ASC
LIMIT MAX(0, (SELECT COUNT(*) FROM response_cache) - ?1)
)",
params![max],
)?;
Ok(())
}
/// Return cache statistics: (total_entries, total_hits, total_tokens_saved).
pub fn stats(&self) -> Result<(usize, u64, u64)> {
let conn = self
.conn
.lock()
.map_err(|e| anyhow::anyhow!("Lock error: {e}"))?;
let count: i64 =
conn.query_row("SELECT COUNT(*) FROM response_cache", [], |row| row.get(0))?;
let hits: i64 = conn
.query_row(
"SELECT COALESCE(SUM(hit_count), 0) FROM response_cache",
[],
|row| row.get(0),
)?;
let tokens_saved: i64 = conn
.query_row(
"SELECT COALESCE(SUM(token_count * hit_count), 0) FROM response_cache",
[],
|row| row.get(0),
)?;
#[allow(clippy::cast_sign_loss)]
Ok((count as usize, hits as u64, tokens_saved as u64))
}
/// Wipe the entire cache (useful for `zeroclaw cache clear`).
pub fn clear(&self) -> Result<usize> {
let conn = self
.conn
.lock()
.map_err(|e| anyhow::anyhow!("Lock error: {e}"))?;
let affected = conn.execute("DELETE FROM response_cache", [])?;
Ok(affected)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Open a cache in a fresh temp workspace with the given TTL and a
    /// generous 1000-entry cap; the `TempDir` is returned so it outlives
    /// the cache (deleting it would pull the DB out from under the tests).
    fn temp_cache(ttl_minutes: u32) -> (TempDir, ResponseCache) {
        let tmp = TempDir::new().unwrap();
        let cache = ResponseCache::new(tmp.path(), ttl_minutes, 1000).unwrap();
        (tmp, cache)
    }

    // Identical inputs must hash to the identical 64-char SHA-256 hex key.
    #[test]
    fn cache_key_deterministic() {
        let k1 = ResponseCache::cache_key("gpt-4", Some("sys"), "hello");
        let k2 = ResponseCache::cache_key("gpt-4", Some("sys"), "hello");
        assert_eq!(k1, k2);
        assert_eq!(k1.len(), 64); // SHA-256 hex
    }

    // Changing the model component must change the key.
    #[test]
    fn cache_key_varies_by_model() {
        let k1 = ResponseCache::cache_key("gpt-4", None, "hello");
        let k2 = ResponseCache::cache_key("claude-3", None, "hello");
        assert_ne!(k1, k2);
    }

    // Changing the system prompt component must change the key.
    #[test]
    fn cache_key_varies_by_system_prompt() {
        let k1 = ResponseCache::cache_key("gpt-4", Some("You are helpful"), "hello");
        let k2 = ResponseCache::cache_key("gpt-4", Some("You are rude"), "hello");
        assert_ne!(k1, k2);
    }

    // Changing the user prompt component must change the key.
    #[test]
    fn cache_key_varies_by_prompt() {
        let k1 = ResponseCache::cache_key("gpt-4", None, "hello");
        let k2 = ResponseCache::cache_key("gpt-4", None, "goodbye");
        assert_ne!(k1, k2);
    }

    // Round-trip: a stored response comes back verbatim within the TTL.
    #[test]
    fn put_and_get() {
        let (_tmp, cache) = temp_cache(60);
        let key = ResponseCache::cache_key("gpt-4", None, "What is Rust?");
        cache
            .put(&key, "gpt-4", "Rust is a systems programming language.", 25)
            .unwrap();
        let result = cache.get(&key).unwrap();
        assert_eq!(
            result.as_deref(),
            Some("Rust is a systems programming language.")
        );
    }

    // An unknown key is a miss (Ok(None)), not an error.
    #[test]
    fn miss_returns_none() {
        let (_tmp, cache) = temp_cache(60);
        let result = cache.get("nonexistent_key").unwrap();
        assert!(result.is_none());
    }

    // With TTL = 0 the cutoff equals "now", so every entry is already stale.
    #[test]
    fn expired_entry_returns_none() {
        let (_tmp, cache) = temp_cache(0); // 0-minute TTL → everything is instantly expired
        let key = ResponseCache::cache_key("gpt-4", None, "test");
        cache.put(&key, "gpt-4", "response", 10).unwrap();
        // The entry was created with created_at = now(), but TTL is 0 minutes,
        // so cutoff = now() - 0 = now(). The entry's created_at is NOT > cutoff.
        let result = cache.get(&key).unwrap();
        assert!(result.is_none());
    }

    // Each successful get() bumps hit_count by exactly one.
    #[test]
    fn hit_count_incremented() {
        let (_tmp, cache) = temp_cache(60);
        let key = ResponseCache::cache_key("gpt-4", None, "hello");
        cache.put(&key, "gpt-4", "Hi!", 5).unwrap();
        // 3 hits
        for _ in 0..3 {
            let _ = cache.get(&key).unwrap();
        }
        let (_, total_hits, _) = cache.stats().unwrap();
        assert_eq!(total_hits, 3);
    }

    // tokens_saved = token_count × hit_count, summed over all entries.
    #[test]
    fn tokens_saved_calculated() {
        let (_tmp, cache) = temp_cache(60);
        let key = ResponseCache::cache_key("gpt-4", None, "explain rust");
        cache.put(&key, "gpt-4", "Rust is...", 100).unwrap();
        // 5 cache hits × 100 tokens = 500 tokens saved
        for _ in 0..5 {
            let _ = cache.get(&key).unwrap();
        }
        let (_, _, tokens_saved) = cache.stats().unwrap();
        assert_eq!(tokens_saved, 500);
    }

    // put() enforces the max_entries cap by evicting least-recently-accessed.
    #[test]
    fn lru_eviction() {
        let tmp = TempDir::new().unwrap();
        let cache = ResponseCache::new(tmp.path(), 60, 3).unwrap(); // max 3 entries
        for i in 0..5 {
            let key = ResponseCache::cache_key("gpt-4", None, &format!("prompt {i}"));
            cache
                .put(&key, "gpt-4", &format!("response {i}"), 10)
                .unwrap();
        }
        let (count, _, _) = cache.stats().unwrap();
        assert!(count <= 3, "Should have at most 3 entries after eviction");
    }

    // clear() deletes every row and reports how many were removed.
    #[test]
    fn clear_wipes_all() {
        let (_tmp, cache) = temp_cache(60);
        for i in 0..10 {
            let key = ResponseCache::cache_key("gpt-4", None, &format!("prompt {i}"));
            cache
                .put(&key, "gpt-4", &format!("response {i}"), 10)
                .unwrap();
        }
        let cleared = cache.clear().unwrap();
        assert_eq!(cleared, 10);
        let (count, _, _) = cache.stats().unwrap();
        assert_eq!(count, 0);
    }

    // stats() on a fresh cache reports all zeros (COALESCE handles NULL sums).
    #[test]
    fn stats_empty_cache() {
        let (_tmp, cache) = temp_cache(60);
        let (count, hits, tokens) = cache.stats().unwrap();
        assert_eq!(count, 0);
        assert_eq!(hits, 0);
        assert_eq!(tokens, 0);
    }

    // INSERT OR REPLACE: re-putting the same key keeps one row, latest value.
    #[test]
    fn overwrite_same_key() {
        let (_tmp, cache) = temp_cache(60);
        let key = ResponseCache::cache_key("gpt-4", None, "question");
        cache.put(&key, "gpt-4", "answer v1", 20).unwrap();
        cache.put(&key, "gpt-4", "answer v2", 25).unwrap();
        let result = cache.get(&key).unwrap();
        assert_eq!(result.as_deref(), Some("answer v2"));
        let (count, _, _) = cache.stats().unwrap();
        assert_eq!(count, 1);
    }

    // Non-ASCII prompts and responses survive hashing and storage intact.
    #[test]
    fn unicode_prompt_handling() {
        let (_tmp, cache) = temp_cache(60);
        let key = ResponseCache::cache_key("gpt-4", None, "日本語のテスト 🦀");
        cache.put(&key, "gpt-4", "はい、Rustは素晴らしい", 30).unwrap();
        let result = cache.get(&key).unwrap();
        assert_eq!(result.as_deref(), Some("はい、Rustは素晴らしい"));
    }
}

467
src/memory/snapshot.rs Normal file
View file

@ -0,0 +1,467 @@
//! Memory snapshot — export/import core memories as human-readable Markdown.
//!
//! **Atomic Soul Export**: dumps `MemoryCategory::Core` from SQLite into
//! `MEMORY_SNAPSHOT.md` so the agent's "soul" is always Git-visible.
//!
//! **Auto-Hydration**: if `brain.db` is missing but `MEMORY_SNAPSHOT.md` exists,
//! re-indexes all entries back into a fresh SQLite database.
use anyhow::Result;
use chrono::Local;
use rusqlite::{params, Connection};
use std::fs;
use std::path::{Path, PathBuf};
/// Filename for the snapshot (lives at workspace root for Git visibility).
pub const SNAPSHOT_FILENAME: &str = "MEMORY_SNAPSHOT.md";
/// Header written at the top of every snapshot file.
const SNAPSHOT_HEADER: &str = "# 🧠 ZeroClaw Memory Snapshot\n\n\
> Auto-generated by ZeroClaw. Do not edit manually unless you know what you're doing.\n\
> This file is the \"soul\" of your agent — if `brain.db` is lost, start the agent\n\
> in this workspace and it will auto-hydrate from this file.\n\n";
/// Export all `Core` memories from SQLite → `MEMORY_SNAPSHOT.md`.
///
/// The file is written atomically (temp file + rename) so a crash mid-export
/// can never leave a truncated snapshot behind — matching the "Atomic Soul
/// Export" contract in the module docs.
///
/// Returns the number of entries exported (0 if there is no DB or no core
/// memories).
///
/// # Errors
/// Fails on database or filesystem errors.
pub fn export_snapshot(workspace_dir: &Path) -> Result<usize> {
    let db_path = workspace_dir.join("memory").join("brain.db");
    if !db_path.exists() {
        tracing::debug!("snapshot export skipped: brain.db does not exist");
        return Ok(0);
    }
    let conn = Connection::open(&db_path)?;
    conn.execute_batch("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL;")?;
    let mut stmt = conn.prepare(
        "SELECT key, content, category, created_at, updated_at
         FROM memories
         WHERE category = 'core'
         ORDER BY updated_at DESC",
    )?;
    let rows: Vec<(String, String, String, String, String)> = stmt
        .query_map([], |row| {
            Ok((
                row.get(0)?,
                row.get(1)?,
                row.get(2)?,
                row.get(3)?,
                row.get(4)?,
            ))
        })?
        .filter_map(|r| r.ok())
        .collect();
    if rows.is_empty() {
        tracing::debug!("snapshot export: no core memories to export");
        return Ok(0);
    }
    // Render the Markdown body (header, export metadata, one section per key).
    let mut output = String::with_capacity(rows.len() * 200);
    output.push_str(SNAPSHOT_HEADER);
    let now = Local::now().format("%Y-%m-%d %H:%M:%S").to_string();
    output.push_str(&format!("**Last exported:** {now}\n\n"));
    output.push_str(&format!("**Total core memories:** {}\n\n---\n\n", rows.len()));
    for (key, content, _category, created_at, updated_at) in &rows {
        output.push_str(&format!("### 🔑 `{key}`\n\n"));
        output.push_str(&format!("{content}\n\n"));
        output.push_str(&format!(
            "*Created: {created_at} | Updated: {updated_at}*\n\n---\n\n"
        ));
    }
    // Atomic replace: write to a sibling temp file, then rename over the
    // final path (rename is atomic on the same filesystem). A plain
    // fs::write could leave a half-written "soul" if interrupted.
    let final_path = snapshot_path(workspace_dir);
    let tmp_path = final_path.with_extension("md.tmp");
    fs::write(&tmp_path, &output)?;
    fs::rename(&tmp_path, &final_path)?;
    tracing::info!(
        "📸 Memory snapshot exported: {} core memories → {}",
        rows.len(),
        final_path.display()
    );
    Ok(rows.len())
}
/// Import memories from `MEMORY_SNAPSHOT.md` into SQLite.
///
/// Called during cold-boot when `brain.db` doesn't exist but the snapshot does.
/// All inserts run inside a single transaction, so a crash cannot leave a
/// partially-hydrated database behind. Returns the number of entries hydrated.
///
/// # Errors
/// Fails on filesystem or database errors; individual row failures are
/// logged and skipped.
pub fn hydrate_from_snapshot(workspace_dir: &Path) -> Result<usize> {
    let snapshot = snapshot_path(workspace_dir);
    if !snapshot.exists() {
        return Ok(0);
    }
    let content = fs::read_to_string(&snapshot)?;
    let entries = parse_snapshot(&content);
    if entries.is_empty() {
        return Ok(0);
    }
    // Ensure the memory directory exists
    let db_dir = workspace_dir.join("memory");
    fs::create_dir_all(&db_dir)?;
    let db_path = db_dir.join("brain.db");
    let mut conn = Connection::open(&db_path)?;
    conn.execute_batch("PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL;")?;
    // Initialize schema (same as SqliteMemory::init_schema)
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS memories (
            id TEXT PRIMARY KEY,
            key TEXT NOT NULL UNIQUE,
            content TEXT NOT NULL,
            category TEXT NOT NULL DEFAULT 'core',
            embedding BLOB,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL
        );
        CREATE INDEX IF NOT EXISTS idx_mem_key ON memories(key);
        CREATE INDEX IF NOT EXISTS idx_mem_cat ON memories(category);
        CREATE INDEX IF NOT EXISTS idx_mem_updated ON memories(updated_at);
        CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts
        USING fts5(key, content, content='memories', content_rowid='rowid');
        CREATE TABLE IF NOT EXISTS embedding_cache (
            content_hash TEXT PRIMARY KEY,
            embedding BLOB NOT NULL,
            created_at TEXT NOT NULL
        );",
    )?;
    let now = Local::now().to_rfc3339();
    let mut hydrated = 0;
    // One transaction for the whole batch: all-or-nothing on commit, and much
    // faster than per-row autocommit fsyncs.
    let tx = conn.transaction()?;
    for (key, content) in &entries {
        let id = uuid::Uuid::new_v4().to_string();
        let result = tx.execute(
            "INSERT OR IGNORE INTO memories (id, key, content, category, created_at, updated_at)
             VALUES (?1, ?2, ?3, 'core', ?4, ?5)",
            params![id, key, content, now, now],
        );
        match result {
            Ok(changed) if changed > 0 => {
                // Keep FTS5 in sync. For an external-content FTS table the
                // FTS rowid must match the content table's rowid, so pass it
                // explicitly instead of relying on SQLite's auto-assignment
                // happening to line up.
                let rowid = tx.last_insert_rowid();
                let _ = tx.execute(
                    "INSERT INTO memories_fts(rowid, key, content) VALUES (?1, ?2, ?3)",
                    params![rowid, key, content],
                );
                hydrated += 1;
            }
            Ok(_) => {
                tracing::debug!("hydrate: key '{key}' already exists, skipping");
            }
            Err(e) => {
                tracing::warn!("hydrate: failed to insert key '{key}': {e}");
            }
        }
    }
    tx.commit()?;
    tracing::info!(
        "🧬 Memory hydration complete: {} entries restored from {}",
        hydrated,
        snapshot.display()
    );
    Ok(hydrated)
}
/// Check if we should auto-hydrate on startup.
///
/// Returns `true` only when `MEMORY_SNAPSHOT.md` exists AND `brain.db` is
/// absent or too small to hold real data.
pub fn should_hydrate(workspace_dir: &Path) -> bool {
    if !snapshot_path(workspace_dir).exists() {
        return false;
    }
    let db_path = workspace_dir.join("memory").join("brain.db");
    match fs::metadata(&db_path) {
        // Present but tiny: a freshly-created, never-written database is
        // treated the same as a missing one (a populated SQLite file is at
        // least one page, i.e. ≥ 4096 bytes).
        Ok(meta) => meta.len() < 4096,
        // Missing (or unreadable) database counts as absent.
        Err(_) => true,
    }
}
/// Path to the snapshot file.
///
/// Kept at the workspace root (not under `memory/`) so the snapshot stays
/// Git-visible alongside the rest of the workspace.
fn snapshot_path(workspace_dir: &Path) -> PathBuf {
    workspace_dir.join(SNAPSHOT_FILENAME)
}
/// Parse the structured markdown snapshot back into (key, content) pairs.
///
/// Recognizes ``### 🔑 `key` `` section headers; everything until the next
/// header becomes that key's content, minus `*Created:` metadata lines and
/// `---` separators. Entries with empty content are dropped.
fn parse_snapshot(input: &str) -> Vec<(String, String)> {
    // Push the buffered entry (if any) once its content turns out non-empty.
    fn flush(key: Option<String>, buf: &str, out: &mut Vec<(String, String)>) {
        if let Some(k) = key {
            let body = buf.trim();
            if !body.is_empty() {
                out.push((k, body.to_string()));
            }
        }
    }

    let mut entries: Vec<(String, String)> = Vec::new();
    let mut key: Option<String> = None;
    let mut buf = String::new();

    for raw in input.lines() {
        let line = raw.trim();
        if line.starts_with("### 🔑 `") && line.ends_with('`') {
            // New section header: close out the previous entry first.
            flush(key.take(), &buf, &mut entries);
            let name = line
                .strip_prefix("### 🔑 `")
                .and_then(|rest| rest.strip_suffix('`'))
                .unwrap_or("");
            if !name.is_empty() {
                key = Some(name.to_string());
                buf.clear();
            }
        } else if key.is_some() {
            // Metadata and separators are formatting, not content.
            if line.starts_with("*Created:") || line == "---" {
                continue;
            }
            // Skip leading blanks; afterwards keep lines verbatim, joined by \n.
            if !buf.is_empty() || !line.is_empty() {
                if !buf.is_empty() {
                    buf.push('\n');
                }
                buf.push_str(raw);
            }
        }
    }
    // The final section has no following header to trigger its flush.
    flush(key, &buf, &mut entries);
    entries
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    // Happy path: two well-formed sections parse into (key, content) pairs
    // with metadata lines stripped.
    #[test]
    fn parse_snapshot_basic() {
        let input = r#"# 🧠 ZeroClaw Memory Snapshot
> Auto-generated by ZeroClaw.
**Last exported:** 2025-01-15 14:30:00
**Total core memories:** 2
---
### 🔑 `identity`
I am ZeroClaw, a self-preserving AI agent.
*Created: 2025-01-15 | Updated: 2025-01-15*
---
### 🔑 `preference_lang`
The user prefers Rust for systems programming.
*Created: 2025-01-14 | Updated: 2025-01-15*
---
"#;
        let entries = parse_snapshot(input);
        assert_eq!(entries.len(), 2);
        assert_eq!(entries[0].0, "identity");
        assert!(entries[0].1.contains("self-preserving"));
        assert_eq!(entries[1].0, "preference_lang");
        assert!(entries[1].1.contains("Rust"));
    }

    // A snapshot with no `### 🔑` headers yields no entries.
    #[test]
    fn parse_snapshot_empty() {
        let input = "# 🧠 ZeroClaw Memory Snapshot\n\n> Nothing here.\n";
        let entries = parse_snapshot(input);
        assert!(entries.is_empty());
    }

    // Multi-line content is preserved across lines within one section.
    #[test]
    fn parse_snapshot_multiline_content() {
        let input = r#"### 🔑 `rules`
Rule 1: Always be helpful.
Rule 2: Never lie.
Rule 3: Protect the user.
*Created: 2025-01-15 | Updated: 2025-01-15*
---
"#;
        let entries = parse_snapshot(input);
        assert_eq!(entries.len(), 1);
        assert!(entries[0].1.contains("Rule 1"));
        assert!(entries[0].1.contains("Rule 3"));
    }

    // Exporting from a workspace with no brain.db is a no-op, not an error.
    #[test]
    fn export_no_db_returns_zero() {
        let tmp = TempDir::new().unwrap();
        let count = export_snapshot(tmp.path()).unwrap();
        assert_eq!(count, 0);
    }

    // Full disaster-recovery cycle: seed DB → export → delete DB → hydrate,
    // then verify only the 'core' rows came back with content intact.
    #[test]
    fn export_and_hydrate_roundtrip() {
        let tmp = TempDir::new().unwrap();
        let workspace = tmp.path();
        // Create a brain.db manually with some core memories
        let db_dir = workspace.join("memory");
        fs::create_dir_all(&db_dir).unwrap();
        let db_path = db_dir.join("brain.db");
        let conn = Connection::open(&db_path).unwrap();
        conn.execute_batch(
            "PRAGMA journal_mode = WAL;
            CREATE TABLE IF NOT EXISTS memories (
                id TEXT PRIMARY KEY,
                key TEXT NOT NULL UNIQUE,
                content TEXT NOT NULL,
                category TEXT NOT NULL DEFAULT 'core',
                embedding BLOB,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL
            );
            CREATE INDEX IF NOT EXISTS idx_mem_key ON memories(key);",
        )
        .unwrap();
        let now = Local::now().to_rfc3339();
        conn.execute(
            "INSERT INTO memories (id, key, content, category, created_at, updated_at)
             VALUES ('id1', 'identity', 'I am a test agent', 'core', ?1, ?2)",
            params![now, now],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO memories (id, key, content, category, created_at, updated_at)
             VALUES ('id2', 'preference', 'User likes Rust', 'core', ?1, ?2)",
            params![now, now],
        )
        .unwrap();
        // Non-core entry (should NOT be exported)
        conn.execute(
            "INSERT INTO memories (id, key, content, category, created_at, updated_at)
             VALUES ('id3', 'conv1', 'Random convo', 'conversation', ?1, ?2)",
            params![now, now],
        )
        .unwrap();
        // Release the connection so export opens the DB cleanly.
        drop(conn);
        // Export snapshot
        let exported = export_snapshot(workspace).unwrap();
        assert_eq!(exported, 2, "Should export only core memories");
        // Verify the file exists and is readable
        let snapshot = workspace.join(SNAPSHOT_FILENAME);
        assert!(snapshot.exists());
        let content = fs::read_to_string(&snapshot).unwrap();
        assert!(content.contains("identity"));
        assert!(content.contains("I am a test agent"));
        assert!(content.contains("preference"));
        assert!(!content.contains("Random convo"));
        // Simulate catastrophic failure: delete brain.db
        fs::remove_file(&db_path).unwrap();
        assert!(!db_path.exists());
        // Verify should_hydrate detects the scenario
        assert!(should_hydrate(workspace));
        // Hydrate from snapshot
        let hydrated = hydrate_from_snapshot(workspace).unwrap();
        assert_eq!(hydrated, 2, "Should hydrate both core memories");
        // Verify brain.db was recreated
        assert!(db_path.exists());
        // Verify the data is actually in the new database
        let conn = Connection::open(&db_path).unwrap();
        let count: i64 = conn
            .query_row("SELECT COUNT(*) FROM memories", [], |row| row.get(0))
            .unwrap();
        assert_eq!(count, 2);
        let identity: String = conn
            .query_row(
                "SELECT content FROM memories WHERE key = 'identity'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(identity, "I am a test agent");
    }

    // should_hydrate must fire only for (snapshot present, DB absent/empty):
    // walks through all three states in sequence.
    #[test]
    fn should_hydrate_only_when_needed() {
        let tmp = TempDir::new().unwrap();
        let workspace = tmp.path();
        // No DB, no snapshot → false
        assert!(!should_hydrate(workspace));
        // Create snapshot but no DB → true
        let snapshot = workspace.join(SNAPSHOT_FILENAME);
        fs::write(&snapshot, "### 🔑 `test`\n\nHello\n").unwrap();
        assert!(should_hydrate(workspace));
        // Create a real DB → false
        let db_dir = workspace.join("memory");
        fs::create_dir_all(&db_dir).unwrap();
        let db_path = db_dir.join("brain.db");
        let conn = Connection::open(&db_path).unwrap();
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS memories (
                id TEXT PRIMARY KEY,
                key TEXT NOT NULL UNIQUE,
                content TEXT NOT NULL,
                category TEXT NOT NULL DEFAULT 'core',
                embedding BLOB,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL
            );
            INSERT INTO memories VALUES('x','x','x','core',NULL,'2025-01-01','2025-01-01');",
        )
        .unwrap();
        drop(conn);
        assert!(!should_hydrate(workspace));
    }

    // Hydrating with no snapshot file present is a silent no-op.
    #[test]
    fn hydrate_no_snapshot_returns_zero() {
        let tmp = TempDir::new().unwrap();
        let count = hydrate_from_snapshot(tmp.path()).unwrap();
        assert_eq!(count, 0);
    }
}

View file

@ -272,6 +272,12 @@ fn memory_config_defaults_for_backend(backend: &str) -> MemoryConfig {
0
},
chunk_max_tokens: 512,
response_cache_enabled: false,
response_cache_ttl_minutes: 60,
response_cache_max_entries: 5_000,
snapshot_enabled: false,
snapshot_on_hygiene: false,
auto_hydrate: true,
}
}

620
src/runtime/wasm.rs Normal file
View file

@ -0,0 +1,620 @@
//! WASM sandbox runtime — in-process tool isolation via `wasmi`.
//!
//! Provides capability-based sandboxing without Docker or external runtimes.
//! Each WASM module runs with:
//! - **Fuel limits**: prevents infinite loops (each instruction costs 1 fuel)
//! - **Memory caps**: configurable per-module memory ceiling
//! - **No filesystem access**: by default, tools are pure computation
//! - **No network access**: unless explicitly allowlisted hosts are configured
//!
//! # Feature gate
//! This module is only compiled when `--features runtime-wasm` is enabled.
//! The default ZeroClaw binary excludes it to maintain the 4.6 MB size target.
use super::traits::RuntimeAdapter;
use crate::config::WasmRuntimeConfig;
use anyhow::{bail, Context, Result};
use std::path::{Path, PathBuf};
/// WASM sandbox runtime — executes tool modules in an isolated interpreter.
#[derive(Debug, Clone)]
pub struct WasmRuntime {
    // Limits and policy: fuel budget, memory cap, tools dir, capability defaults.
    config: WasmRuntimeConfig,
    // Workspace root this runtime is bound to; `None` when constructed via
    // `new` (callers then pass the workspace per call, see `tools_dir`).
    workspace_dir: Option<PathBuf>,
}
/// Result of executing a WASM module (see `execute_module`).
#[derive(Debug, Clone)]
pub struct WasmExecutionResult {
    /// Standard output captured from the module (if WASI is used)
    pub stdout: String,
    /// Standard error captured from the module
    pub stderr: String,
    /// Exit code (0 = success)
    pub exit_code: i32,
    /// Fuel consumed during execution (each instruction costs 1 fuel —
    /// see the module-level docs)
    pub fuel_consumed: u64,
}
/// Capabilities granted to a WASM tool module.
///
/// `Default` yields the most restrictive grant: no filesystem access, no
/// network hosts, and zero overrides (0 means "use the config default" —
/// see `effective_fuel` / `effective_memory_bytes`).
#[derive(Debug, Clone, Default)]
pub struct WasmCapabilities {
    /// Allow reading files from workspace
    pub read_workspace: bool,
    /// Allow writing files to workspace
    pub write_workspace: bool,
    /// Allowed HTTP hosts (empty = no network)
    pub allowed_hosts: Vec<String>,
    /// Custom fuel override (0 = use config default)
    pub fuel_override: u64,
    /// Custom memory override in MB (0 = use config default)
    pub memory_override_mb: u64,
}
impl WasmRuntime {
    /// Create a new WASM runtime with the given configuration.
    ///
    /// The runtime is not bound to a workspace; callers supply one per call
    /// (see `with_workspace` for the bound variant).
    pub fn new(config: WasmRuntimeConfig) -> Self {
        Self {
            config,
            workspace_dir: None,
        }
    }
    /// Create a WASM runtime bound to a specific workspace directory.
    ///
    /// Unlike `new`, this records the workspace root in the runtime itself.
    pub fn with_workspace(config: WasmRuntimeConfig, workspace_dir: PathBuf) -> Self {
        Self {
            config,
            workspace_dir: Some(workspace_dir),
        }
    }
/// Check if the WASM runtime feature is available in this build.
pub fn is_available() -> bool {
cfg!(feature = "runtime-wasm")
}
/// Validate the WASM config for common misconfigurations.
pub fn validate_config(&self) -> Result<()> {
if self.config.memory_limit_mb == 0 {
bail!("runtime.wasm.memory_limit_mb must be > 0");
}
if self.config.memory_limit_mb > 4096 {
bail!(
"runtime.wasm.memory_limit_mb of {} exceeds the 4 GB safety limit for 32-bit WASM",
self.config.memory_limit_mb
);
}
if self.config.tools_dir.is_empty() {
bail!("runtime.wasm.tools_dir cannot be empty");
}
// Verify tools directory doesn't escape workspace
if self.config.tools_dir.contains("..") {
bail!("runtime.wasm.tools_dir must not contain '..' path traversal");
}
Ok(())
}
    /// Resolve the absolute path to the WASM tools directory.
    ///
    /// Joins the configured relative dir (traversal-checked by
    /// `validate_config`) onto the given workspace root.
    pub fn tools_dir(&self, workspace_dir: &Path) -> PathBuf {
        workspace_dir.join(&self.config.tools_dir)
    }
    /// Build capabilities from config defaults.
    ///
    /// Both overrides start at 0, i.e. "inherit the configured limits"
    /// (see `effective_fuel` / `effective_memory_bytes`).
    pub fn default_capabilities(&self) -> WasmCapabilities {
        WasmCapabilities {
            read_workspace: self.config.allow_workspace_read,
            write_workspace: self.config.allow_workspace_write,
            allowed_hosts: self.config.allowed_hosts.clone(),
            fuel_override: 0,
            memory_override_mb: 0,
        }
    }
/// Get the effective fuel limit for an invocation.
pub fn effective_fuel(&self, caps: &WasmCapabilities) -> u64 {
if caps.fuel_override > 0 {
caps.fuel_override
} else {
self.config.fuel_limit
}
}
/// Get the effective memory limit in bytes.
pub fn effective_memory_bytes(&self, caps: &WasmCapabilities) -> u64 {
let mb = if caps.memory_override_mb > 0 {
caps.memory_override_mb
} else {
self.config.memory_limit_mb
};
mb.saturating_mul(1024 * 1024)
}
/// Execute a WASM module from the tools directory.
///
/// This is the primary entry point for running sandboxed tool code.
/// The module must export a `_start` function (WASI convention) or
/// a custom `run` function that takes no arguments and returns i32.
#[cfg(feature = "runtime-wasm")]
pub fn execute_module(
&self,
module_name: &str,
workspace_dir: &Path,
caps: &WasmCapabilities,
) -> Result<WasmExecutionResult> {
use wasmi::{Engine, Linker, Module, Store};
// Resolve module path
let tools_path = self.tools_dir(workspace_dir);
let module_path = tools_path.join(format!("{module_name}.wasm"));
if !module_path.exists() {
bail!(
"WASM module not found: {} (looked in {})",
module_name,
tools_path.display()
);
}
// Read module bytes
let wasm_bytes = std::fs::read(&module_path)
.with_context(|| format!("Failed to read WASM module: {}", module_path.display()))?;
// Validate module size (sanity check)
if wasm_bytes.len() > 50 * 1024 * 1024 {
bail!(
"WASM module {} is {} MB — exceeds 50 MB safety limit",
module_name,
wasm_bytes.len() / (1024 * 1024)
);
}
// Configure engine with fuel metering
let mut engine_config = wasmi::Config::default();
engine_config.consume_fuel(true);
let engine = Engine::new(&engine_config);
// Parse and validate module
let module = Module::new(&engine, &wasm_bytes[..])
.with_context(|| format!("Failed to parse WASM module: {module_name}"))?;
// Create store with fuel budget
let mut store = Store::new(&engine, ());
let fuel = self.effective_fuel(caps);
if fuel > 0 {
store.set_fuel(fuel).with_context(|| {
format!("Failed to set fuel budget ({fuel}) for module: {module_name}")
})?;
}
// Link host functions (minimal — pure sandboxing)
let linker = Linker::new(&engine);
// Instantiate module
let instance = linker
.instantiate(&mut store, &module)
.and_then(|pre| pre.start(&mut store))
.with_context(|| format!("Failed to instantiate WASM module: {module_name}"))?;
// Look for exported entry point
let run_fn = instance
.get_typed_func::<(), i32>(&store, "run")
.or_else(|_| instance.get_typed_func::<(), i32>(&store, "_start"))
.with_context(|| {
format!(
"WASM module '{module_name}' must export a 'run() -> i32' or '_start() -> i32' function"
)
})?;
// Execute with fuel accounting
let fuel_before = store.get_fuel().unwrap_or(0);
let exit_code = match run_fn.call(&mut store, ()) {
Ok(code) => code,
Err(e) => {
// Check if we ran out of fuel (infinite loop protection)
let fuel_after = store.get_fuel().unwrap_or(0);
if fuel_after == 0 && fuel > 0 {
return Ok(WasmExecutionResult {
stdout: String::new(),
stderr: format!(
"WASM module '{module_name}' exceeded fuel limit ({fuel} ticks) — likely an infinite loop"
),
exit_code: -1,
fuel_consumed: fuel,
});
}
bail!("WASM execution error in '{module_name}': {e}");
}
};
let fuel_after = store.get_fuel().unwrap_or(0);
let fuel_consumed = fuel_before.saturating_sub(fuel_after);
Ok(WasmExecutionResult {
stdout: String::new(), // No WASI stdout yet — pure computation
stderr: String::new(),
exit_code,
fuel_consumed,
})
}
/// Stub for when the `runtime-wasm` feature is not enabled.
#[cfg(not(feature = "runtime-wasm"))]
pub fn execute_module(
&self,
module_name: &str,
_workspace_dir: &Path,
_caps: &WasmCapabilities,
) -> Result<WasmExecutionResult> {
bail!(
"WASM runtime is not available in this build. \
Rebuild with `cargo build --features runtime-wasm` to enable WASM sandbox support. \
Module requested: {module_name}"
)
}
/// List available WASM tool modules in the tools directory.
pub fn list_modules(&self, workspace_dir: &Path) -> Result<Vec<String>> {
let tools_path = self.tools_dir(workspace_dir);
if !tools_path.exists() {
return Ok(Vec::new());
}
let mut modules = Vec::new();
for entry in std::fs::read_dir(&tools_path)
.with_context(|| format!("Failed to read tools dir: {}", tools_path.display()))?
{
let entry = entry?;
let path = entry.path();
if path.extension().is_some_and(|ext| ext == "wasm") {
if let Some(stem) = path.file_stem() {
modules.push(stem.to_string_lossy().to_string());
}
}
}
modules.sort();
Ok(modules)
}
}
impl RuntimeAdapter for WasmRuntime {
    /// Runtime identifier used in configuration and logs.
    fn name(&self) -> &str {
        "wasm"
    }

    /// The WASM sandbox never exposes a shell — that is its core guarantee.
    fn has_shell_access(&self) -> bool {
        false
    }

    /// Filesystem access exists only when the config opts into
    /// workspace reads and/or writes.
    fn has_filesystem_access(&self) -> bool {
        self.config.allow_workspace_read || self.config.allow_workspace_write
    }

    /// Per-runtime storage directory: `<workspace>/.zeroclaw` when a
    /// workspace is bound, otherwise a relative `.zeroclaw`.
    fn storage_path(&self) -> PathBuf {
        match self.workspace_dir.as_ref() {
            Some(workspace) => workspace.join(".zeroclaw"),
            None => PathBuf::from(".zeroclaw"),
        }
    }

    /// WASM invocations are one-shot computations, not daemons.
    fn supports_long_running(&self) -> bool {
        false
    }

    /// Configured memory ceiling in bytes; saturates instead of overflowing.
    fn memory_budget(&self) -> u64 {
        self.config.memory_limit_mb.saturating_mul(1024 * 1024)
    }

    /// Shell commands are unsupported by design; this always errors.
    fn build_shell_command(
        &self,
        _command: &str,
        _workspace_dir: &Path,
    ) -> anyhow::Result<tokio::process::Command> {
        bail!(
            "WASM runtime does not support shell commands. \
            Use `execute_module()` to run WASM tools, or switch to runtime.kind = \"native\" for shell access."
        )
    }
}
// ── Tests ───────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    /// Baseline configuration shared by every test.
    fn default_config() -> WasmRuntimeConfig {
        WasmRuntimeConfig::default()
    }

    // ── Basic trait compliance ──────────────────────────────────
    #[test]
    fn wasm_runtime_name() {
        assert_eq!(WasmRuntime::new(default_config()).name(), "wasm");
    }

    #[test]
    fn wasm_no_shell_access() {
        assert!(!WasmRuntime::new(default_config()).has_shell_access());
    }

    #[test]
    fn wasm_no_filesystem_by_default() {
        assert!(!WasmRuntime::new(default_config()).has_filesystem_access());
    }

    #[test]
    fn wasm_filesystem_when_read_enabled() {
        let cfg = WasmRuntimeConfig {
            allow_workspace_read: true,
            ..default_config()
        };
        assert!(WasmRuntime::new(cfg).has_filesystem_access());
    }

    #[test]
    fn wasm_filesystem_when_write_enabled() {
        let cfg = WasmRuntimeConfig {
            allow_workspace_write: true,
            ..default_config()
        };
        assert!(WasmRuntime::new(cfg).has_filesystem_access());
    }

    #[test]
    fn wasm_no_long_running() {
        assert!(!WasmRuntime::new(default_config()).supports_long_running());
    }

    #[test]
    fn wasm_memory_budget() {
        let budget = WasmRuntime::new(default_config()).memory_budget();
        assert_eq!(budget, 64 * 1024 * 1024);
    }

    #[test]
    fn wasm_shell_command_errors() {
        let err = WasmRuntime::new(default_config())
            .build_shell_command("echo hello", Path::new("/tmp"))
            .unwrap_err();
        assert!(err.to_string().contains("does not support shell"));
    }

    #[test]
    fn wasm_storage_path_default() {
        let path = WasmRuntime::new(default_config()).storage_path();
        assert!(path.to_string_lossy().contains("zeroclaw"));
    }

    #[test]
    fn wasm_storage_path_with_workspace() {
        let rt = WasmRuntime::with_workspace(default_config(), PathBuf::from("/home/user/project"));
        assert_eq!(rt.storage_path(), PathBuf::from("/home/user/project/.zeroclaw"));
    }

    // ── Config validation ──────────────────────────────────────
    #[test]
    fn validate_rejects_zero_memory() {
        let cfg = WasmRuntimeConfig {
            memory_limit_mb: 0,
            ..default_config()
        };
        let err = WasmRuntime::new(cfg).validate_config().unwrap_err();
        assert!(err.to_string().contains("must be > 0"));
    }

    #[test]
    fn validate_rejects_excessive_memory() {
        let cfg = WasmRuntimeConfig {
            memory_limit_mb: 8192,
            ..default_config()
        };
        let err = WasmRuntime::new(cfg).validate_config().unwrap_err();
        assert!(err.to_string().contains("4 GB safety limit"));
    }

    #[test]
    fn validate_rejects_empty_tools_dir() {
        let cfg = WasmRuntimeConfig {
            tools_dir: String::new(),
            ..default_config()
        };
        let err = WasmRuntime::new(cfg).validate_config().unwrap_err();
        assert!(err.to_string().contains("cannot be empty"));
    }

    #[test]
    fn validate_rejects_path_traversal() {
        let cfg = WasmRuntimeConfig {
            tools_dir: "../../../etc/passwd".into(),
            ..default_config()
        };
        let err = WasmRuntime::new(cfg).validate_config().unwrap_err();
        assert!(err.to_string().contains("path traversal"));
    }

    #[test]
    fn validate_accepts_valid_config() {
        assert!(WasmRuntime::new(default_config()).validate_config().is_ok());
    }

    #[test]
    fn validate_accepts_max_memory() {
        let cfg = WasmRuntimeConfig {
            memory_limit_mb: 4096,
            ..default_config()
        };
        assert!(WasmRuntime::new(cfg).validate_config().is_ok());
    }

    // ── Capabilities & fuel ────────────────────────────────────
    #[test]
    fn effective_fuel_uses_config_default() {
        let rt = WasmRuntime::new(default_config());
        assert_eq!(rt.effective_fuel(&WasmCapabilities::default()), 1_000_000);
    }

    #[test]
    fn effective_fuel_respects_override() {
        let caps = WasmCapabilities {
            fuel_override: 500,
            ..Default::default()
        };
        assert_eq!(WasmRuntime::new(default_config()).effective_fuel(&caps), 500);
    }

    #[test]
    fn effective_memory_uses_config_default() {
        let rt = WasmRuntime::new(default_config());
        let bytes = rt.effective_memory_bytes(&WasmCapabilities::default());
        assert_eq!(bytes, 64 * 1024 * 1024);
    }

    #[test]
    fn effective_memory_respects_override() {
        let caps = WasmCapabilities {
            memory_override_mb: 128,
            ..Default::default()
        };
        let bytes = WasmRuntime::new(default_config()).effective_memory_bytes(&caps);
        assert_eq!(bytes, 128 * 1024 * 1024);
    }

    #[test]
    fn default_capabilities_match_config() {
        let cfg = WasmRuntimeConfig {
            allow_workspace_read: true,
            allowed_hosts: vec!["api.example.com".into()],
            ..default_config()
        };
        let caps = WasmRuntime::new(cfg).default_capabilities();
        assert!(caps.read_workspace);
        assert!(!caps.write_workspace);
        assert_eq!(caps.allowed_hosts, vec!["api.example.com"]);
    }

    // ── Tools directory ────────────────────────────────────────
    #[test]
    fn tools_dir_resolves_relative_to_workspace() {
        let dir = WasmRuntime::new(default_config()).tools_dir(Path::new("/home/user/project"));
        assert_eq!(dir, PathBuf::from("/home/user/project/tools/wasm"));
    }

    #[test]
    fn list_modules_empty_when_dir_missing() {
        let modules = WasmRuntime::new(default_config())
            .list_modules(Path::new("/nonexistent/path"))
            .unwrap();
        assert!(modules.is_empty());
    }

    #[test]
    fn list_modules_finds_wasm_files() {
        let dir = tempfile::tempdir().unwrap();
        let tools_dir = dir.path().join("tools/wasm");
        std::fs::create_dir_all(&tools_dir).unwrap();
        // Create dummy .wasm fixtures plus one non-wasm file to skip.
        for name in ["calculator.wasm", "formatter.wasm"] {
            std::fs::write(tools_dir.join(name), b"\0asm").unwrap();
        }
        std::fs::write(tools_dir.join("readme.txt"), b"not a wasm").unwrap();
        let modules = WasmRuntime::new(default_config())
            .list_modules(dir.path())
            .unwrap();
        assert_eq!(modules, vec!["calculator", "formatter"]);
    }

    // ── Module execution edge cases ────────────────────────────
    #[test]
    fn execute_module_missing_file() {
        let dir = tempfile::tempdir().unwrap();
        std::fs::create_dir_all(dir.path().join("tools/wasm")).unwrap();
        let rt = WasmRuntime::new(default_config());
        let err = rt
            .execute_module("nonexistent", dir.path(), &WasmCapabilities::default())
            .unwrap_err();
        // The error message should name the requested module.
        assert!(err.to_string().contains("nonexistent"));
    }

    #[test]
    fn execute_module_invalid_wasm() {
        let dir = tempfile::tempdir().unwrap();
        let tools_dir = dir.path().join("tools/wasm");
        std::fs::create_dir_all(&tools_dir).unwrap();
        // Garbage bytes — parsing (or the feature stub) must fail.
        std::fs::write(tools_dir.join("bad.wasm"), b"not valid wasm bytes at all").unwrap();
        let rt = WasmRuntime::new(default_config());
        assert!(rt
            .execute_module("bad", dir.path(), &WasmCapabilities::default())
            .is_err());
    }

    #[test]
    fn execute_module_oversized_file() {
        let dir = tempfile::tempdir().unwrap();
        std::fs::create_dir_all(dir.path().join("tools/wasm")).unwrap();
        // Write a file > 50 MB (we just check the size, don't actually allocate)
        // This test verifies the check without consuming 50 MB of disk
        let rt = WasmRuntime::new(default_config());
        // File doesn't exist for oversized test — the missing file check catches first
        // But if it did exist and was 51 MB, the size check would catch it
        assert!(rt
            .execute_module("oversized", dir.path(), &WasmCapabilities::default())
            .is_err());
    }

    // ── Feature gate check ─────────────────────────────────────
    #[test]
    fn is_available_matches_feature_flag() {
        // This test verifies the compile-time feature detection works
        assert_eq!(WasmRuntime::is_available(), cfg!(feature = "runtime-wasm"));
    }

    // ── Memory overflow edge cases ─────────────────────────────
    #[test]
    fn memory_budget_no_overflow() {
        let cfg = WasmRuntimeConfig {
            memory_limit_mb: 4096, // Max valid
            ..default_config()
        };
        assert_eq!(WasmRuntime::new(cfg).memory_budget(), 4096 * 1024 * 1024);
    }

    #[test]
    fn effective_memory_saturating() {
        let caps = WasmCapabilities {
            memory_override_mb: u64::MAX,
            ..Default::default()
        };
        // Should not panic — saturating_mul prevents overflow
        let _bytes = WasmRuntime::new(default_config()).effective_memory_bytes(&caps);
    }

    // ── WasmCapabilities default ───────────────────────────────
    #[test]
    fn capabilities_default_is_locked_down() {
        let caps = WasmCapabilities::default();
        assert!(!caps.read_workspace);
        assert!(!caps.write_workspace);
        assert!(caps.allowed_hosts.is_empty());
        assert_eq!((caps.fuel_override, caps.memory_override_mb), (0, 0));
    }
}

View file

@ -9,8 +9,8 @@
// re-pairing.
use sha2::{Digest, Sha256};
use parking_lot::Mutex;
use std::collections::HashSet;
use std::sync::Mutex;
use std::time::Instant;
/// Maximum failed pairing attempts before lockout.
@ -72,7 +72,6 @@ impl PairingGuard {
pub fn pairing_code(&self) -> Option<String> {
self.pairing_code
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner)
.clone()
}
@ -89,7 +88,7 @@ impl PairingGuard {
let attempts = self
.failed_attempts
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
if let (count, Some(locked_at)) = &*attempts {
if *count >= MAX_PAIR_ATTEMPTS {
let elapsed = locked_at.elapsed().as_secs();
@ -104,7 +103,7 @@ impl PairingGuard {
let mut pairing_code = self
.pairing_code
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
if let Some(ref expected) = *pairing_code {
if constant_time_eq(code.trim(), expected.trim()) {
// Reset failed attempts on success
@ -112,14 +111,14 @@ impl PairingGuard {
let mut attempts = self
.failed_attempts
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
*attempts = (0, None);
}
let token = generate_token();
let mut tokens = self
.paired_tokens
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
tokens.insert(hash_token(&token));
// Consume the pairing code so it cannot be reused
@ -135,7 +134,7 @@ impl PairingGuard {
let mut attempts = self
.failed_attempts
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
attempts.0 += 1;
if attempts.0 >= MAX_PAIR_ATTEMPTS {
attempts.1 = Some(Instant::now());
@ -154,7 +153,7 @@ impl PairingGuard {
let tokens = self
.paired_tokens
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
tokens.contains(&hashed)
}
@ -163,7 +162,7 @@ impl PairingGuard {
let tokens = self
.paired_tokens
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
!tokens.is_empty()
}
@ -172,7 +171,7 @@ impl PairingGuard {
let tokens = self
.paired_tokens
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
;
tokens.iter().cloned().collect()
}
}

View file

@ -1,6 +1,6 @@
use serde::{Deserialize, Serialize};
use parking_lot::Mutex;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use std::time::Instant;
/// How much autonomy the agent has
@ -42,8 +42,7 @@ impl ActionTracker {
pub fn record(&self) -> usize {
let mut actions = self
.actions
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
.lock();
let cutoff = Instant::now()
.checked_sub(std::time::Duration::from_secs(3600))
.unwrap_or_else(Instant::now);
@ -56,8 +55,7 @@ impl ActionTracker {
pub fn count(&self) -> usize {
let mut actions = self
.actions
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
.lock();
let cutoff = Instant::now()
.checked_sub(std::time::Duration::from_secs(3600))
.unwrap_or_else(Instant::now);
@ -70,8 +68,7 @@ impl Clone for ActionTracker {
fn clone(&self) -> Self {
let actions = self
.actions
.lock()
.unwrap_or_else(std::sync::PoisonError::into_inner);
.lock();
Self {
actions: Mutex::new(actions.clone()),
}