readd tests, remove markdown files
This commit is contained in:
parent
e2634c72c2
commit
9a6fa76825
17 changed files with 1352 additions and 0 deletions
|
|
@ -348,4 +348,76 @@ mod tests {
|
|||
let result = cache.get(&key).unwrap();
|
||||
assert_eq!(result.as_deref(), Some("はい、Rustは素晴らしい"));
|
||||
}
|
||||
|
||||
// ── §4.4 Cache eviction under pressure tests ─────────────
|
||||
|
||||
#[test]
|
||||
fn lru_eviction_keeps_most_recent() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let cache = ResponseCache::new(tmp.path(), 60, 3).unwrap();
|
||||
|
||||
// Insert 3 entries
|
||||
for i in 0..3 {
|
||||
let key = ResponseCache::cache_key("gpt-4", None, &format!("prompt {i}"));
|
||||
cache
|
||||
.put(&key, "gpt-4", &format!("response {i}"), 10)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Access entry 0 to make it recently used
|
||||
let key0 = ResponseCache::cache_key("gpt-4", None, "prompt 0");
|
||||
let _ = cache.get(&key0).unwrap();
|
||||
|
||||
// Insert entry 3 (triggers eviction)
|
||||
let key3 = ResponseCache::cache_key("gpt-4", None, "prompt 3");
|
||||
cache.put(&key3, "gpt-4", "response 3", 10).unwrap();
|
||||
|
||||
let (count, _, _) = cache.stats().unwrap();
|
||||
assert!(count <= 3, "cache must not exceed max_entries");
|
||||
|
||||
// Entry 0 was recently accessed and should survive
|
||||
let entry0 = cache.get(&key0).unwrap();
|
||||
assert!(
|
||||
entry0.is_some(),
|
||||
"recently accessed entry should survive LRU eviction"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cache_handles_zero_max_entries() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let cache = ResponseCache::new(tmp.path(), 60, 0).unwrap();
|
||||
|
||||
let key = ResponseCache::cache_key("gpt-4", None, "test");
|
||||
// Should not panic even with max_entries=0
|
||||
cache.put(&key, "gpt-4", "response", 10).unwrap();
|
||||
|
||||
let (count, _, _) = cache.stats().unwrap();
|
||||
assert_eq!(count, 0, "cache with max_entries=0 should evict everything");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cache_concurrent_reads_no_panic() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let cache = std::sync::Arc::new(ResponseCache::new(tmp.path(), 60, 100).unwrap());
|
||||
|
||||
let key = ResponseCache::cache_key("gpt-4", None, "concurrent");
|
||||
cache.put(&key, "gpt-4", "response", 10).unwrap();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for _ in 0..10 {
|
||||
let cache = std::sync::Arc::clone(&cache);
|
||||
let key = key.clone();
|
||||
handles.push(std::thread::spawn(move || {
|
||||
let _ = cache.get(&key).unwrap();
|
||||
}));
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.join().unwrap();
|
||||
}
|
||||
|
||||
let (_, hits, _) = cache.stats().unwrap();
|
||||
assert_eq!(hits, 10, "all concurrent reads should register as hits");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1666,4 +1666,117 @@ mod tests {
|
|||
assert_eq!(results[0].session_id.as_deref(), Some("sess-x"));
|
||||
}
|
||||
}
|
||||
|
||||
// ── §4.1 Concurrent write contention tests ──────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn sqlite_concurrent_writes_no_data_loss() {
|
||||
let (_tmp, mem) = temp_sqlite();
|
||||
let mem = std::sync::Arc::new(mem);
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for i in 0..10 {
|
||||
let mem = std::sync::Arc::clone(&mem);
|
||||
handles.push(tokio::spawn(async move {
|
||||
mem.store(
|
||||
&format!("concurrent_key_{i}"),
|
||||
&format!("value_{i}"),
|
||||
MemoryCategory::Core,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}));
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
let count = mem.count().await.unwrap();
|
||||
assert_eq!(
|
||||
count, 10,
|
||||
"all 10 concurrent writes must succeed without data loss"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn sqlite_concurrent_read_write_no_panic() {
|
||||
let (_tmp, mem) = temp_sqlite();
|
||||
let mem = std::sync::Arc::new(mem);
|
||||
|
||||
// Pre-populate
|
||||
mem.store("shared_key", "initial", MemoryCategory::Core, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
|
||||
// Concurrent reads
|
||||
for _ in 0..5 {
|
||||
let mem = std::sync::Arc::clone(&mem);
|
||||
handles.push(tokio::spawn(async move {
|
||||
let _ = mem.get("shared_key").await.unwrap();
|
||||
}));
|
||||
}
|
||||
|
||||
// Concurrent writes
|
||||
for i in 0..5 {
|
||||
let mem = std::sync::Arc::clone(&mem);
|
||||
handles.push(tokio::spawn(async move {
|
||||
mem.store(
|
||||
&format!("key_{i}"),
|
||||
&format!("val_{i}"),
|
||||
MemoryCategory::Core,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}));
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
// Should have 6 total entries (1 pre-existing + 5 new)
|
||||
assert_eq!(mem.count().await.unwrap(), 6);
|
||||
}
|
||||
|
||||
// ── §4.2 Reindex / corruption recovery tests ────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn sqlite_reindex_preserves_data() {
|
||||
let (_tmp, mem) = temp_sqlite();
|
||||
mem.store("a", "Rust is fast", MemoryCategory::Core, None)
|
||||
.await
|
||||
.unwrap();
|
||||
mem.store("b", "Python is interpreted", MemoryCategory::Core, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
mem.reindex().await.unwrap();
|
||||
|
||||
let count = mem.count().await.unwrap();
|
||||
assert_eq!(count, 2, "reindex must preserve all entries");
|
||||
|
||||
let entry = mem.get("a").await.unwrap();
|
||||
assert!(entry.is_some());
|
||||
assert_eq!(entry.unwrap().content, "Rust is fast");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn sqlite_reindex_idempotent() {
|
||||
let (_tmp, mem) = temp_sqlite();
|
||||
mem.store("x", "test data", MemoryCategory::Core, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Multiple reindex calls should be safe
|
||||
mem.reindex().await.unwrap();
|
||||
mem.reindex().await.unwrap();
|
||||
mem.reindex().await.unwrap();
|
||||
|
||||
assert_eq!(mem.count().await.unwrap(), 1);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
104
src/migration.rs
104
src/migration.rs
|
|
@ -556,4 +556,108 @@ mod tests {
|
|||
.expect("backend=none should be rejected for migration target");
|
||||
assert!(err.to_string().contains("disables persistence"));
|
||||
}
|
||||
|
||||
// ── §7.1 / §7.2 Config backward compatibility & migration tests ──
|
||||
|
||||
#[test]
|
||||
fn parse_category_handles_all_variants() {
|
||||
assert_eq!(parse_category("core"), MemoryCategory::Core);
|
||||
assert_eq!(parse_category("daily"), MemoryCategory::Daily);
|
||||
assert_eq!(parse_category("conversation"), MemoryCategory::Conversation);
|
||||
assert_eq!(parse_category(""), MemoryCategory::Core);
|
||||
assert_eq!(
|
||||
parse_category("custom_type"),
|
||||
MemoryCategory::Custom("custom_type".to_string())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_category_case_insensitive() {
|
||||
assert_eq!(parse_category("CORE"), MemoryCategory::Core);
|
||||
assert_eq!(parse_category("Daily"), MemoryCategory::Daily);
|
||||
assert_eq!(parse_category("CONVERSATION"), MemoryCategory::Conversation);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_key_handles_empty_string() {
|
||||
let key = normalize_key("", 42);
|
||||
assert_eq!(key, "openclaw_42");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_key_trims_whitespace() {
|
||||
let key = normalize_key(" my_key ", 0);
|
||||
assert_eq!(key, "my_key");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_structured_markdown_rejects_empty_key() {
|
||||
assert!(parse_structured_memory_line("****:value").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_structured_markdown_rejects_empty_value() {
|
||||
assert!(parse_structured_memory_line("**key**:").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_structured_markdown_rejects_no_stars() {
|
||||
assert!(parse_structured_memory_line("key: value").is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn migration_skips_empty_content() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let db_path = dir.path().join("brain.db");
|
||||
let conn = Connection::open(&db_path).unwrap();
|
||||
|
||||
conn.execute_batch("CREATE TABLE memories (key TEXT, content TEXT, category TEXT);")
|
||||
.unwrap();
|
||||
conn.execute(
|
||||
"INSERT INTO memories (key, content, category) VALUES (?1, ?2, ?3)",
|
||||
params!["empty_key", " ", "core"],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let rows = read_openclaw_sqlite_entries(&db_path).unwrap();
|
||||
assert_eq!(
|
||||
rows.len(),
|
||||
0,
|
||||
"entries with empty/whitespace content must be skipped"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn backup_creates_timestamped_directory() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let mem_dir = tmp.path().join("memory");
|
||||
std::fs::create_dir_all(&mem_dir).unwrap();
|
||||
|
||||
// Create a brain.db to back up
|
||||
let db_path = mem_dir.join("brain.db");
|
||||
std::fs::write(&db_path, "fake db content").unwrap();
|
||||
|
||||
let result = backup_target_memory(tmp.path()).unwrap();
|
||||
assert!(
|
||||
result.is_some(),
|
||||
"backup should be created when files exist"
|
||||
);
|
||||
|
||||
let backup_dir = result.unwrap();
|
||||
assert!(backup_dir.exists());
|
||||
assert!(
|
||||
backup_dir.to_string_lossy().contains("openclaw-"),
|
||||
"backup dir must contain openclaw- prefix"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn backup_returns_none_when_no_files() {
|
||||
let tmp = TempDir::new().unwrap();
|
||||
let result = backup_target_memory(tmp.path()).unwrap();
|
||||
assert!(
|
||||
result.is_none(),
|
||||
"backup should return None when no files to backup"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -467,4 +467,56 @@ mod tests {
|
|||
obs.record_event(&ObserverEvent::HeartbeatTick);
|
||||
obs.flush();
|
||||
}
|
||||
|
||||
// ── §8.2 OTel export failure resilience tests ────────────
|
||||
|
||||
#[test]
|
||||
fn otel_records_error_event_without_panic() {
|
||||
let obs = test_observer();
|
||||
// Simulate an error event — should not panic even with unreachable endpoint
|
||||
obs.record_event(&ObserverEvent::Error {
|
||||
component: "provider".into(),
|
||||
message: "connection refused to model endpoint".into(),
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn otel_records_llm_failure_without_panic() {
|
||||
let obs = test_observer();
|
||||
obs.record_event(&ObserverEvent::LlmResponse {
|
||||
provider: "openrouter".into(),
|
||||
model: "missing-model".into(),
|
||||
duration: Duration::from_millis(0),
|
||||
success: false,
|
||||
error_message: Some("404 Not Found".into()),
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn otel_flush_idempotent_with_unreachable_endpoint() {
|
||||
let obs = test_observer();
|
||||
// Multiple flushes should not panic even when endpoint is unreachable
|
||||
obs.flush();
|
||||
obs.flush();
|
||||
obs.flush();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn otel_records_zero_duration_metrics() {
|
||||
let obs = test_observer();
|
||||
obs.record_metric(&ObserverMetric::RequestLatency(Duration::ZERO));
|
||||
obs.record_metric(&ObserverMetric::TokensUsed(0));
|
||||
obs.record_metric(&ObserverMetric::ActiveSessions(0));
|
||||
obs.record_metric(&ObserverMetric::QueueDepth(0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn otel_observer_creation_with_valid_endpoint_succeeds() {
|
||||
// Even though endpoint is unreachable, creation should succeed
|
||||
let result = OtelObserver::new(Some("http://127.0.0.1:12345"), Some("zeroclaw-test"));
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"observer creation must succeed even with unreachable endpoint"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1012,6 +1012,140 @@ mod tests {
|
|||
assert_eq!(provider.compute_backoff(500, &err), 500);
|
||||
}
|
||||
|
||||
// ── §2.1 API auth error (401/403) tests ──────────────────
|
||||
|
||||
#[test]
|
||||
fn non_retryable_detects_401() {
|
||||
let err = anyhow::anyhow!("API error (401 Unauthorized): invalid api key");
|
||||
assert!(
|
||||
is_non_retryable(&err),
|
||||
"401 errors must be detected as non-retryable"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_detects_403() {
|
||||
let err = anyhow::anyhow!("API error (403 Forbidden): access denied");
|
||||
assert!(
|
||||
is_non_retryable(&err),
|
||||
"403 errors must be detected as non-retryable"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_detects_404() {
|
||||
let err = anyhow::anyhow!("API error (404 Not Found): model not found");
|
||||
assert!(
|
||||
is_non_retryable(&err),
|
||||
"404 errors must be detected as non-retryable"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_does_not_flag_429() {
|
||||
let err = anyhow::anyhow!("429 Too Many Requests");
|
||||
assert!(
|
||||
!is_non_retryable(&err),
|
||||
"429 must NOT be treated as non-retryable (it is retryable with backoff)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_does_not_flag_408() {
|
||||
let err = anyhow::anyhow!("408 Request Timeout");
|
||||
assert!(
|
||||
!is_non_retryable(&err),
|
||||
"408 must NOT be treated as non-retryable (it is retryable)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_does_not_flag_500() {
|
||||
let err = anyhow::anyhow!("500 Internal Server Error");
|
||||
assert!(
|
||||
!is_non_retryable(&err),
|
||||
"500 must NOT be treated as non-retryable (server errors are retryable)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_retryable_does_not_flag_502() {
|
||||
let err = anyhow::anyhow!("502 Bad Gateway");
|
||||
assert!(
|
||||
!is_non_retryable(&err),
|
||||
"502 must NOT be treated as non-retryable"
|
||||
);
|
||||
}
|
||||
|
||||
// ── §2.2 Rate limit Retry-After edge cases ───────────────
|
||||
|
||||
#[test]
|
||||
fn parse_retry_after_zero() {
|
||||
let err = anyhow::anyhow!("429 Too Many Requests, Retry-After: 0");
|
||||
assert_eq!(
|
||||
parse_retry_after_ms(&err),
|
||||
Some(0),
|
||||
"Retry-After: 0 should parse as 0ms"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_retry_after_with_underscore_separator() {
|
||||
let err = anyhow::anyhow!("rate limited, retry_after: 10");
|
||||
assert_eq!(
|
||||
parse_retry_after_ms(&err),
|
||||
Some(10_000),
|
||||
"retry_after with underscore must be parsed"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_retry_after_space_separator() {
|
||||
let err = anyhow::anyhow!("Retry-After 7");
|
||||
assert_eq!(
|
||||
parse_retry_after_ms(&err),
|
||||
Some(7000),
|
||||
"Retry-After with space separator must be parsed"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rate_limited_false_for_generic_error() {
|
||||
let err = anyhow::anyhow!("Connection refused");
|
||||
assert!(
|
||||
!is_rate_limited(&err),
|
||||
"generic errors must not be flagged as rate-limited"
|
||||
);
|
||||
}
|
||||
|
||||
// ── §2.3 Malformed API response error classification ─────
|
||||
|
||||
#[tokio::test]
|
||||
async fn non_retryable_skips_retries_for_401() {
|
||||
let calls = Arc::new(AtomicUsize::new(0));
|
||||
let provider = ReliableProvider::new(
|
||||
vec![(
|
||||
"primary".into(),
|
||||
Box::new(MockProvider {
|
||||
calls: Arc::clone(&calls),
|
||||
fail_until_attempt: usize::MAX,
|
||||
response: "never",
|
||||
error: "API error (401 Unauthorized): invalid key",
|
||||
}),
|
||||
)],
|
||||
5,
|
||||
1,
|
||||
);
|
||||
|
||||
let result = provider.simple_chat("hello", "test", 0.0).await;
|
||||
assert!(result.is_err(), "401 should fail without retries");
|
||||
assert_eq!(
|
||||
calls.load(Ordering::SeqCst),
|
||||
1,
|
||||
"must not retry on 401 — should be exactly 1 call"
|
||||
);
|
||||
}
|
||||
|
||||
// ── Arc<ModelAwareMock> Provider impl for test ──
|
||||
|
||||
#[async_trait]
|
||||
|
|
|
|||
|
|
@ -196,4 +196,76 @@ mod tests {
|
|||
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
// ── §3.3 / §3.4 Docker mount & network isolation tests ──
|
||||
|
||||
#[test]
|
||||
fn docker_build_shell_command_includes_network_flag() {
|
||||
let cfg = DockerRuntimeConfig {
|
||||
network: "none".into(),
|
||||
..DockerRuntimeConfig::default()
|
||||
};
|
||||
let runtime = DockerRuntime::new(cfg);
|
||||
let workspace = std::env::temp_dir();
|
||||
let cmd = runtime
|
||||
.build_shell_command("echo hello", &workspace)
|
||||
.unwrap();
|
||||
let debug = format!("{cmd:?}");
|
||||
assert!(
|
||||
debug.contains("--network") && debug.contains("none"),
|
||||
"must include --network none for isolation"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn docker_build_shell_command_includes_read_only_flag() {
|
||||
let cfg = DockerRuntimeConfig {
|
||||
read_only_rootfs: true,
|
||||
..DockerRuntimeConfig::default()
|
||||
};
|
||||
let runtime = DockerRuntime::new(cfg);
|
||||
let workspace = std::env::temp_dir();
|
||||
let cmd = runtime
|
||||
.build_shell_command("echo hello", &workspace)
|
||||
.unwrap();
|
||||
let debug = format!("{cmd:?}");
|
||||
assert!(
|
||||
debug.contains("--read-only"),
|
||||
"must include --read-only flag when read_only_rootfs is set"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[test]
|
||||
fn docker_refuses_root_mount() {
|
||||
let cfg = DockerRuntimeConfig {
|
||||
mount_workspace: true,
|
||||
..DockerRuntimeConfig::default()
|
||||
};
|
||||
let runtime = DockerRuntime::new(cfg);
|
||||
let result = runtime.build_shell_command("echo test", Path::new("/"));
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"mounting filesystem root (/) must be refused"
|
||||
);
|
||||
assert!(result.unwrap_err().to_string().contains("root"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn docker_no_memory_flag_when_not_configured() {
|
||||
let cfg = DockerRuntimeConfig {
|
||||
memory_limit_mb: None,
|
||||
..DockerRuntimeConfig::default()
|
||||
};
|
||||
let runtime = DockerRuntime::new(cfg);
|
||||
let workspace = std::env::temp_dir();
|
||||
let cmd = runtime
|
||||
.build_shell_command("echo hello", &workspace)
|
||||
.unwrap();
|
||||
let debug = format!("{cmd:?}");
|
||||
assert!(
|
||||
!debug.contains("--memory"),
|
||||
"should not include --memory when not configured"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -617,4 +617,71 @@ mod tests {
|
|||
assert_eq!(caps.fuel_override, 0);
|
||||
assert_eq!(caps.memory_override_mb, 0);
|
||||
}
|
||||
|
||||
// ── §3.1 / §3.2 WASM fuel & memory exhaustion tests ─────
|
||||
|
||||
#[test]
|
||||
fn wasm_fuel_limit_enforced_in_config() {
|
||||
let rt = WasmRuntime::new(default_config());
|
||||
let caps = WasmCapabilities::default();
|
||||
let fuel = rt.effective_fuel(&caps);
|
||||
assert!(
|
||||
fuel > 0,
|
||||
"default fuel limit must be > 0 to prevent infinite loops"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wasm_memory_limit_enforced_in_config() {
|
||||
let rt = WasmRuntime::new(default_config());
|
||||
let caps = WasmCapabilities::default();
|
||||
let mem_bytes = rt.effective_memory_bytes(&caps);
|
||||
assert!(
|
||||
mem_bytes > 0,
|
||||
"default memory limit must be > 0"
|
||||
);
|
||||
assert!(
|
||||
mem_bytes <= 4096 * 1024 * 1024,
|
||||
"default memory must not exceed 4 GB safety limit"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wasm_zero_fuel_override_uses_default() {
|
||||
let rt = WasmRuntime::new(default_config());
|
||||
let caps = WasmCapabilities {
|
||||
fuel_override: 0,
|
||||
..Default::default()
|
||||
};
|
||||
assert_eq!(
|
||||
rt.effective_fuel(&caps),
|
||||
1_000_000,
|
||||
"fuel_override=0 must use config default"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_rejects_memory_just_above_limit() {
|
||||
let mut cfg = default_config();
|
||||
cfg.memory_limit_mb = 4097;
|
||||
let rt = WasmRuntime::new(cfg);
|
||||
let err = rt.validate_config().unwrap_err();
|
||||
assert!(err.to_string().contains("4 GB safety limit"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn execute_module_stub_returns_error_without_feature() {
|
||||
if !WasmRuntime::is_available() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let tools_dir = dir.path().join("tools/wasm");
|
||||
std::fs::create_dir_all(&tools_dir).unwrap();
|
||||
std::fs::write(tools_dir.join("test.wasm"), b"\0asm\x01\0\0\0").unwrap();
|
||||
|
||||
let rt = WasmRuntime::new(default_config());
|
||||
let caps = WasmCapabilities::default();
|
||||
let result = rt.execute_module("test", dir.path(), &caps);
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("not available"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -332,4 +332,92 @@ mod tests {
|
|||
assert!(!tmp.path().join("audit.log").exists());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── §8.1 Log rotation tests ─────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn audit_logger_writes_event_when_enabled() -> Result<()> {
|
||||
let tmp = TempDir::new()?;
|
||||
let config = AuditConfig {
|
||||
enabled: true,
|
||||
max_size_mb: 10,
|
||||
..Default::default()
|
||||
};
|
||||
let logger = AuditLogger::new(config, tmp.path().to_path_buf())?;
|
||||
let event = AuditEvent::new(AuditEventType::CommandExecution)
|
||||
.with_actor("cli".to_string(), None, None)
|
||||
.with_action("ls".to_string(), "low".to_string(), false, true);
|
||||
|
||||
logger.log(&event)?;
|
||||
|
||||
let log_path = tmp.path().join("audit.log");
|
||||
assert!(log_path.exists(), "audit log file must be created");
|
||||
|
||||
let content = std::fs::read_to_string(&log_path)?;
|
||||
assert!(!content.is_empty(), "audit log must not be empty");
|
||||
|
||||
let parsed: AuditEvent = serde_json::from_str(content.trim())?;
|
||||
assert!(parsed.action.is_some());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn audit_log_command_event_writes_structured_entry() -> Result<()> {
|
||||
let tmp = TempDir::new()?;
|
||||
let config = AuditConfig {
|
||||
enabled: true,
|
||||
max_size_mb: 10,
|
||||
..Default::default()
|
||||
};
|
||||
let logger = AuditLogger::new(config, tmp.path().to_path_buf())?;
|
||||
|
||||
logger.log_command_event(CommandExecutionLog {
|
||||
channel: "telegram",
|
||||
command: "echo test",
|
||||
risk_level: "low",
|
||||
approved: false,
|
||||
allowed: true,
|
||||
success: true,
|
||||
duration_ms: 42,
|
||||
})?;
|
||||
|
||||
let log_path = tmp.path().join("audit.log");
|
||||
let content = std::fs::read_to_string(&log_path)?;
|
||||
let parsed: AuditEvent = serde_json::from_str(content.trim())?;
|
||||
|
||||
let action = parsed.action.unwrap();
|
||||
assert_eq!(action.command, Some("echo test".to_string()));
|
||||
assert_eq!(action.risk_level, Some("low".to_string()));
|
||||
assert!(action.allowed);
|
||||
|
||||
let result = parsed.result.unwrap();
|
||||
assert!(result.success);
|
||||
assert_eq!(result.duration_ms, Some(42));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn audit_rotation_creates_numbered_backup() -> Result<()> {
|
||||
let tmp = TempDir::new()?;
|
||||
let config = AuditConfig {
|
||||
enabled: true,
|
||||
max_size_mb: 0, // Force rotation on first write
|
||||
..Default::default()
|
||||
};
|
||||
let logger = AuditLogger::new(config, tmp.path().to_path_buf())?;
|
||||
|
||||
// Write initial content that triggers rotation
|
||||
let log_path = tmp.path().join("audit.log");
|
||||
std::fs::write(&log_path, "initial content\n")?;
|
||||
|
||||
let event = AuditEvent::new(AuditEventType::CommandExecution);
|
||||
logger.log(&event)?;
|
||||
|
||||
let rotated = format!("{}.1.log", log_path.display());
|
||||
assert!(
|
||||
std::path::Path::new(&rotated).exists(),
|
||||
"rotation must create .1.log backup"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -94,4 +94,90 @@ mod tests {
|
|||
// Either way, the name should still work
|
||||
assert_eq!(sandbox.name(), "bubblewrap");
|
||||
}
|
||||
|
||||
// ── §1.1 Sandbox isolation flag tests ──────────────────────
|
||||
|
||||
#[test]
|
||||
fn bubblewrap_wrap_command_includes_isolation_flags() {
|
||||
let sandbox = BubblewrapSandbox;
|
||||
let mut cmd = Command::new("echo");
|
||||
cmd.arg("hello");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
cmd.get_program().to_string_lossy(),
|
||||
"bwrap",
|
||||
"wrapped command should use bwrap as program"
|
||||
);
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"--unshare-all".to_string()),
|
||||
"must include --unshare-all for namespace isolation"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--die-with-parent".to_string()),
|
||||
"must include --die-with-parent to prevent orphan processes"
|
||||
);
|
||||
assert!(
|
||||
!args.contains(&"--share-net".to_string()),
|
||||
"must NOT include --share-net (network should be blocked)"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bubblewrap_wrap_command_preserves_original_command() {
|
||||
let sandbox = BubblewrapSandbox;
|
||||
let mut cmd = Command::new("ls");
|
||||
cmd.arg("-la");
|
||||
cmd.arg("/tmp");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"ls".to_string()),
|
||||
"original program must be passed as argument"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"-la".to_string()),
|
||||
"original args must be preserved"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"/tmp".to_string()),
|
||||
"original args must be preserved"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bubblewrap_wrap_command_binds_required_paths() {
|
||||
let sandbox = BubblewrapSandbox;
|
||||
let mut cmd = Command::new("echo");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"--ro-bind".to_string()),
|
||||
"must include read-only bind for /usr"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--dev".to_string()),
|
||||
"must include /dev mount"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--proc".to_string()),
|
||||
"must include /proc mount"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -117,4 +117,100 @@ mod tests {
|
|||
Err(_) => assert!(!DockerSandbox::is_installed()),
|
||||
}
|
||||
}
|
||||
|
||||
// ── §1.1 Sandbox isolation flag tests ──────────────────────
|
||||
|
||||
#[test]
|
||||
fn docker_wrap_command_includes_isolation_flags() {
|
||||
let sandbox = DockerSandbox::default();
|
||||
let mut cmd = Command::new("echo");
|
||||
cmd.arg("hello");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
cmd.get_program().to_string_lossy(),
|
||||
"docker",
|
||||
"wrapped command should use docker as program"
|
||||
);
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"run".to_string()),
|
||||
"must include 'run' subcommand"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--rm".to_string()),
|
||||
"must include --rm for auto-cleanup"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--network".to_string()),
|
||||
"must include --network flag"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"none".to_string()),
|
||||
"network must be set to 'none' for isolation"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--memory".to_string()),
|
||||
"must include --memory limit"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"512m".to_string()),
|
||||
"memory limit must be 512m"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"--cpus".to_string()),
|
||||
"must include --cpus limit"
|
||||
);
|
||||
assert!(args.contains(&"1.0".to_string()), "CPU limit must be 1.0");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn docker_wrap_command_preserves_original_command() {
|
||||
let sandbox = DockerSandbox::default();
|
||||
let mut cmd = Command::new("ls");
|
||||
cmd.arg("-la");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"alpine:latest".to_string()),
|
||||
"must include the container image"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"ls".to_string()),
|
||||
"original program must be passed as argument"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"-la".to_string()),
|
||||
"original args must be preserved"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn docker_wrap_command_uses_custom_image() {
|
||||
let sandbox = DockerSandbox {
|
||||
image: "ubuntu:22.04".to_string(),
|
||||
};
|
||||
let mut cmd = Command::new("echo");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"ubuntu:22.04".to_string()),
|
||||
"must use the custom image"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -125,4 +125,71 @@ mod tests {
|
|||
assert_eq!(cmd.get_program().to_string_lossy(), "firejail");
|
||||
}
|
||||
}
|
||||
|
||||
// ── §1.1 Sandbox isolation flag tests ──────────────────────
|
||||
|
||||
#[test]
|
||||
fn firejail_wrap_command_includes_all_security_flags() {
|
||||
let sandbox = FirejailSandbox;
|
||||
let mut cmd = Command::new("echo");
|
||||
cmd.arg("test");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
cmd.get_program().to_string_lossy(),
|
||||
"firejail",
|
||||
"wrapped command should use firejail as program"
|
||||
);
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
let expected_flags = [
|
||||
"--private=home",
|
||||
"--private-dev",
|
||||
"--nosound",
|
||||
"--no3d",
|
||||
"--novideo",
|
||||
"--nowheel",
|
||||
"--notv",
|
||||
"--noprofile",
|
||||
"--quiet",
|
||||
];
|
||||
|
||||
for flag in &expected_flags {
|
||||
assert!(
|
||||
args.contains(&flag.to_string()),
|
||||
"must include security flag: {flag}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn firejail_wrap_command_preserves_original_command() {
|
||||
let sandbox = FirejailSandbox;
|
||||
let mut cmd = Command::new("ls");
|
||||
cmd.arg("-la");
|
||||
cmd.arg("/workspace");
|
||||
sandbox.wrap_command(&mut cmd).unwrap();
|
||||
|
||||
let args: Vec<String> = cmd
|
||||
.get_args()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
assert!(
|
||||
args.contains(&"ls".to_string()),
|
||||
"original program must be passed as argument"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"-la".to_string()),
|
||||
"original args must be preserved"
|
||||
);
|
||||
assert!(
|
||||
args.contains(&"/workspace".to_string()),
|
||||
"original args must be preserved"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -231,4 +231,31 @@ mod tests {
|
|||
))),
|
||||
}
|
||||
}
|
||||
|
||||
// ── §1.1 Landlock stub tests ──────────────────────────────
// Without the sandbox-landlock feature on Linux, the stub implementation
// must fail loudly with ErrorKind::Unsupported rather than silently no-op.

#[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))]
#[test]
fn landlock_stub_wrap_command_returns_unsupported() {
    let sandbox = LandlockSandbox;
    let mut cmd = std::process::Command::new("echo");
    let err = sandbox.wrap_command(&mut cmd).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Unsupported);
}

#[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))]
#[test]
fn landlock_stub_new_returns_unsupported() {
    let err = LandlockSandbox::new().unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Unsupported);
}

#[cfg(not(all(feature = "sandbox-landlock", target_os = "linux")))]
#[test]
fn landlock_stub_probe_returns_unsupported() {
    // probe's error kind is not pinned here, only that it refuses.
    assert!(LandlockSandbox::probe().is_err());
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1388,4 +1388,112 @@ mod tests {
|
|||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ── §1.2 Path resolution / symlink bypass tests ──────────

// A resolved path is only allowed when it stays under the (canonicalized)
// workspace root; anything outside must be refused.
#[test]
fn resolved_path_blocks_outside_workspace() {
    // Canonicalize where possible so the policy's prefix comparison matches
    // on platforms where temp_dir itself is a symlink (e.g. macOS).
    let canon = |p: &std::path::Path| p.canonicalize().unwrap_or_else(|_| p.to_path_buf());

    let workspace = std::env::temp_dir().join("zeroclaw_test_resolved_path");
    let _ = std::fs::create_dir_all(&workspace);
    let ws = canon(&workspace);

    let policy = SecurityPolicy {
        workspace_dir: ws.clone(),
        ..SecurityPolicy::default()
    };

    // Inside the workspace: allowed.
    let inside = ws.join("subdir").join("file.txt");
    assert!(
        policy.is_resolved_path_allowed(&inside),
        "path inside workspace should be allowed"
    );

    // Outside the workspace: blocked.
    let outside = canon(&std::env::temp_dir()).join("outside_workspace_zeroclaw");
    assert!(
        !policy.is_resolved_path_allowed(&outside),
        "path outside workspace must be blocked"
    );

    let _ = std::fs::remove_dir_all(&workspace);
}
|
||||
|
||||
// Absolute system paths must never pass the workspace containment check.
#[test]
fn resolved_path_blocks_root_escape() {
    let policy = SecurityPolicy {
        workspace_dir: PathBuf::from("/home/zeroclaw_user/project"),
        ..SecurityPolicy::default()
    };

    let denied = [
        ("/etc/passwd", "resolved path to /etc/passwd must be blocked"),
        ("/root/.bashrc", "resolved path to /root/.bashrc must be blocked"),
    ];
    for (path, why) in denied {
        assert!(!policy.is_resolved_path_allowed(Path::new(path)), "{why}");
    }
}
|
||||
|
||||
// A symlink placed inside the workspace must not grant access to its
// out-of-workspace target once the path is resolved.
#[cfg(unix)]
#[test]
fn resolved_path_blocks_symlink_escape() {
    use std::os::unix::fs::symlink;

    let root = std::env::temp_dir().join("zeroclaw_test_symlink_escape");
    let workspace = root.join("workspace");
    let outside = root.join("outside_target");

    let _ = std::fs::remove_dir_all(&root);
    std::fs::create_dir_all(&workspace).unwrap();
    std::fs::create_dir_all(&outside).unwrap();

    // Link lives inside the workspace; its target lives outside it.
    let escape_link = workspace.join("escape_link");
    symlink(&outside, &escape_link).unwrap();

    let policy = SecurityPolicy {
        workspace_dir: workspace.clone(),
        ..SecurityPolicy::default()
    };

    // Canonicalization follows the link, so the resolved target must fail
    // the containment check.
    let resolved = escape_link.canonicalize().unwrap();
    assert!(
        !policy.is_resolved_path_allowed(&resolved),
        "symlink-resolved path outside workspace must be blocked"
    );

    let _ = std::fs::remove_dir_all(&root);
}
|
||||
|
||||
// Hostile path strings: embedded NULs and percent-encoded traversal.

#[test]
fn is_path_allowed_blocks_null_bytes() {
    let policy = default_policy();
    assert!(
        !policy.is_path_allowed("file\0.txt"),
        "paths with null bytes must be blocked"
    );
}

#[test]
fn is_path_allowed_blocks_url_encoded_traversal() {
    let policy = default_policy();
    let cases = [
        ("..%2fetc%2fpasswd", "URL-encoded path traversal must be blocked"),
        (
            "subdir%2f..%2f..%2fetc",
            "URL-encoded parent dir traversal must be blocked",
        ),
    ];
    for (path, why) in cases {
        assert!(!policy.is_path_allowed(path), "{why}");
    }
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -493,4 +493,85 @@ mod tests {
|
|||
.unwrap_or("")
|
||||
.contains("Rate limit exceeded"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delegate_context_is_prepended_to_prompt() {
|
||||
let mut agents = HashMap::new();
|
||||
agents.insert(
|
||||
"tester".to_string(),
|
||||
DelegateAgentConfig {
|
||||
provider: "invalid-for-test".to_string(),
|
||||
model: "test-model".to_string(),
|
||||
system_prompt: None,
|
||||
api_key: None,
|
||||
temperature: None,
|
||||
max_depth: 3,
|
||||
},
|
||||
);
|
||||
let tool = DelegateTool::new(agents, None, test_security());
|
||||
let result = tool
|
||||
.execute(json!({
|
||||
"agent": "tester",
|
||||
"prompt": "do something",
|
||||
"context": "some context data"
|
||||
}))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.success);
|
||||
assert!(result
|
||||
.error
|
||||
.as_deref()
|
||||
.unwrap_or("")
|
||||
.contains("Failed to create provider"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delegate_empty_context_omits_prefix() {
|
||||
let mut agents = HashMap::new();
|
||||
agents.insert(
|
||||
"tester".to_string(),
|
||||
DelegateAgentConfig {
|
||||
provider: "invalid-for-test".to_string(),
|
||||
model: "test-model".to_string(),
|
||||
system_prompt: None,
|
||||
api_key: None,
|
||||
temperature: None,
|
||||
max_depth: 3,
|
||||
},
|
||||
);
|
||||
let tool = DelegateTool::new(agents, None, test_security());
|
||||
let result = tool
|
||||
.execute(json!({
|
||||
"agent": "tester",
|
||||
"prompt": "do something",
|
||||
"context": ""
|
||||
}))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.success);
|
||||
assert!(result
|
||||
.error
|
||||
.as_deref()
|
||||
.unwrap_or("")
|
||||
.contains("Failed to create provider"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn delegate_depth_construction() {
|
||||
let tool = DelegateTool::with_depth(sample_agents(), None, test_security(), 5);
|
||||
assert_eq!(tool.depth, 5);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn delegate_no_agents_configured() {
|
||||
let tool = DelegateTool::new(HashMap::new(), None, test_security());
|
||||
let result = tool
|
||||
.execute(json!({"agent": "any", "prompt": "test"}))
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!result.success);
|
||||
assert!(result.error.unwrap().contains("none configured"));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -407,4 +407,62 @@ mod tests {
|
|||
|
||||
let _ = tokio::fs::remove_dir_all(&dir).await;
|
||||
}
|
||||
|
||||
// ── §5.1 TOCTOU / symlink file write protection tests ────
|
||||
|
||||
#[cfg(unix)]
|
||||
#[tokio::test]
|
||||
async fn file_write_blocks_symlink_target_file() {
|
||||
use std::os::unix::fs::symlink;
|
||||
|
||||
let root = std::env::temp_dir().join("zeroclaw_test_file_write_symlink_target");
|
||||
let workspace = root.join("workspace");
|
||||
let outside = root.join("outside");
|
||||
|
||||
let _ = tokio::fs::remove_dir_all(&root).await;
|
||||
tokio::fs::create_dir_all(&workspace).await.unwrap();
|
||||
tokio::fs::create_dir_all(&outside).await.unwrap();
|
||||
|
||||
// Create a file outside and symlink to it inside workspace
|
||||
tokio::fs::write(outside.join("target.txt"), "original")
|
||||
.await
|
||||
.unwrap();
|
||||
symlink(outside.join("target.txt"), workspace.join("linked.txt")).unwrap();
|
||||
|
||||
let tool = FileWriteTool::new(test_security(workspace.clone()));
|
||||
let result = tool
|
||||
.execute(json!({"path": "linked.txt", "content": "overwritten"}))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(!result.success, "writing through symlink must be blocked");
|
||||
assert!(
|
||||
result.error.as_deref().unwrap_or("").contains("symlink"),
|
||||
"error should mention symlink"
|
||||
);
|
||||
|
||||
// Verify original file was not modified
|
||||
let content = tokio::fs::read_to_string(outside.join("target.txt"))
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(content, "original", "original file must not be modified");
|
||||
|
||||
let _ = tokio::fs::remove_dir_all(&root).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn file_write_blocks_null_byte_in_path() {
|
||||
let dir = std::env::temp_dir().join("zeroclaw_test_file_write_null");
|
||||
let _ = tokio::fs::remove_dir_all(&dir).await;
|
||||
tokio::fs::create_dir_all(&dir).await.unwrap();
|
||||
|
||||
let tool = FileWriteTool::new(test_security(dir.clone()));
|
||||
let result = tool
|
||||
.execute(json!({"path": "file\u{0000}.txt", "content": "bad"}))
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(!result.success, "paths with null bytes must be blocked");
|
||||
|
||||
let _ = tokio::fs::remove_dir_all(&dir).await;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -808,4 +808,73 @@ mod tests {
|
|||
let tool = test_tool(vec!["example.com"]);
|
||||
assert_eq!(tool.name(), "http_request");
|
||||
}
|
||||
|
||||
// ── §1.4 DNS rebinding / SSRF defense-in-depth tests ─────
// is_private_or_local_host must classify loopback, RFC-1918, unspecified,
// and local-only hostnames as private.

#[test]
fn ssrf_blocks_loopback_127_range() {
    // The whole 127.0.0.0/8 block is loopback, not just 127.0.0.1.
    for ip in ["127.0.0.1", "127.0.0.2", "127.255.255.255"] {
        assert!(is_private_or_local_host(ip));
    }
}

#[test]
fn ssrf_blocks_rfc1918_10_range() {
    for ip in ["10.0.0.1", "10.255.255.255"] {
        assert!(is_private_or_local_host(ip));
    }
}

#[test]
fn ssrf_blocks_rfc1918_172_range() {
    // 172.16.0.0/12 spans 172.16.x.x through 172.31.x.x.
    for ip in ["172.16.0.1", "172.31.255.255"] {
        assert!(is_private_or_local_host(ip));
    }
}

#[test]
fn ssrf_blocks_unspecified_address() {
    assert!(is_private_or_local_host("0.0.0.0"));
}

#[test]
fn ssrf_blocks_dot_localhost_subdomain() {
    for host in ["evil.localhost", "a.b.localhost"] {
        assert!(is_private_or_local_host(host));
    }
}

#[test]
fn ssrf_blocks_dot_local_tld() {
    assert!(is_private_or_local_host("service.local"));
}

#[test]
fn ssrf_ipv6_unspecified() {
    assert!(is_private_or_local_host("::"));
}
|
||||
|
||||
// validate_url must reject non-HTTP schemes, empty input, and raw IPv6
// hosts, with an error message naming the problem.

#[test]
fn validate_rejects_ftp_scheme() {
    let tool = test_tool(vec!["example.com"]);
    let message = tool
        .validate_url("ftp://example.com")
        .unwrap_err()
        .to_string();
    assert!(message.contains("http://") || message.contains("https://"));
}

#[test]
fn validate_rejects_empty_url() {
    let message = test_tool(vec!["example.com"])
        .validate_url("")
        .unwrap_err()
        .to_string();
    assert!(message.contains("empty"));
}

#[test]
fn validate_rejects_ipv6_host() {
    let tool = test_tool(vec!["example.com"]);
    let message = tool
        .validate_url("http://[::1]:8080/path")
        .unwrap_err()
        .to_string();
    assert!(message.contains("IPv6"));
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -365,4 +365,62 @@ mod tests {
|
|||
|
||||
let _ = std::fs::remove_file(std::env::temp_dir().join("zeroclaw_shell_approval_test"));
|
||||
}
|
||||
|
||||
// ── §5.2 Shell timeout enforcement tests ─────────────────
// Pin the safety constants so a change to either shows up as a test diff.

#[test]
fn shell_timeout_constant_is_reasonable() {
    assert_eq!(SHELL_TIMEOUT_SECS, 60, "shell timeout must be 60 seconds");
}

#[test]
fn shell_output_limit_is_1mb() {
    assert_eq!(
        MAX_OUTPUT_BYTES, 1_048_576,
        "max output must be 1 MB to prevent OOM"
    );
}
|
||||
|
||||
// ── §5.3 Non-UTF8 binary output tests ────────────────────
// NOTE(review): the two tests below exercise env-var filtering, not binary
// output — confirm the §5.3 label against the spec section numbering.

// The passthrough allow-list must never leak credential-shaped variables.
#[test]
fn shell_safe_env_vars_excludes_secrets() {
    for var in SAFE_ENV_VARS {
        let lower = var.to_lowercase();
        let looks_sensitive =
            lower.contains("key") || lower.contains("secret") || lower.contains("token");
        assert!(
            !looks_sensitive,
            "SAFE_ENV_VARS must not include sensitive variable: {var}"
        );
    }
}

// ...while still carrying the variables a shell needs to function at all.
#[test]
fn shell_safe_env_vars_includes_essentials() {
    for essential in ["PATH", "HOME", "TERM"] {
        assert!(
            SAFE_ENV_VARS.contains(&essential),
            "{essential} must be in safe env vars"
        );
    }
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn shell_blocks_rate_limited() {
|
||||
let security = Arc::new(SecurityPolicy {
|
||||
autonomy: AutonomyLevel::Supervised,
|
||||
max_actions_per_hour: 0,
|
||||
workspace_dir: std::env::temp_dir(),
|
||||
..SecurityPolicy::default()
|
||||
});
|
||||
let tool = ShellTool::new(security, test_runtime());
|
||||
let result = tool.execute(json!({"command": "echo test"})).await.unwrap();
|
||||
assert!(!result.success);
|
||||
assert!(result.error.as_deref().unwrap_or("").contains("Rate limit"));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue