diff --git a/Cargo.toml b/Cargo.toml
index d1ba9ed..15d4665 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,7 @@ name = "zeroclaw"
version = "0.1.0"
edition = "2021"
authors = ["theonlyhennygod"]
-license = "MIT"
+license = "Apache-2.0"
description = "Zero overhead. Zero compromise. 100% Rust. The fastest, smallest AI assistant."
repository = "https://github.com/zeroclaw-labs/zeroclaw"
readme = "README.md"
diff --git a/Dockerfile b/Dockerfile
index e79f2d9..693e4de 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,32 +1,35 @@
-# syntax=docker/dockerfile:1
+# syntax=docker/dockerfile:1.7
# ── Stage 1: Build ────────────────────────────────────────────
-FROM rust:1.93-slim-trixie@sha256:9663b80a1621253d30b146454f903de48f0af925c967be48c84745537cd35d8b AS builder
+FROM rust:1.92-slim@sha256:bf3368a992915f128293ac76917ab6e561e4dda883273c8f5c9f6f8ea37a378e AS builder
WORKDIR /app
# Install build dependencies
-RUN apt-get update && apt-get install -y \
- pkg-config \
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update && apt-get install -y \
+ pkg-config \
&& rm -rf /var/lib/apt/lists/*
# 1. Copy manifests to cache dependencies
COPY Cargo.toml Cargo.lock ./
# Create dummy main.rs to build dependencies
RUN mkdir src && echo "fn main() {}" > src/main.rs
-RUN --mount=type=cache,target=/usr/local/cargo/registry \
- --mount=type=cache,target=/usr/local/cargo/git \
+RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
+ --mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
+ --mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
cargo build --release --locked
RUN rm -rf src
# 2. Copy source code
COPY . .
-# Touch main.rs to force rebuild
-RUN touch src/main.rs
-RUN --mount=type=cache,target=/usr/local/cargo/registry \
- --mount=type=cache,target=/usr/local/cargo/git \
+RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
+ --mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
+ --mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
cargo build --release --locked && \
- strip target/release/zeroclaw
+ cp target/release/zeroclaw /app/zeroclaw && \
+ strip /app/zeroclaw
# ── Stage 2: Permissions & Config Prep ───────────────────────
FROM busybox:1.37@sha256:b3255e7dfbcd10cb367af0d409747d511aeb66dfac98cf30e97e87e4207dd76f AS permissions
@@ -35,7 +38,7 @@ RUN mkdir -p /zeroclaw-data/.zeroclaw /zeroclaw-data/workspace
# Create minimal config for PRODUCTION (allows binding to public interfaces)
# NOTE: Provider configuration must be done via environment variables at runtime
-RUN cat > /zeroclaw-data/.zeroclaw/config.toml << 'EOF'
+RUN cat > /zeroclaw-data/.zeroclaw/config.toml << 'EOF'
-
Fast, small, and fully autonomous AI assistant infrastructure — deploy anywhere, swap anything.
@@ -616,12 +615,6 @@ For high-throughput collaboration and consistent reviews:
- CI ownership and triage map: [docs/ci-map.md](docs/ci-map.md)
- Security disclosure policy: [SECURITY.md](SECURITY.md)
-## Support
-
-ZeroClaw is an open-source project maintained with passion. If you find it useful and would like to support its continued development, hardware for testing, and coffee for the maintainer, you can support me here:
-
-
-
### 🙏 Special Thanks
A heartfelt thank you to the communities and institutions that inspire and fuel this open-source work:
diff --git a/dev/README.md b/dev/README.md
index 12fcb4b..427b566 100644
--- a/dev/README.md
+++ b/dev/README.md
@@ -163,5 +163,7 @@ Note: local `deny` focuses on license/source policy; advisory scanning is handle
### Build cache notes
- Both `Dockerfile` and `dev/ci/Dockerfile` use BuildKit cache mounts for Cargo registry/git data.
+- The root `Dockerfile` also caches Rust `target/` (`id=zeroclaw-target`) to speed repeat local image builds.
- Local CI reuses named Docker volumes for Cargo registry/git and target outputs.
+- `./dev/ci.sh docker-smoke` and `./dev/ci.sh all` now use `docker buildx` local cache at `.cache/buildx-smoke` when available.
- The CI image keeps Rust toolchain defaults from `rust:1.92-slim` and installs pinned toolchain `1.92.0` (no custom `CARGO_HOME`/`RUSTUP_HOME` overrides), preventing repeated toolchain bootstrapping on each run.
diff --git a/dev/ci.sh b/dev/ci.sh
index 61bf73b..a348a19 100755
--- a/dev/ci.sh
+++ b/dev/ci.sh
@@ -11,12 +11,32 @@ else
fi
compose_cmd=(docker compose -f "$COMPOSE_FILE")
+SMOKE_CACHE_DIR="${SMOKE_CACHE_DIR:-.cache/buildx-smoke}"
run_in_ci() {
local cmd="$1"
"${compose_cmd[@]}" run --rm local-ci bash -c "$cmd"
}
+build_smoke_image() {
+ if docker buildx version >/dev/null 2>&1; then
+ mkdir -p "$SMOKE_CACHE_DIR"
+ local build_args=(
+ --load
+ --target dev
+ --cache-to "type=local,dest=$SMOKE_CACHE_DIR,mode=max"
+ -t zeroclaw-local-smoke:latest
+ .
+ )
+ if [ -f "$SMOKE_CACHE_DIR/index.json" ]; then
+ build_args=(--cache-from "type=local,src=$SMOKE_CACHE_DIR" "${build_args[@]}")
+ fi
+ docker buildx build "${build_args[@]}"
+ else
+ DOCKER_BUILDKIT=1 docker build --target dev -t zeroclaw-local-smoke:latest .
+ fi
+}
+
print_help() {
cat <<'EOF'
ZeroClaw Local CI in Docker
@@ -88,7 +108,7 @@ case "$1" in
;;
docker-smoke)
- docker build --target dev -t zeroclaw-local-smoke:latest .
+ build_smoke_image
docker run --rm zeroclaw-local-smoke:latest --version
;;
@@ -98,7 +118,7 @@ case "$1" in
run_in_ci "cargo build --release --locked --verbose"
run_in_ci "cargo deny check licenses sources"
run_in_ci "cargo audit"
- docker build --target dev -t zeroclaw-local-smoke:latest .
+ build_smoke_image
docker run --rm zeroclaw-local-smoke:latest --version
;;
diff --git a/src/agent/agent.rs b/src/agent/agent.rs
index 4495736..3e5693e 100644
--- a/src/agent/agent.rs
+++ b/src/agent/agent.rs
@@ -568,7 +568,7 @@ pub async fn run(
mod tests {
use super::*;
use async_trait::async_trait;
- use std::sync::Mutex;
+ use parking_lot::Mutex;
struct MockProvider {
    responses: Mutex<Vec<crate::providers::ChatResponse>>,
@@ -592,7 +592,7 @@ mod tests {
_model: &str,
_temperature: f64,
    ) -> Result<crate::providers::ChatResponse> {
- let mut guard = self.responses.lock().unwrap();
+ let mut guard = self.responses.lock();
if guard.is_empty() {
return Ok(crate::providers::ChatResponse {
text: Some("done".into()),
diff --git a/src/channels/discord.rs b/src/channels/discord.rs
index 10578d2..9f7d429 100644
--- a/src/channels/discord.rs
+++ b/src/channels/discord.rs
@@ -363,11 +363,7 @@ impl Channel for DiscordChannel {
};
let message_id = d.get("id").and_then(|i| i.as_str()).unwrap_or("");
- let channel_id = d
- .get("channel_id")
- .and_then(|c| c.as_str())
- .unwrap_or("")
- .to_string();
+ let channel_id = d.get("channel_id").and_then(|c| c.as_str()).unwrap_or("").to_string();
let channel_msg = ChannelMessage {
id: if message_id.is_empty() {
@@ -379,10 +375,10 @@ impl Channel for DiscordChannel {
reply_target: if channel_id.is_empty() {
author_id.to_string()
} else {
- channel_id
+ channel_id.clone()
},
- content: content.to_string(),
- channel: "discord".to_string(),
+ content: clean_content,
+ channel: channel_id,
timestamp: std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
diff --git a/src/channels/email_channel.rs b/src/channels/email_channel.rs
index 709ba18..e59e0ac 100644
--- a/src/channels/email_channel.rs
+++ b/src/channels/email_channel.rs
@@ -14,11 +14,11 @@ use lettre::message::SinglePart;
use lettre::transport::smtp::authentication::Credentials;
use lettre::{Message, SmtpTransport, Transport};
use mail_parser::{MessageParser, MimeHeaders};
+use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::io::Write as IoWrite;
use std::net::TcpStream;
-use std::sync::Mutex;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc;
use tokio::time::{interval, sleep};
@@ -413,10 +413,7 @@ impl Channel for EmailChannel {
Ok(Ok(messages)) => {
for (id, sender, content, ts) in messages {
{
- let mut seen = self
- .seen_messages
- .lock()
- .expect("seen_messages mutex should not be poisoned");
+ let mut seen = self.seen_messages.lock();
if seen.contains(&id) {
continue;
}
@@ -488,20 +485,14 @@ mod tests {
#[test]
fn seen_messages_starts_empty() {
let channel = EmailChannel::new(EmailConfig::default());
- let seen = channel
- .seen_messages
- .lock()
- .expect("seen_messages mutex should not be poisoned");
+ let seen = channel.seen_messages.lock();
assert!(seen.is_empty());
}
#[test]
fn seen_messages_tracks_unique_ids() {
let channel = EmailChannel::new(EmailConfig::default());
- let mut seen = channel
- .seen_messages
- .lock()
- .expect("seen_messages mutex should not be poisoned");
+ let mut seen = channel.seen_messages.lock();
assert!(seen.insert("first-id".to_string()));
assert!(!seen.insert("first-id".to_string()));
@@ -576,10 +567,7 @@ mod tests {
let channel = EmailChannel::new(config.clone());
assert_eq!(channel.config.imap_host, config.imap_host);
- let seen_guard = channel
- .seen_messages
- .lock()
- .expect("seen_messages mutex should not be poisoned");
+ let seen_guard = channel.seen_messages.lock();
assert_eq!(seen_guard.len(), 0);
}
diff --git a/src/gateway/mod.rs b/src/gateway/mod.rs
index 001fc35..7c618ed 100644
--- a/src/gateway/mod.rs
+++ b/src/gateway/mod.rs
@@ -25,9 +25,10 @@ use axum::{
routing::{get, post},
Router,
};
+use parking_lot::Mutex;
use std::collections::HashMap;
use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
use std::time::{Duration, Instant};
use tower_http::limit::RequestBodyLimitLayer;
use tower_http::timeout::TimeoutLayer;
@@ -82,10 +83,7 @@ impl SlidingWindowRateLimiter {
let now = Instant::now();
let cutoff = now.checked_sub(self.window).unwrap_or_else(Instant::now);
- let mut guard = self
- .requests
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner);
+ let mut guard = self.requests.lock();
let (requests, last_sweep) = &mut *guard;
// Periodic sweep: remove IPs with no recent requests
@@ -150,10 +148,7 @@ impl IdempotencyStore {
/// Returns true if this key is new and is now recorded.
fn record_if_new(&self, key: &str) -> bool {
let now = Instant::now();
- let mut keys = self
- .keys
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner);
+ let mut keys = self.keys.lock();
keys.retain(|_, seen_at| now.duration_since(*seen_at) < self.ttl);
@@ -739,8 +734,8 @@ mod tests {
use axum::http::HeaderValue;
use axum::response::IntoResponse;
use http_body_util::BodyExt;
+ use parking_lot::Mutex;
use std::sync::atomic::{AtomicUsize, Ordering};
- use std::sync::Mutex;
#[test]
fn security_body_limit_is_64kb() {
@@ -797,19 +792,13 @@ mod tests {
assert!(limiter.allow("ip-3"));
{
- let guard = limiter
- .requests
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner);
+ let guard = limiter.requests.lock();
assert_eq!(guard.0.len(), 3);
}
// Force a sweep by backdating last_sweep
{
- let mut guard = limiter
- .requests
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner);
+ let mut guard = limiter.requests.lock();
guard.1 = Instant::now()
.checked_sub(Duration::from_secs(RATE_LIMITER_SWEEP_INTERVAL_SECS + 1))
.unwrap();
@@ -822,10 +811,7 @@ mod tests {
assert!(limiter.allow("ip-1"));
{
- let guard = limiter
- .requests
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner);
+ let guard = limiter.requests.lock();
assert_eq!(guard.0.len(), 1, "Stale entries should have been swept");
assert!(guard.0.contains_key("ip-1"));
}
@@ -962,10 +948,7 @@ mod tests {
_category: MemoryCategory,
_session_id: Option<&str>,
) -> anyhow::Result<()> {
- self.keys
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner)
- .push(key.to_string());
+ self.keys.lock().push(key.to_string());
Ok(())
}
@@ -995,11 +978,7 @@ mod tests {
}
    async fn count(&self) -> anyhow::Result<usize> {
- let size = self
- .keys
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner)
- .len();
+ let size = self.keys.lock().len();
Ok(size)
}
@@ -1094,11 +1073,7 @@ mod tests {
.into_response();
assert_eq!(second.status(), StatusCode::OK);
- let keys = tracking_impl
- .keys
- .lock()
- .unwrap_or_else(std::sync::PoisonError::into_inner)
- .clone();
+ let keys = tracking_impl.keys.lock().clone();
assert_eq!(keys.len(), 2);
assert_ne!(keys[0], keys[1]);
assert!(keys[0].starts_with("webhook_msg_"));
diff --git a/src/memory/lucid.rs b/src/memory/lucid.rs
index 454d0dc..7ea75a0 100644
--- a/src/memory/lucid.rs
+++ b/src/memory/lucid.rs
@@ -2,9 +2,9 @@ use super::sqlite::SqliteMemory;
use super::traits::{Memory, MemoryCategory, MemoryEntry};
use async_trait::async_trait;
use chrono::Local;
+use parking_lot::Mutex;
use std::collections::HashSet;
use std::path::{Path, PathBuf};
-use std::sync::Mutex;
use std::time::{Duration, Instant};
use tokio::process::Command;
use tokio::time::timeout;
@@ -116,9 +116,7 @@ impl LucidMemory {
}
fn in_failure_cooldown(&self) -> bool {
- let Ok(guard) = self.last_failure_at.lock() else {
- return false;
- };
+ let guard = self.last_failure_at.lock();
guard
.as_ref()
@@ -126,15 +124,11 @@ impl LucidMemory {
}
fn mark_failure_now(&self) {
- if let Ok(mut guard) = self.last_failure_at.lock() {
- *guard = Some(Instant::now());
- }
+ *self.last_failure_at.lock() = Some(Instant::now());
}
fn clear_failure(&self) {
- if let Ok(mut guard) = self.last_failure_at.lock() {
- *guard = None;
- }
+ *self.last_failure_at.lock() = None;
}
fn to_lucid_type(category: &MemoryCategory) -> &'static str {
@@ -565,11 +559,12 @@ exit 1
"local_note",
"Local sqlite auth fallback note",
MemoryCategory::Core,
+ None,
)
.await
.unwrap();
- let entries = memory.recall("auth", 5).await.unwrap();
+ let entries = memory.recall("auth", 5, None).await.unwrap();
assert!(entries
.iter()
diff --git a/src/memory/response_cache.rs b/src/memory/response_cache.rs
index e7fb3f2..62fae6c 100644
--- a/src/memory/response_cache.rs
+++ b/src/memory/response_cache.rs
@@ -7,10 +7,10 @@
use anyhow::Result;
use chrono::{Duration, Local};
+use parking_lot::Mutex;
use rusqlite::{params, Connection};
use sha2::{Digest, Sha256};
use std::path::{Path, PathBuf};
-use std::sync::Mutex;
/// Response cache backed by a dedicated SQLite database.
///
@@ -77,10 +77,7 @@ impl ResponseCache {
/// Look up a cached response. Returns `None` on miss or expired entry.
pub fn get(&self, key: &str) -> Result