diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..8fd5e96
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,66 @@
+# Git history (may contain old secrets)
+.git
+.gitignore
+.githooks
+
+# Rust build artifacts (can be multiple GB)
+target
+
+# Documentation and examples (not needed for runtime)
+docs
+examples
+tests
+
+# Markdown files (README, CHANGELOG, etc.)
+*.md
+
+# Images (unnecessary for build)
+*.png
+*.svg
+*.jpg
+*.jpeg
+*.gif
+
+# SQLite databases (conversation history, cron jobs)
+*.db
+*.db-journal
+
+# macOS artifacts
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# CI/CD configs (not needed in image)
+.github
+
+# Cargo deny config (lint tool, not runtime)
+deny.toml
+
+# License file (not needed for runtime)
+LICENSE
+
+# Temporary files
+.tmp_*
+*.tmp
+*.bak
+*.swp
+*~
+
+# IDE and editor configs
+.idea
+.vscode
+*.iml
+
+# Windsurf workflows
+.windsurf
+
+# Environment files (may contain secrets)
+.env
+.env.*
+!.env.example
+
+# Coverage and profiling
+*.profraw
+*.profdata
+coverage
+lcov.info
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 920fdfa..50b0524 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -63,3 +63,40 @@ jobs:
with:
name: zeroclaw-${{ matrix.target }}
path: target/${{ matrix.target }}/release/zeroclaw*
+
+ docker:
+ name: Docker Security
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Build Docker image
+ run: docker build -t zeroclaw:test .
+
+ - name: Verify non-root user (UID != 0)
+ run: |
+ USER_ID=$(docker inspect --format='{{.Config.User}}' zeroclaw:test)
+ echo "Container user: $USER_ID"
+ if [ -z "$USER_ID" ] || [ "${USER_ID%%:*}" = "0" ] || [ "${USER_ID%%:*}" = "root" ]; then
+ echo "❌ FAIL: Container runs as root (UID 0)"
+ exit 1
+ fi
+ echo "✅ PASS: Container runs as non-root user ($USER_ID)"
+
+ - name: Verify distroless nonroot base image
+ run: |
+ BASE_IMAGE=$(grep -E '^FROM.*runtime|^FROM gcr.io/distroless' Dockerfile | tail -1)
+ echo "Base image line: $BASE_IMAGE"
+ if ! echo "$BASE_IMAGE" | grep -q ':nonroot'; then
+ echo "❌ FAIL: Runtime stage does not use :nonroot variant"
+ exit 1
+ fi
+ echo "✅ PASS: Using distroless :nonroot variant"
+
+ - name: Verify USER directive exists
+ run: |
+ if ! grep -qE '^USER\s+[0-9]+' Dockerfile; then
+ echo "❌ FAIL: No explicit USER directive with numeric UID"
+ exit 1
+ fi
+ echo "✅ PASS: Explicit USER directive found"
diff --git a/.tmp_todo_probe b/.tmp_todo_probe
new file mode 100644
index 0000000..e69de29
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8ec9d30..e1ac7be 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,24 @@ All notable changes to ZeroClaw will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [Unreleased]
+
+### Security
+- **Legacy XOR cipher migration**: The `enc:` prefix (XOR cipher) is now deprecated.
+ Secrets using this format will be automatically migrated to `enc2:` (ChaCha20-Poly1305 AEAD)
+ when decrypted via `decrypt_and_migrate()`. A `tracing::warn!` is emitted when legacy
+ values are encountered. The XOR cipher will be removed in a future release.
+
+### Added
+- `SecretStore::decrypt_and_migrate()` — Decrypts secrets and returns a migrated `enc2:`
+ value if the input used the legacy `enc:` format
+- `SecretStore::needs_migration()` — Check if a value uses the legacy `enc:` format
+- `SecretStore::is_secure_encrypted()` — Check if a value uses the secure `enc2:` format
+
+### Deprecated
+- `enc:` prefix for encrypted secrets — Use `enc2:` (ChaCha20-Poly1305) instead.
+ Legacy values are still decrypted for backward compatibility but should be migrated.
+
## [0.1.0] - 2025-02-13
### Added
diff --git a/Cargo.toml b/Cargo.toml
index 13a6334..fbf6ba5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,7 +15,7 @@ categories = ["command-line-utilities", "api-bindings"]
clap = { version = "4.5", features = ["derive"] }
# Async runtime - feature-optimized for size
-tokio = { version = "1.42", default-features = false, features = ["rt-multi-thread", "macros", "time", "net", "io-util", "sync", "process", "io-std", "fs"] }
+tokio = { version = "1.42", default-features = false, features = ["rt-multi-thread", "macros", "time", "net", "io-util", "sync", "process", "io-std", "fs", "signal"] }
# HTTP client - minimal features
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "blocking"] }
@@ -49,6 +49,7 @@ async-trait = "0.1"
# Memory / persistence
rusqlite = { version = "0.32", features = ["bundled"] }
chrono = { version = "0.4", default-features = false, features = ["clock", "std"] }
+cron = "0.12"
# Interactive CLI prompts
dialoguer = { version = "0.11", features = ["fuzzy-select"] }
@@ -64,6 +65,12 @@ rustls-pki-types = "1.14.0"
tokio-rustls = "0.26.4"
webpki-roots = "1.0.6"
+# HTTP server (gateway) — replaces raw TCP for proper HTTP/1.1 compliance
+axum = { version = "0.7", default-features = false, features = ["http1", "json", "tokio", "query"] }
+tower = { version = "0.5", default-features = false }
+tower-http = { version = "0.6", default-features = false, features = ["limit", "timeout"] }
+http-body-util = "0.1"
+
[profile.release]
opt-level = "z" # Optimize for size
lto = true # Link-time optimization
diff --git a/Dockerfile b/Dockerfile
index 71a301f..7d684df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,14 +8,17 @@ COPY src/ src/
RUN cargo build --release --locked && \
strip target/release/zeroclaw
-# ── Stage 2: Runtime (distroless — no shell, no OS, tiny) ────
-FROM gcr.io/distroless/cc-debian12
+# ── Stage 2: Runtime (distroless nonroot — no shell, no OS, tiny, UID 65534) ──
+FROM gcr.io/distroless/cc-debian12:nonroot
COPY --from=builder /app/target/release/zeroclaw /usr/local/bin/zeroclaw
-# Default workspace
+# Default workspace (owned by nonroot user)
VOLUME ["/workspace"]
ENV ZEROCLAW_WORKSPACE=/workspace
+# Explicitly set non-root user (distroless:nonroot defaults to 65534, but be explicit)
+USER 65534:65534
+
ENTRYPOINT ["zeroclaw"]
CMD ["gateway"]
diff --git a/README.md b/README.md
index 5efbbf7..6b3cbe7 100644
--- a/README.md
+++ b/README.md
@@ -12,12 +12,19 @@
-The fastest, smallest, fully autonomous AI assistant — deploy anywhere, swap anything.
+Fast, small, and fully autonomous AI assistant infrastructure — deploy anywhere, swap anything.
```
~3.4MB binary · <10ms startup · 1,017 tests · 22+ providers · 8 traits · Pluggable everything
```
+### Why teams pick ZeroClaw
+
+- **Lean by default:** small Rust binary, fast startup, low memory footprint.
+- **Secure by design:** pairing, strict sandboxing, explicit allowlists, workspace scoping.
+- **Fully swappable:** core systems are traits (providers, channels, tools, memory, tunnels).
+- **No lock-in:** OpenAI-compatible provider support + pluggable custom endpoints.
+
## Benchmark Snapshot (ZeroClaw vs OpenClaw)
Local machine quick benchmark (macOS arm64, Feb 2026), same host, 3 runs each.
@@ -30,7 +37,17 @@ Local machine quick benchmark (macOS arm64, Feb 2026), same host, 3 runs each.
| `--help` max RSS observed | **~7.3 MB** | **~394 MB** |
| `status` max RSS observed | **~7.8 MB** | **~1.52 GB** |
-> Notes: measured with `/usr/bin/time -l`; first run includes cold-start effects. OpenClaw results include `pnpm install` + `pnpm build` before execution.
+> Notes: measured with `/usr/bin/time -l`; first run includes cold-start effects. OpenClaw results were measured after `pnpm install` + `pnpm build`.
+
+Reproduce ZeroClaw numbers locally:
+
+```bash
+cargo build --release
+ls -lh target/release/zeroclaw
+
+/usr/bin/time -l target/release/zeroclaw --help
+/usr/bin/time -l target/release/zeroclaw status
+```
## Quick Start
@@ -38,34 +55,52 @@ Local machine quick benchmark (macOS arm64, Feb 2026), same host, 3 runs each.
git clone https://github.com/theonlyhennygod/zeroclaw.git
cd zeroclaw
cargo build --release
+cargo install --path . --force
# Quick setup (no prompts)
-cargo run --release -- onboard --api-key sk-... --provider openrouter
+zeroclaw onboard --api-key sk-... --provider openrouter
# Or interactive wizard
-cargo run --release -- onboard --interactive
+zeroclaw onboard --interactive
+
+# Or quickly repair channels/allowlists only
+zeroclaw onboard --channels-only
# Chat
-cargo run --release -- agent -m "Hello, ZeroClaw!"
+zeroclaw agent -m "Hello, ZeroClaw!"
# Interactive mode
-cargo run --release -- agent
+zeroclaw agent
# Start the gateway (webhook server)
-cargo run --release -- gateway # default: 127.0.0.1:8080
-cargo run --release -- gateway --port 0 # random port (security hardened)
+zeroclaw gateway # default: 127.0.0.1:8080
+zeroclaw gateway --port 0 # random port (security hardened)
+
+# Start full autonomous runtime
+zeroclaw daemon
# Check status
-cargo run --release -- status
+zeroclaw status
+
+# Run system diagnostics
+zeroclaw doctor
# Check channel health
-cargo run --release -- channel doctor
+zeroclaw channel doctor
# Get integration setup details
-cargo run --release -- integrations info Telegram
+zeroclaw integrations info Telegram
+
+# Manage background service
+zeroclaw service install
+zeroclaw service status
+
+# Migrate memory from OpenClaw (safe preview first)
+zeroclaw migrate openclaw --dry-run
+zeroclaw migrate openclaw
```
-> **Tip:** Run `cargo install --path .` to install `zeroclaw` globally, then use `zeroclaw` instead of `cargo run --release --`.
+> **Dev fallback (no global install):** prefix commands with `cargo run --release --` (example: `cargo run --release -- status`).
## Architecture
@@ -78,17 +113,25 @@ Every subsystem is a **trait** — swap implementations with a config change, ze
| Subsystem | Trait | Ships with | Extend |
|-----------|-------|------------|--------|
| **AI Models** | `Provider` | 22+ providers (OpenRouter, Anthropic, OpenAI, Ollama, Venice, Groq, Mistral, xAI, DeepSeek, Together, Fireworks, Perplexity, Cohere, Bedrock, etc.) | `custom:https://your-api.com` — any OpenAI-compatible API |
-| **Channels** | `Channel` | CLI, Telegram, Discord, Slack, iMessage, Matrix, Webhook | Any messaging API |
+| **Channels** | `Channel` | CLI, Telegram, Discord, Slack, iMessage, Matrix, WhatsApp, Webhook | Any messaging API |
| **Memory** | `Memory` | SQLite with hybrid search (FTS5 + vector cosine similarity), Markdown | Any persistence backend |
| **Tools** | `Tool` | shell, file_read, file_write, memory_store, memory_recall, memory_forget, browser_open (Brave + allowlist), composio (optional) | Any capability |
| **Observability** | `Observer` | Noop, Log, Multi | Prometheus, OTel |
-| **Runtime** | `RuntimeAdapter` | Native (Mac/Linux/Pi) | Docker, WASM |
+| **Runtime** | `RuntimeAdapter` | Native (Mac/Linux/Pi) | Docker, WASM (planned; unsupported kinds fail fast) |
| **Security** | `SecurityPolicy` | Gateway pairing, sandbox, allowlists, rate limits, filesystem scoping, encrypted secrets | — |
+| **Identity** | `IdentityConfig` | OpenClaw (markdown), AIEOS v1.1 (JSON) | Any identity format |
| **Tunnel** | `Tunnel` | None, Cloudflare, Tailscale, ngrok, Custom | Any tunnel binary |
| **Heartbeat** | Engine | HEARTBEAT.md periodic tasks | — |
| **Skills** | Loader | TOML manifests + SKILL.md instructions | Community skill packs |
| **Integrations** | Registry | 50+ integrations across 9 categories | Plugin system |
+### Runtime support (current)
+
+- ✅ Supported today: `runtime.kind = "native"`
+- 🚧 Planned, not implemented yet: Docker / WASM / edge runtimes
+
+When an unsupported `runtime.kind` is configured, ZeroClaw now exits with a clear error instead of silently falling back to native.
+
### Memory System (Full-Stack Search Engine)
All custom, zero external dependencies — no Pinecone, no Elasticsearch, no LangChain:
@@ -124,7 +167,7 @@ ZeroClaw enforces security at **every layer** — not just the sandbox. It passe
|---|------|--------|-----|
| 1 | **Gateway not publicly exposed** | ✅ | Binds `127.0.0.1` by default. Refuses `0.0.0.0` without tunnel or explicit `allow_public_bind = true`. |
| 2 | **Pairing required** | ✅ | 6-digit one-time code on startup. Exchange via `POST /pair` for bearer token. All `/webhook` requests require `Authorization: Bearer <token>`. |
-| 3 | **Filesystem scoped (no /)** | ✅ | `workspace_only = true` by default. 14 system dirs + 4 sensitive dotfiles blocked. Null byte injection blocked. Symlink escape detection via canonicalization. |
+| 3 | **Filesystem scoped (no /)** | ✅ | `workspace_only = true` by default. 14 system dirs + 4 sensitive dotfiles blocked. Null byte injection blocked. Symlink escape detection via canonicalization + resolved-path workspace checks in file read/write tools. |
| 4 | **Access via tunnel only** | ✅ | Gateway refuses public bind without active tunnel. Supports Tailscale, Cloudflare, ngrok, or any custom tunnel. |
> **Run your own nmap:** `nmap -p 1-65535 ` — ZeroClaw binds to localhost only, so nothing is exposed unless you explicitly configure a tunnel.
@@ -139,6 +182,63 @@ Inbound sender policy is now consistent:
This keeps accidental exposure low by default.
+Recommended low-friction setup (secure + fast):
+
+- **Telegram:** allowlist your own `@username` (without `@`) and/or your numeric Telegram user ID.
+- **Discord:** allowlist your own Discord user ID.
+- **Slack:** allowlist your own Slack member ID (usually starts with `U`).
+- Use `"*"` only for temporary open testing.
+
+If you're not sure which identity to use:
+
+1. Start channels and send one message to your bot.
+2. Read the warning log to see the exact sender identity.
+3. Add that value to the allowlist and rerun channels-only setup.
+
+If you hit authorization warnings in logs (for example: `ignoring message from unauthorized user`),
+rerun channel setup only:
+
+```bash
+zeroclaw onboard --channels-only
+```
+
+### WhatsApp Business Cloud API Setup
+
+WhatsApp uses Meta's Cloud API with webhooks (push-based, not polling):
+
+1. **Create a Meta Business App:**
+ - Go to [developers.facebook.com](https://developers.facebook.com)
+ - Create a new app → Select "Business" type
+ - Add the "WhatsApp" product
+
+2. **Get your credentials:**
+ - **Access Token:** From WhatsApp → API Setup → Generate token (or create a System User for permanent tokens)
+ - **Phone Number ID:** From WhatsApp → API Setup → Phone number ID
+ - **Verify Token:** You define this (any random string) — Meta will send it back during webhook verification
+
+3. **Configure ZeroClaw:**
+ ```toml
+ [channels_config.whatsapp]
+ access_token = "EAABx..."
+ phone_number_id = "123456789012345"
+ verify_token = "my-secret-verify-token"
+ allowed_numbers = ["+1234567890"] # E.164 format, or ["*"] for all
+ ```
+
+4. **Start the gateway with a tunnel:**
+ ```bash
+ zeroclaw gateway --port 8080
+ ```
+ WhatsApp requires HTTPS, so use a tunnel (ngrok, Cloudflare, Tailscale Funnel).
+
+5. **Configure Meta webhook:**
+ - In Meta Developer Console → WhatsApp → Configuration → Webhook
+ - **Callback URL:** `https://your-tunnel-url/whatsapp`
+ - **Verify Token:** Same as your `verify_token` in config
+ - Subscribe to `messages` field
+
+6. **Test:** Send a message to your WhatsApp Business number — ZeroClaw will respond via the LLM.
+
## Configuration
Config: `~/.zeroclaw/config.toml` (created by `onboard`)
@@ -166,6 +266,9 @@ workspace_only = true # default: true — scoped to workspace
allowed_commands = ["git", "npm", "cargo", "ls", "cat", "grep"]
forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"]
+[runtime]
+kind = "native" # only supported value right now; unsupported kinds fail fast
+
[heartbeat]
enabled = false
interval_minutes = 30
@@ -182,8 +285,81 @@ allowed_domains = ["docs.rs"] # required when browser is enabled
[composio]
enabled = false # opt-in: 1000+ OAuth apps via composio.dev
+
+[identity]
+format = "openclaw" # "openclaw" (default, markdown files) or "aieos" (JSON)
+# aieos_path = "identity.json" # path to AIEOS JSON file (relative to workspace or absolute)
+# aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON
```
+## Identity System (AIEOS Support)
+
+ZeroClaw supports **identity-agnostic** AI personas through two formats:
+
+### OpenClaw (Default)
+
+Traditional markdown files in your workspace:
+- `IDENTITY.md` — Who the agent is
+- `SOUL.md` — Core personality and values
+- `USER.md` — Who the agent is helping
+- `AGENTS.md` — Behavior guidelines
+
+### AIEOS (AI Entity Object Specification)
+
+[AIEOS](https://aieos.org) is a standardization framework for portable AI identity. ZeroClaw supports AIEOS v1.1 JSON payloads, allowing you to:
+
+- **Import identities** from the AIEOS ecosystem
+- **Export identities** to other AIEOS-compatible systems
+- **Maintain behavioral integrity** across different AI models
+
+#### Enable AIEOS
+
+```toml
+[identity]
+format = "aieos"
+aieos_path = "identity.json" # relative to workspace or absolute path
+```
+
+Or inline JSON:
+
+```toml
+[identity]
+format = "aieos"
+aieos_inline = '''
+{
+ "identity": {
+ "names": { "first": "Nova", "nickname": "N" }
+ },
+ "psychology": {
+ "neural_matrix": { "creativity": 0.9, "logic": 0.8 },
+ "traits": { "mbti": "ENTP" },
+ "moral_compass": { "alignment": "Chaotic Good" }
+ },
+ "linguistics": {
+ "text_style": { "formality_level": 0.2, "slang_usage": true }
+ },
+ "motivations": {
+ "core_drive": "Push boundaries and explore possibilities"
+ }
+}
+'''
+```
+
+#### AIEOS Schema Sections
+
+| Section | Description |
+|---------|-------------|
+| `identity` | Names, bio, origin, residence |
+| `psychology` | Neural matrix (cognitive weights), MBTI, OCEAN, moral compass |
+| `linguistics` | Text style, formality, catchphrases, forbidden words |
+| `motivations` | Core drive, short/long-term goals, fears |
+| `capabilities` | Skills and tools the agent can access |
+| `physicality` | Visual descriptors for image generation |
+| `history` | Origin story, education, occupation |
+| `interests` | Hobbies, favorites, lifestyle |
+
+See [aieos.org](https://aieos.org) for the full schema and live examples.
+
## Gateway API
| Endpoint | Method | Auth | Description |
@@ -191,6 +367,8 @@ enabled = false # opt-in: 1000+ OAuth apps via composio.dev
| `/health` | GET | None | Health check (always public, no secrets leaked) |
| `/pair` | POST | `X-Pairing-Code` header | Exchange one-time code for bearer token |
| `/webhook` | POST | `Authorization: Bearer <token>` | Send message: `{"message": "your prompt"}` |
+| `/whatsapp` | GET | Query params | Meta webhook verification (hub.mode, hub.verify_token, hub.challenge) |
+| `/whatsapp` | POST | None (Meta signature) | WhatsApp incoming message webhook |
## Commands
@@ -198,10 +376,14 @@ enabled = false # opt-in: 1000+ OAuth apps via composio.dev
|---------|-------------|
| `onboard` | Quick setup (default) |
| `onboard --interactive` | Full interactive 7-step wizard |
+| `onboard --channels-only` | Reconfigure channels/allowlists only (fast repair flow) |
| `agent -m "..."` | Single message mode |
| `agent` | Interactive chat mode |
| `gateway` | Start webhook server (default: `127.0.0.1:8080`) |
| `gateway --port 0` | Random port mode |
+| `daemon` | Start long-running autonomous runtime |
+| `service install/start/stop/status/uninstall` | Manage user-level background service |
+| `doctor` | Diagnose daemon/scheduler/channel freshness |
| `status` | Show full system status |
| `channel doctor` | Run health checks for configured channels |
| `integrations info <name>` | Show setup/status details for one integration |
diff --git a/SECURITY.md b/SECURITY.md
index 9fc4b11..32c7c28 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -61,3 +61,33 @@ cargo test -- tools::shell
cargo test -- tools::file_read
cargo test -- tools::file_write
```
+
+## Container Security
+
+ZeroClaw Docker images follow CIS Docker Benchmark best practices:
+
+| Control | Implementation |
+|---------|----------------|
+| **4.1 Non-root user** | Container runs as UID 65534 (distroless nonroot) |
+| **4.2 Minimal base image** | `gcr.io/distroless/cc-debian12:nonroot` — no shell, no package manager |
+| **4.6 HEALTHCHECK** | Not applicable (stateless CLI/gateway) |
+| **5.25 Read-only filesystem** | Supported via `docker run --read-only` with `/workspace` volume |
+
+### Verifying Container Security
+
+```bash
+# Build and verify non-root user
+docker build -t zeroclaw .
+docker inspect --format='{{.Config.User}}' zeroclaw
+# Expected: 65534:65534
+
+# Run with read-only filesystem (production hardening)
+docker run --read-only -v /path/to/workspace:/workspace zeroclaw gateway
+```
+
+### CI Enforcement
+
+The `docker` job in `.github/workflows/ci.yml` automatically verifies:
+1. Container does not run as root (UID 0)
+2. Runtime stage uses `:nonroot` variant
+3. Explicit `USER` directive with numeric UID exists
diff --git a/scripts/test_dockerignore.sh b/scripts/test_dockerignore.sh
new file mode 100755
index 0000000..839d21e
--- /dev/null
+++ b/scripts/test_dockerignore.sh
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+# Test script to verify .dockerignore excludes sensitive paths
+# Run: ./scripts/test_dockerignore.sh
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
+DOCKERIGNORE="$PROJECT_ROOT/.dockerignore"
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+PASS=0
+FAIL=0
+
+log_pass() {
+ echo -e "${GREEN}✓${NC} $1"
+ PASS=$((PASS + 1))
+}
+
+log_fail() {
+ echo -e "${RED}✗${NC} $1"
+ FAIL=$((FAIL + 1))
+}
+
+# Test 1: .dockerignore exists
+echo "=== Testing .dockerignore ==="
+if [[ -f "$DOCKERIGNORE" ]]; then
+ log_pass ".dockerignore file exists"
+else
+ log_fail ".dockerignore file does not exist"
+ exit 1
+fi
+
+# Test 2: Required exclusions are present
+MUST_EXCLUDE=(
+ ".git"
+ ".githooks"
+ "target"
+ "docs"
+ "examples"
+ "tests"
+ "*.md"
+ "*.png"
+ "*.db"
+ "*.db-journal"
+ ".DS_Store"
+ ".github"
+ "deny.toml"
+ "LICENSE"
+ ".env"
+ ".tmp_*"
+)
+
+for pattern in "${MUST_EXCLUDE[@]}"; do
+ # Use fgrep for literal matching
+ if grep -Fq "$pattern" "$DOCKERIGNORE" 2>/dev/null; then
+ log_pass "Excludes: $pattern"
+ else
+ log_fail "Missing exclusion: $pattern"
+ fi
+done
+
+# Test 3: Build essentials are NOT excluded
+MUST_NOT_EXCLUDE=(
+ "Cargo.toml"
+ "Cargo.lock"
+ "src"
+)
+
+for path in "${MUST_NOT_EXCLUDE[@]}"; do
+ if grep -qE "^${path}$" "$DOCKERIGNORE" 2>/dev/null; then
+ log_fail "Build essential '$path' is incorrectly excluded"
+ else
+ log_pass "Build essential NOT excluded: $path"
+ fi
+done
+
+# Test 4: No syntax errors (basic validation)
+while IFS= read -r line; do
+ # Skip empty lines and comments
+ [[ -z "$line" || "$line" =~ ^# ]] && continue
+
+ # Check for common issues
+ if [[ "$line" =~ [[:space:]]$ ]]; then
+ log_fail "Trailing whitespace in pattern: '$line'"
+ fi
+done < "$DOCKERIGNORE"
+log_pass "No trailing whitespace in patterns"
+
+# Test 5: Verify Docker build context would be small
+echo ""
+echo "=== Simulating Docker build context ==="
+
+# Create temp dir and simulate what would be sent
+TEMP_DIR=$(mktemp -d)
+trap 'rm -rf "$TEMP_DIR"' EXIT
+
+# Use rsync with .dockerignore patterns to simulate Docker's behavior
+cd "$PROJECT_ROOT"
+
+# Count files that WOULD be sent (excluding .dockerignore patterns)
+TOTAL_FILES=$(find . -type f | wc -l | tr -d ' ')
+CONTEXT_FILES=$(find . -type f \
+ ! -path './.git/*' \
+ ! -path './target/*' \
+ ! -path './docs/*' \
+ ! -path './examples/*' \
+ ! -path './tests/*' \
+ ! -name '*.md' \
+ ! -name '*.png' \
+ ! -name '*.svg' \
+ ! -name '*.db' \
+ ! -name '*.db-journal' \
+ ! -name '.DS_Store' \
+ ! -path './.github/*' \
+ ! -name 'deny.toml' \
+ ! -name 'LICENSE' \
+ ! -name '.env' \
+ ! -name '.env.*' \
+ 2>/dev/null | wc -l | tr -d ' ')
+
+echo "Total files in repo: $TOTAL_FILES"
+echo "Files in Docker context: $CONTEXT_FILES"
+
+if [[ $CONTEXT_FILES -lt $TOTAL_FILES ]]; then
+ log_pass "Docker context is smaller than full repo ($CONTEXT_FILES < $TOTAL_FILES files)"
+else
+ log_fail "Docker context is not being reduced"
+fi
+
+# Test 6: Verify critical security files would be excluded
+echo ""
+echo "=== Security checks ==="
+
+# Check if .git would be excluded
+if [[ -d "$PROJECT_ROOT/.git" ]]; then
+ if grep -q "^\.git$" "$DOCKERIGNORE"; then
+ log_pass ".git directory will be excluded (security)"
+ else
+ log_fail ".git directory NOT excluded - SECURITY RISK"
+ fi
+fi
+
+# Check if any .db files exist and would be excluded
+DB_FILES=$(find "$PROJECT_ROOT" -name "*.db" -type f 2>/dev/null | head -5)
+if [[ -n "$DB_FILES" ]]; then
+ if grep -q "^\*\.db$" "$DOCKERIGNORE"; then
+ log_pass "*.db files will be excluded (security)"
+ else
+ log_fail "*.db files NOT excluded - SECURITY RISK"
+ fi
+fi
+
+# Summary
+echo ""
+echo "=== Summary ==="
+echo -e "Passed: ${GREEN}$PASS${NC}"
+echo -e "Failed: ${RED}$FAIL${NC}"
+
+if [[ $FAIL -gt 0 ]]; then
+ echo -e "${RED}FAILED${NC}: $FAIL tests failed"
+ exit 1
+else
+ echo -e "${GREEN}PASSED${NC}: All tests passed"
+ exit 0
+fi
diff --git a/src/agent/loop_.rs b/src/agent/loop_.rs
index 57e0182..0f611d7 100644
--- a/src/agent/loop_.rs
+++ b/src/agent/loop_.rs
@@ -39,7 +39,7 @@ pub async fn run(
// ── Wire up agnostic subsystems ──────────────────────────────
let observer: Arc<dyn Observer> =
Arc::from(observability::create_observer(&config.observability));
- let _runtime = runtime::create_runtime(&config.runtime);
+ let _runtime = runtime::create_runtime(&config.runtime)?;
let security = Arc::new(SecurityPolicy::from_config(
&config.autonomy,
&config.workspace_dir,
@@ -72,8 +72,11 @@ pub async fn run(
.or(config.default_model.as_deref())
.unwrap_or("anthropic/claude-sonnet-4-20250514");
- let provider: Box<dyn Provider> =
- providers::create_provider(provider_name, config.api_key.as_deref())?;
+ let provider: Box<dyn Provider> = providers::create_resilient_provider(
+ provider_name,
+ config.api_key.as_deref(),
+ &config.reliability,
+ )?;
observer.record_event(&ObserverEvent::AgentStart {
provider: provider_name.to_string(),
@@ -83,12 +86,30 @@ pub async fn run(
// ── Build system prompt from workspace MD files (OpenClaw framework) ──
let skills = crate::skills::load_skills(&config.workspace_dir);
let mut tool_descs: Vec<(&str, &str)> = vec![
- ("shell", "Execute terminal commands"),
- ("file_read", "Read file contents"),
- ("file_write", "Write file contents"),
- ("memory_store", "Save to memory"),
- ("memory_recall", "Search memory"),
- ("memory_forget", "Delete a memory entry"),
+ (
+ "shell",
+ "Execute terminal commands. Use when: running local checks, build/test commands, diagnostics. Don't use when: a safer dedicated tool exists, or command is destructive without approval.",
+ ),
+ (
+ "file_read",
+ "Read file contents. Use when: inspecting project files, configs, logs. Don't use when: a targeted search is enough.",
+ ),
+ (
+ "file_write",
+ "Write file contents. Use when: applying focused edits, scaffolding files, updating docs/code. Don't use when: side effects are unclear or file ownership is uncertain.",
+ ),
+ (
+ "memory_store",
+ "Save to memory. Use when: preserving durable preferences, decisions, key context. Don't use when: information is transient/noisy/sensitive without need.",
+ ),
+ (
+ "memory_recall",
+ "Search memory. Use when: retrieving prior decisions, user preferences, historical context. Don't use when: answer is already in current context.",
+ ),
+ (
+ "memory_forget",
+ "Delete a memory entry. Use when: memory is incorrect/stale or explicitly requested for removal. Don't use when: impact is uncertain.",
+ ),
];
if config.browser.enabled {
tool_descs.push((
diff --git a/src/channels/imessage.rs b/src/channels/imessage.rs
index a0ac72e..c3a8abf 100644
--- a/src/channels/imessage.rs
+++ b/src/channels/imessage.rs
@@ -29,6 +29,60 @@ impl IMessageChannel {
}
}
+/// Escape a string for safe interpolation into `AppleScript`.
+///
+/// This prevents injection attacks by escaping:
+/// - Backslashes (`\` → `\\`)
+/// - Double quotes (`"` → `\"`)
+fn escape_applescript(s: &str) -> String {
+ s.replace('\\', "\\\\").replace('"', "\\\"")
+}
+
+/// Validate that a target looks like a valid phone number or email address.
+///
+/// This is a defense-in-depth measure to reject obviously malicious targets
+/// before they reach `AppleScript` interpolation.
+///
+/// Valid patterns:
+/// - Phone: starts with `+` followed by digits (with optional spaces/dashes)
+/// - Email: contains `@` with alphanumeric chars on both sides
+fn is_valid_imessage_target(target: &str) -> bool {
+ let target = target.trim();
+ if target.is_empty() {
+ return false;
+ }
+
+ // Phone number: +1234567890 or +1 234-567-8900
+ if target.starts_with('+') {
+ let digits_only: String = target.chars().filter(char::is_ascii_digit).collect();
+ // Must have at least 7 digits (shortest valid phone numbers)
+ return digits_only.len() >= 7 && digits_only.len() <= 15;
+ }
+
+ // Email: simple validation (contains @ with chars on both sides)
+ if let Some(at_pos) = target.find('@') {
+ let local = &target[..at_pos];
+ let domain = &target[at_pos + 1..];
+
+ // Local part: non-empty, alphanumeric + common email chars
+ let local_valid = !local.is_empty()
+ && local
+ .chars()
+ .all(|c| c.is_alphanumeric() || "._+-".contains(c));
+
+ // Domain: non-empty, contains a dot, alphanumeric + dots/hyphens
+ let domain_valid = !domain.is_empty()
+ && domain.contains('.')
+ && domain
+ .chars()
+ .all(|c| c.is_alphanumeric() || ".-".contains(c));
+
+ return local_valid && domain_valid;
+ }
+
+ false
+}
+
#[async_trait]
impl Channel for IMessageChannel {
fn name(&self) -> &str {
@@ -36,11 +90,22 @@ impl Channel for IMessageChannel {
}
async fn send(&self, message: &str, target: &str) -> anyhow::Result<()> {
- let escaped_msg = message.replace('\\', "\\\\").replace('"', "\\\"");
+ // Defense-in-depth: validate target format before any interpolation
+ if !is_valid_imessage_target(target) {
+ anyhow::bail!(
+ "Invalid iMessage target: must be a phone number (+1234567890) or email (user@example.com)"
+ );
+ }
+
+ // SECURITY: Escape both message AND target to prevent AppleScript injection
+ // See: CWE-78 (OS Command Injection)
+ let escaped_msg = escape_applescript(message);
+ let escaped_target = escape_applescript(target);
+
let script = format!(
r#"tell application "Messages"
set targetService to 1st account whose service type = iMessage
- set targetBuddy to participant "{target}" of targetService
+ set targetBuddy to participant "{escaped_target}" of targetService
send "{escaped_msg}" to targetBuddy
end tell"#
);
@@ -262,4 +327,204 @@ mod tests {
assert!(ch.is_contact_allowed(" spaced "));
assert!(!ch.is_contact_allowed("spaced"));
}
+
+ // ══════════════════════════════════════════════════════════
+ // AppleScript Escaping Tests (CWE-78 Prevention)
+ // ══════════════════════════════════════════════════════════
+
+    #[test]
+    fn escape_applescript_double_quotes() {
+        // '"' must become '\"' so it cannot terminate an AppleScript string.
+        assert_eq!(escape_applescript(r#"hello "world""#), r#"hello \"world\""#);
+    }
+
+    #[test]
+    fn escape_applescript_backslashes() {
+        // '\' must be doubled, otherwise it could neutralize a later escape.
+        assert_eq!(escape_applescript(r"path\to\file"), r"path\\to\\file");
+    }
+
+    #[test]
+    fn escape_applescript_mixed() {
+        // The expected value shows backslashes are doubled before (or jointly
+        // with) quote-escaping — escaping quotes first would yield `\\"`.
+        assert_eq!(
+            escape_applescript(r#"say "hello\" world"#),
+            r#"say \"hello\\\" world"#
+        );
+    }
+
+    #[test]
+    fn escape_applescript_injection_attempt() {
+        // This is the exact attack vector from the security report.
+        let malicious = r#"" & do shell script "id" & ""#;
+        let escaped = escape_applescript(malicious);
+        // After escaping, the quotes should be escaped and not break out.
+        assert_eq!(escaped, r#"\" & do shell script \"id\" & \""#);
+        // Defense-in-depth: verify every quote in the escaped output is
+        // preceded by a backslash, i.e. no quote can terminate the
+        // AppleScript string literal early.
+        // (Restored the `<char>` type parameter that was lost in transit.)
+        let chars: Vec<char> = escaped.chars().collect();
+        for (i, &c) in chars.iter().enumerate() {
+            if c == '"' {
+                // Every quote must be preceded by a backslash
+                assert!(
+                    i > 0 && chars[i - 1] == '\\',
+                    "Found unescaped quote at position {i}"
+                );
+            }
+        }
+    }
+
+    #[test]
+    fn escape_applescript_empty_string() {
+        // Empty input is a no-op, not an error.
+        assert_eq!(escape_applescript(""), "");
+    }
+
+    #[test]
+    fn escape_applescript_no_special_chars() {
+        assert_eq!(escape_applescript("hello world"), "hello world");
+    }
+
+    #[test]
+    fn escape_applescript_unicode() {
+        // Multi-byte characters pass through untouched.
+        assert_eq!(escape_applescript("hello 🦀 world"), "hello 🦀 world");
+    }
+
+    #[test]
+    fn escape_applescript_newlines_preserved() {
+        // Newlines are not escaped — preserved verbatim.
+        assert_eq!(escape_applescript("line1\nline2"), "line1\nline2");
+    }
+
+ // ══════════════════════════════════════════════════════════
+ // Target Validation Tests
+ // ══════════════════════════════════════════════════════════
+
+    #[test]
+    fn valid_phone_number_simple() {
+        assert!(is_valid_imessage_target("+1234567890"));
+    }
+
+    #[test]
+    fn valid_phone_number_with_country_code() {
+        assert!(is_valid_imessage_target("+14155551234"));
+    }
+
+    #[test]
+    fn valid_phone_number_with_spaces() {
+        // Separators are ignored; only the digit count (7..=15) matters.
+        assert!(is_valid_imessage_target("+1 415 555 1234"));
+    }
+
+    #[test]
+    fn valid_phone_number_with_dashes() {
+        assert!(is_valid_imessage_target("+1-415-555-1234"));
+    }
+
+    #[test]
+    fn valid_phone_number_international() {
+        assert!(is_valid_imessage_target("+447911123456")); // UK
+        assert!(is_valid_imessage_target("+81312345678")); // Japan
+    }
+
+    #[test]
+    fn valid_email_simple() {
+        assert!(is_valid_imessage_target("user@example.com"));
+    }
+
+    #[test]
+    fn valid_email_with_subdomain() {
+        assert!(is_valid_imessage_target("user@mail.example.com"));
+    }
+
+    #[test]
+    fn valid_email_with_plus() {
+        // '+' tagging is part of the allowed local-part charset ("._+-").
+        assert!(is_valid_imessage_target("user+tag@example.com"));
+    }
+
+    #[test]
+    fn valid_email_with_dots() {
+        assert!(is_valid_imessage_target("first.last@example.com"));
+    }
+
+    #[test]
+    fn valid_email_icloud() {
+        // Common Apple-hosted addresses must be accepted.
+        assert!(is_valid_imessage_target("user@icloud.com"));
+        assert!(is_valid_imessage_target("user@me.com"));
+    }
+
+    #[test]
+    fn invalid_target_empty() {
+        assert!(!is_valid_imessage_target(""));
+        assert!(!is_valid_imessage_target(" "));
+    }
+
+    #[test]
+    fn invalid_target_no_plus_prefix() {
+        // Phone numbers must start with +
+        assert!(!is_valid_imessage_target("1234567890"));
+    }
+
+    #[test]
+    fn invalid_target_too_short_phone() {
+        // Less than 7 digits
+        assert!(!is_valid_imessage_target("+123456"));
+    }
+
+    #[test]
+    fn invalid_target_too_long_phone() {
+        // More than 15 digits (the validator's upper bound)
+        assert!(!is_valid_imessage_target("+1234567890123456"));
+    }
+
+    #[test]
+    fn invalid_target_email_no_at() {
+        assert!(!is_valid_imessage_target("userexample.com"));
+    }
+
+    #[test]
+    fn invalid_target_email_no_domain() {
+        assert!(!is_valid_imessage_target("user@"));
+    }
+
+    #[test]
+    fn invalid_target_email_no_local() {
+        assert!(!is_valid_imessage_target("@example.com"));
+    }
+
+    #[test]
+    fn invalid_target_email_no_dot_in_domain() {
+        // Bare hostnames are rejected — the domain must contain a dot.
+        assert!(!is_valid_imessage_target("user@localhost"));
+    }
+
+    #[test]
+    fn invalid_target_injection_attempt() {
+        // The exact attack vector from the security report
+        assert!(!is_valid_imessage_target(r#"" & do shell script "id" & ""#));
+    }
+
+    #[test]
+    fn invalid_target_applescript_injection() {
+        // Various injection attempts — quotes, escapes, and statement
+        // separators must all fail validation before reaching AppleScript.
+        assert!(!is_valid_imessage_target(r#"test" & quit"#));
+        assert!(!is_valid_imessage_target(r#"test\ndo shell script"#));
+        assert!(!is_valid_imessage_target("test\"; malicious code; \""));
+    }
+
+ #[test]
+ fn invalid_target_special_chars() {
+ assert!(!is_valid_imessage_target("user & \"quotes\" 'apostrophe'" }
+ }]
+ }
+ }]
+ }]
+ });
+ let msgs = ch.parse_webhook_payload(&payload);
+ assert_eq!(msgs.len(), 1);
+ assert_eq!(
+ msgs[0].content,
+ " & \"quotes\" 'apostrophe'"
+ );
}
}
diff --git a/src/config/mod.rs b/src/config/mod.rs
index 9af098c..f5849c1 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -2,7 +2,7 @@ pub mod schema;
pub use schema::{
AutonomyConfig, BrowserConfig, ChannelsConfig, ComposioConfig, Config, DiscordConfig,
- GatewayConfig, HeartbeatConfig, IMessageConfig, MatrixConfig, MemoryConfig,
- ObservabilityConfig, RuntimeConfig, SecretsConfig, SlackConfig, TelegramConfig, TunnelConfig,
- WebhookConfig,
+ GatewayConfig, HeartbeatConfig, IMessageConfig, IdentityConfig, MatrixConfig, MemoryConfig,
+ ObservabilityConfig, ReliabilityConfig, RuntimeConfig, SecretsConfig, SlackConfig,
+ TelegramConfig, TunnelConfig, WebhookConfig,
};
diff --git a/src/config/schema.rs b/src/config/schema.rs
index 49a9d59..872a600 100644
--- a/src/config/schema.rs
+++ b/src/config/schema.rs
@@ -25,6 +25,9 @@ pub struct Config {
#[serde(default)]
pub runtime: RuntimeConfig,
+ #[serde(default)]
+ pub reliability: ReliabilityConfig,
+
#[serde(default)]
pub heartbeat: HeartbeatConfig,
@@ -48,6 +51,38 @@ pub struct Config {
#[serde(default)]
pub browser: BrowserConfig,
+
+ #[serde(default)]
+ pub identity: IdentityConfig,
+}
+
+// ── Identity (AIEOS / OpenClaw format) ──────────────────────────
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct IdentityConfig {
+    /// Identity format: "openclaw" (default) or "aieos"
+    #[serde(default = "default_identity_format")]
+    pub format: String,
+    /// Path to AIEOS JSON file (relative to workspace)
+    #[serde(default)]
+    pub aieos_path: Option<String>,
+    /// Inline AIEOS JSON (alternative to file path)
+    // NOTE(review): generic parameters were stripped in transit; restored as
+    // Option<String> (raw JSON text) — confirm against the identity loader.
+    #[serde(default)]
+    pub aieos_inline: Option<String>,
+}
+
+fn default_identity_format() -> String {
+    // Serde default for `IdentityConfig::format`.
+    "openclaw".into()
+}
+
+impl Default for IdentityConfig {
+    fn default() -> Self {
+        // Default identity: native "openclaw" format with no AIEOS source.
+        Self {
+            format: default_identity_format(),
+            aieos_path: None,
+            aieos_inline: None,
+        }
+    }
+}
// ── Gateway security ─────────────────────────────────────────────
@@ -143,6 +178,18 @@ pub struct MemoryConfig {
pub backend: String,
/// Auto-save conversation context to memory
pub auto_save: bool,
+ /// Run memory/session hygiene (archiving + retention cleanup)
+ #[serde(default = "default_hygiene_enabled")]
+ pub hygiene_enabled: bool,
+ /// Archive daily/session files older than this many days
+ #[serde(default = "default_archive_after_days")]
+ pub archive_after_days: u32,
+ /// Purge archived files older than this many days
+ #[serde(default = "default_purge_after_days")]
+ pub purge_after_days: u32,
+ /// For sqlite backend: prune conversation rows older than this many days
+ #[serde(default = "default_conversation_retention_days")]
+ pub conversation_retention_days: u32,
/// Embedding provider: "none" | "openai" | "custom:URL"
#[serde(default = "default_embedding_provider")]
pub embedding_provider: String,
@@ -169,6 +216,18 @@ pub struct MemoryConfig {
fn default_embedding_provider() -> String {
"none".into()
}
+fn default_hygiene_enabled() -> bool {
+    // Hygiene (archiving + retention cleanup) is on by default.
+    true
+}
+fn default_archive_after_days() -> u32 {
+    // Archive daily/session files after one week.
+    7
+}
+fn default_purge_after_days() -> u32 {
+    // Purge archived files after a month.
+    30
+}
+fn default_conversation_retention_days() -> u32 {
+    // Keep sqlite conversation rows for a month.
+    30
+}
fn default_embedding_model() -> String {
"text-embedding-3-small".into()
}
@@ -193,6 +252,10 @@ impl Default for MemoryConfig {
Self {
backend: "sqlite".into(),
auto_save: true,
+ hygiene_enabled: default_hygiene_enabled(),
+ archive_after_days: default_archive_after_days(),
+ purge_after_days: default_purge_after_days(),
+ conversation_retention_days: default_conversation_retention_days(),
embedding_provider: default_embedding_provider(),
embedding_model: default_embedding_model(),
embedding_dimensions: default_embedding_dims(),
@@ -281,7 +344,9 @@ impl Default for AutonomyConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RuntimeConfig {
- /// "native" | "docker" | "cloudflare"
+ /// Runtime kind (currently supported: "native").
+ ///
+ /// Reserved values (not implemented yet): "docker", "cloudflare".
pub kind: String,
}
@@ -293,6 +358,71 @@ impl Default for RuntimeConfig {
}
}
+// ── Reliability / supervision ────────────────────────────────────
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReliabilityConfig {
+    /// Retries per provider before failing over.
+    #[serde(default = "default_provider_retries")]
+    pub provider_retries: u32,
+    /// Base backoff (ms) for provider retry delay.
+    #[serde(default = "default_provider_backoff_ms")]
+    pub provider_backoff_ms: u64,
+    /// Fallback provider chain (e.g. `["anthropic", "openai"]`).
+    // Restored the `<String>` type parameter that was lost in transit.
+    #[serde(default)]
+    pub fallback_providers: Vec<String>,
+    /// Initial backoff for channel/daemon restarts.
+    #[serde(default = "default_channel_backoff_secs")]
+    pub channel_initial_backoff_secs: u64,
+    /// Max backoff for channel/daemon restarts.
+    #[serde(default = "default_channel_backoff_max_secs")]
+    pub channel_max_backoff_secs: u64,
+    /// Scheduler polling cadence in seconds.
+    #[serde(default = "default_scheduler_poll_secs")]
+    pub scheduler_poll_secs: u64,
+    /// Max retries for cron job execution attempts.
+    #[serde(default = "default_scheduler_retries")]
+    pub scheduler_retries: u32,
+}
+
+fn default_provider_retries() -> u32 {
+    // Two retries per provider before failing over.
+    2
+}
+
+fn default_provider_backoff_ms() -> u64 {
+    // Half a second base backoff between provider retries.
+    500
+}
+
+fn default_channel_backoff_secs() -> u64 {
+    // Restart crashed channels quickly at first...
+    2
+}
+
+fn default_channel_backoff_max_secs() -> u64 {
+    // ...but never wait longer than a minute between restarts.
+    60
+}
+
+fn default_scheduler_poll_secs() -> u64 {
+    // Cron DB polling cadence.
+    15
+}
+
+fn default_scheduler_retries() -> u32 {
+    // Retries per cron job execution attempt.
+    2
+}
+
+impl Default for ReliabilityConfig {
+    fn default() -> Self {
+        // Mirror the per-field serde defaults so omitting the whole
+        // [reliability] table and omitting individual fields agree.
+        Self {
+            provider_retries: default_provider_retries(),
+            provider_backoff_ms: default_provider_backoff_ms(),
+            fallback_providers: Vec::new(),
+            channel_initial_backoff_secs: default_channel_backoff_secs(),
+            channel_max_backoff_secs: default_channel_backoff_max_secs(),
+            scheduler_poll_secs: default_scheduler_poll_secs(),
+            scheduler_retries: default_scheduler_retries(),
+        }
+    }
+}
+
// ── Heartbeat ────────────────────────────────────────────────────
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -387,6 +517,7 @@ pub struct ChannelsConfig {
pub webhook: Option,
pub imessage: Option,
pub matrix: Option,
+ pub whatsapp: Option,
}
impl Default for ChannelsConfig {
@@ -399,6 +530,7 @@ impl Default for ChannelsConfig {
webhook: None,
imessage: None,
matrix: None,
+ whatsapp: None,
}
}
}
@@ -445,6 +577,19 @@ pub struct MatrixConfig {
pub allowed_users: Vec,
}
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WhatsAppConfig {
+    /// Access token from Meta Business Suite
+    pub access_token: String,
+    /// Phone number ID from Meta Business API
+    pub phone_number_id: String,
+    /// Webhook verify token (you define this, Meta sends it back for verification)
+    pub verify_token: String,
+    /// Allowed phone numbers (E.164 format: +1234567890) or "*" for all
+    // Restored the `<String>` type parameter that was lost in transit
+    // (confirmed by the vec-of-string literals in the serde tests below).
+    #[serde(default)]
+    pub allowed_numbers: Vec<String>,
+}
+
// ── Config impl ──────────────────────────────────────────────────
impl Default for Config {
@@ -463,6 +608,7 @@ impl Default for Config {
observability: ObservabilityConfig::default(),
autonomy: AutonomyConfig::default(),
runtime: RuntimeConfig::default(),
+ reliability: ReliabilityConfig::default(),
heartbeat: HeartbeatConfig::default(),
channels_config: ChannelsConfig::default(),
memory: MemoryConfig::default(),
@@ -471,6 +617,7 @@ impl Default for Config {
composio: ComposioConfig::default(),
secrets: SecretsConfig::default(),
browser: BrowserConfig::default(),
+ identity: IdentityConfig::default(),
}
}
}
@@ -558,6 +705,17 @@ mod tests {
assert_eq!(h.interval_minutes, 30);
}
+    #[test]
+    fn memory_config_default_hygiene_settings() {
+        // Pins the documented defaults: hygiene on, archive after a week,
+        // purge/retention after a month.
+        let m = MemoryConfig::default();
+        assert_eq!(m.backend, "sqlite");
+        assert!(m.auto_save);
+        assert!(m.hygiene_enabled);
+        assert_eq!(m.archive_after_days, 7);
+        assert_eq!(m.purge_after_days, 30);
+        assert_eq!(m.conversation_retention_days, 30);
+    }
+ }
+
#[test]
fn channels_config_default() {
let c = ChannelsConfig::default();
@@ -591,6 +749,7 @@ mod tests {
runtime: RuntimeConfig {
kind: "docker".into(),
},
+ reliability: ReliabilityConfig::default(),
heartbeat: HeartbeatConfig {
enabled: true,
interval_minutes: 15,
@@ -606,6 +765,7 @@ mod tests {
webhook: None,
imessage: None,
matrix: None,
+ whatsapp: None,
},
memory: MemoryConfig::default(),
tunnel: TunnelConfig::default(),
@@ -613,6 +773,7 @@ mod tests {
composio: ComposioConfig::default(),
secrets: SecretsConfig::default(),
browser: BrowserConfig::default(),
+ identity: IdentityConfig::default(),
};
let toml_str = toml::to_string_pretty(&config).unwrap();
@@ -650,6 +811,10 @@ default_temperature = 0.7
assert_eq!(parsed.runtime.kind, "native");
assert!(!parsed.heartbeat.enabled);
assert!(parsed.channels_config.cli);
+ assert!(parsed.memory.hygiene_enabled);
+ assert_eq!(parsed.memory.archive_after_days, 7);
+ assert_eq!(parsed.memory.purge_after_days, 30);
+ assert_eq!(parsed.memory.conversation_retention_days, 30);
}
#[test]
@@ -669,6 +834,7 @@ default_temperature = 0.7
observability: ObservabilityConfig::default(),
autonomy: AutonomyConfig::default(),
runtime: RuntimeConfig::default(),
+ reliability: ReliabilityConfig::default(),
heartbeat: HeartbeatConfig::default(),
channels_config: ChannelsConfig::default(),
memory: MemoryConfig::default(),
@@ -677,6 +843,7 @@ default_temperature = 0.7
composio: ComposioConfig::default(),
secrets: SecretsConfig::default(),
browser: BrowserConfig::default(),
+ identity: IdentityConfig::default(),
};
config.save().unwrap();
@@ -810,6 +977,7 @@ default_temperature = 0.7
room_id: "!r:m".into(),
allowed_users: vec!["@u:m".into()],
}),
+ whatsapp: None,
};
let toml_str = toml::to_string_pretty(&c).unwrap();
let parsed: ChannelsConfig = toml::from_str(&toml_str).unwrap();
@@ -894,6 +1062,89 @@ channel_id = "C123"
assert_eq!(parsed.port, 8080);
}
+ // ── WhatsApp config ──────────────────────────────────────
+
+    #[test]
+    fn whatsapp_config_serde() {
+        // JSON round-trip preserves every field, including the allowlist.
+        let wc = WhatsAppConfig {
+            access_token: "EAABx...".into(),
+            phone_number_id: "123456789".into(),
+            verify_token: "my-verify-token".into(),
+            allowed_numbers: vec!["+1234567890".into(), "+9876543210".into()],
+        };
+        let json = serde_json::to_string(&wc).unwrap();
+        let parsed: WhatsAppConfig = serde_json::from_str(&json).unwrap();
+        assert_eq!(parsed.access_token, "EAABx...");
+        assert_eq!(parsed.phone_number_id, "123456789");
+        assert_eq!(parsed.verify_token, "my-verify-token");
+        assert_eq!(parsed.allowed_numbers.len(), 2);
+    }
+
+    #[test]
+    fn whatsapp_config_toml_roundtrip() {
+        let wc = WhatsAppConfig {
+            access_token: "tok".into(),
+            phone_number_id: "12345".into(),
+            verify_token: "verify".into(),
+            allowed_numbers: vec!["+1".into()],
+        };
+        let toml_str = toml::to_string(&wc).unwrap();
+        let parsed: WhatsAppConfig = toml::from_str(&toml_str).unwrap();
+        assert_eq!(parsed.phone_number_id, "12345");
+        assert_eq!(parsed.allowed_numbers, vec!["+1"]);
+    }
+
+    #[test]
+    fn whatsapp_config_deserializes_without_allowed_numbers() {
+        // `allowed_numbers` is #[serde(default)] — omission yields empty vec.
+        let json = r#"{"access_token":"tok","phone_number_id":"123","verify_token":"ver"}"#;
+        let parsed: WhatsAppConfig = serde_json::from_str(json).unwrap();
+        assert!(parsed.allowed_numbers.is_empty());
+    }
+
+    #[test]
+    fn whatsapp_config_wildcard_allowed() {
+        // "*" is stored verbatim; wildcard semantics live in the channel.
+        let wc = WhatsAppConfig {
+            access_token: "tok".into(),
+            phone_number_id: "123".into(),
+            verify_token: "ver".into(),
+            allowed_numbers: vec!["*".into()],
+        };
+        let toml_str = toml::to_string(&wc).unwrap();
+        let parsed: WhatsAppConfig = toml::from_str(&toml_str).unwrap();
+        assert_eq!(parsed.allowed_numbers, vec!["*"]);
+    }
+
+    #[test]
+    fn channels_config_with_whatsapp() {
+        let c = ChannelsConfig {
+            cli: true,
+            telegram: None,
+            discord: None,
+            slack: None,
+            webhook: None,
+            imessage: None,
+            matrix: None,
+            whatsapp: Some(WhatsAppConfig {
+                access_token: "tok".into(),
+                phone_number_id: "123".into(),
+                verify_token: "ver".into(),
+                allowed_numbers: vec!["+1".into()],
+            }),
+        };
+        let toml_str = toml::to_string_pretty(&c).unwrap();
+        let parsed: ChannelsConfig = toml::from_str(&toml_str).unwrap();
+        assert!(parsed.whatsapp.is_some());
+        let wa = parsed.whatsapp.unwrap();
+        assert_eq!(wa.phone_number_id, "123");
+        assert_eq!(wa.allowed_numbers, vec!["+1"]);
+    }
+
+    #[test]
+    fn channels_config_default_has_no_whatsapp() {
+        assert!(c.whatsapp.is_none());
+    }
+
// ══════════════════════════════════════════════════════════
// SECURITY CHECKLIST TESTS — Gateway config
// ══════════════════════════════════════════════════════════
diff --git a/src/cron/mod.rs b/src/cron/mod.rs
index 8f52701..572670d 100644
--- a/src/cron/mod.rs
+++ b/src/cron/mod.rs
@@ -1,25 +1,353 @@
use crate::config::Config;
-use anyhow::Result;
+use anyhow::{Context, Result};
+use chrono::{DateTime, Utc};
+use cron::Schedule;
+use rusqlite::{params, Connection};
+use std::str::FromStr;
+use uuid::Uuid;
-pub fn handle_command(command: super::CronCommands, _config: Config) -> Result<()> {
+pub mod scheduler;
+
+/// A persisted scheduled job, mirroring one row of the `cron_jobs` table.
+// Restored the generic parameters stripped in transit; types are confirmed
+// by `parse_rfc3339` (DateTime<Utc>) and the Option<String> column reads.
+#[derive(Debug, Clone)]
+pub struct CronJob {
+    pub id: String,
+    pub expression: String,
+    pub command: String,
+    pub next_run: DateTime<Utc>,
+    pub last_run: Option<DateTime<Utc>>,
+    pub last_status: Option<String>,
+}
+
+/// CLI entry point for `zeroclaw cron` subcommands.
+///
+/// List prints the stored jobs (or usage help when empty); Add validates and
+/// persists a new job; Remove deletes by id. All state lives in the
+/// sqlite-backed helpers below.
+pub fn handle_command(command: super::CronCommands, config: Config) -> Result<()> {
     match command {
         super::CronCommands::List => {
-            println!("No scheduled tasks yet.");
-            println!("\nUsage:");
-            println!(" zeroclaw cron add '0 9 * * *' 'agent -m \"Good morning!\"'");
+            let jobs = list_jobs(&config)?;
+            if jobs.is_empty() {
+                println!("No scheduled tasks yet.");
+                println!("\nUsage:");
+                println!(" zeroclaw cron add '0 9 * * *' 'agent -m \"Good morning!\"'");
+                return Ok(());
+            }
+
+            println!("🕒 Scheduled jobs ({}):", jobs.len());
+            for job in jobs {
+                // Display-friendly fallbacks for jobs that have never run.
+                let last_run = job
+                    .last_run
+                    .map(|d| d.to_rfc3339())
+                    .unwrap_or_else(|| "never".into());
+                let last_status = job.last_status.unwrap_or_else(|| "n/a".into());
+                println!(
+                    "- {} | {} | next={} | last={} ({})\n cmd: {}",
+                    job.id,
+                    job.expression,
+                    job.next_run.to_rfc3339(),
+                    last_run,
+                    last_status,
+                    job.command
+                );
+            }
             Ok(())
         }
         super::CronCommands::Add {
             expression,
             command,
         } => {
-            println!("Cron scheduling coming soon!");
-            println!(" Expression: {expression}");
-            println!(" Command: {command}");
+            let job = add_job(&config, &expression, &command)?;
+            println!("✅ Added cron job {}", job.id);
+            println!(" Expr: {}", job.expression);
+            println!(" Next: {}", job.next_run.to_rfc3339());
+            println!(" Cmd : {}", job.command);
             Ok(())
         }
-        super::CronCommands::Remove { id } => {
-            anyhow::bail!("Remove task '{id}' not yet implemented");
-        }
+        super::CronCommands::Remove { id } => remove_job(&config, &id),
+    }
+}
+
+/// Insert a new cron job, computing its first `next_run` from `expression`.
+///
+/// Returns the stored job (with a freshly generated UUID id). Fails on an
+/// invalid cron expression or a DB error.
+// Restored the `<CronJob>` return-type parameter stripped in transit.
+pub fn add_job(config: &Config, expression: &str, command: &str) -> Result<CronJob> {
+    let now = Utc::now();
+    // Validate the expression up front; a bad expression never reaches the DB.
+    let next_run = next_run_for(expression, now)?;
+    let id = Uuid::new_v4().to_string();
+
+    with_connection(config, |conn| {
+        conn.execute(
+            "INSERT INTO cron_jobs (id, expression, command, created_at, next_run)
+             VALUES (?1, ?2, ?3, ?4, ?5)",
+            params![
+                id,
+                expression,
+                command,
+                now.to_rfc3339(),
+                next_run.to_rfc3339()
+            ],
+        )
+        .context("Failed to insert cron job")?;
+        Ok(())
+    })?;
+
+    Ok(CronJob {
+        id,
+        expression: expression.to_string(),
+        command: command.to_string(),
+        next_run,
+        last_run: None,
+        last_status: None,
+    })
+}
+
+/// Load every stored cron job, ordered by soonest `next_run` first.
+// Restored generic parameters stripped in transit (`Result<Vec<CronJob>>`,
+// `Option<String>` column reads).
+pub fn list_jobs(config: &Config) -> Result<Vec<CronJob>> {
+    with_connection(config, |conn| {
+        let mut stmt = conn.prepare(
+            "SELECT id, expression, command, next_run, last_run, last_status
+             FROM cron_jobs ORDER BY next_run ASC",
+        )?;
+
+        let rows = stmt.query_map([], |row| {
+            let next_run_raw: String = row.get(3)?;
+            let last_run_raw: Option<String> = row.get(4)?;
+            Ok((
+                row.get::<_, String>(0)?,
+                row.get::<_, String>(1)?,
+                row.get::<_, String>(2)?,
+                next_run_raw,
+                last_run_raw,
+                row.get::<_, Option<String>>(5)?,
+            ))
+        })?;
+
+        let mut jobs = Vec::new();
+        for row in rows {
+            let (id, expression, command, next_run_raw, last_run_raw, last_status) = row?;
+            jobs.push(CronJob {
+                id,
+                expression,
+                command,
+                // Timestamps are stored as RFC3339 text; a parse failure means
+                // DB corruption and surfaces as an error rather than a panic.
+                next_run: parse_rfc3339(&next_run_raw)?,
+                last_run: match last_run_raw {
+                    Some(raw) => Some(parse_rfc3339(&raw)?),
+                    None => None,
+                },
+                last_status,
+            });
+        }
+        Ok(jobs)
+    })
+}
+
+/// Delete the cron job with the given id, reporting success on stdout.
+///
+/// Fails when no row matched (unknown id) or when the DB delete itself errors.
+pub fn remove_job(config: &Config, id: &str) -> Result<()> {
+    let rows_deleted = with_connection(config, |conn| {
+        conn.execute("DELETE FROM cron_jobs WHERE id = ?1", params![id])
+            .context("Failed to delete cron job")
+    })?;
+
+    match rows_deleted {
+        0 => anyhow::bail!("Cron job '{id}' not found"),
+        _ => {
+            println!("✅ Removed cron job {id}");
+            Ok(())
+        }
+    }
+}
+
+/// Load every job whose `next_run` is at or before `now`, soonest first.
+///
+/// The comparison is done in SQL on the RFC3339 text column, which orders
+/// correctly because all timestamps are stored in UTC.
+// Restored generic parameters stripped in transit (`DateTime<Utc>`,
+// `Result<Vec<CronJob>>`, `Option<String>` column reads).
+pub fn due_jobs(config: &Config, now: DateTime<Utc>) -> Result<Vec<CronJob>> {
+    with_connection(config, |conn| {
+        let mut stmt = conn.prepare(
+            "SELECT id, expression, command, next_run, last_run, last_status
+             FROM cron_jobs WHERE next_run <= ?1 ORDER BY next_run ASC",
+        )?;
+
+        let rows = stmt.query_map(params![now.to_rfc3339()], |row| {
+            let next_run_raw: String = row.get(3)?;
+            let last_run_raw: Option<String> = row.get(4)?;
+            Ok((
+                row.get::<_, String>(0)?,
+                row.get::<_, String>(1)?,
+                row.get::<_, String>(2)?,
+                next_run_raw,
+                last_run_raw,
+                row.get::<_, Option<String>>(5)?,
+            ))
+        })?;
+
+        let mut jobs = Vec::new();
+        for row in rows {
+            let (id, expression, command, next_run_raw, last_run_raw, last_status) = row?;
+            jobs.push(CronJob {
+                id,
+                expression,
+                command,
+                next_run: parse_rfc3339(&next_run_raw)?,
+                last_run: match last_run_raw {
+                    Some(raw) => Some(parse_rfc3339(&raw)?),
+                    None => None,
+                },
+                last_status,
+            });
+        }
+        Ok(jobs)
+    })
+}
+
+/// Persist the outcome of a job run and schedule its next occurrence.
+///
+/// Records `last_run`/`last_status`/`last_output` and advances `next_run`
+/// from the current time, so a slow job cannot make itself immediately due.
+pub fn reschedule_after_run(
+    config: &Config,
+    job: &CronJob,
+    success: bool,
+    output: &str,
+) -> Result<()> {
+    let completed_at = Utc::now();
+    let status = match success {
+        true => "ok",
+        false => "error",
+    };
+    let next_run = next_run_for(&job.expression, completed_at)?;
+
+    with_connection(config, |conn| {
+        conn.execute(
+            "UPDATE cron_jobs
+             SET next_run = ?1, last_run = ?2, last_status = ?3, last_output = ?4
+             WHERE id = ?5",
+            params![
+                next_run.to_rfc3339(),
+                completed_at.to_rfc3339(),
+                status,
+                output,
+                job.id
+            ],
+        )
+        .context("Failed to update cron job run state")?;
+        Ok(())
+    })
+}
+
+/// Compute the next occurrence of `expression` strictly after `from`.
+///
+/// Errors on an unparsable expression or one with no future occurrence
+/// (e.g. an exhausted year field).
+// Restored the `DateTime<Utc>` parameters stripped in transit.
+fn next_run_for(expression: &str, from: DateTime<Utc>) -> Result<DateTime<Utc>> {
+    let normalized = normalize_expression(expression)?;
+    let schedule = Schedule::from_str(&normalized)
+        .with_context(|| format!("Invalid cron expression: {expression}"))?;
+    schedule
+        .after(&from)
+        .next()
+        .ok_or_else(|| anyhow::anyhow!("No future occurrence for expression: {expression}"))
+}
+
+/// Convert a user-supplied cron expression into the 6/7-field syntax the
+/// `cron` crate expects.
+///
+/// Classic 5-field crontab input gains a leading "0" seconds field; 6- and
+/// 7-field input passes through; anything else is rejected.
+// Restored the `<String>` return-type parameter stripped in transit.
+fn normalize_expression(expression: &str) -> Result<String> {
+    let expression = expression.trim();
+    let field_count = expression.split_whitespace().count();
+
+    match field_count {
+        // standard crontab syntax: minute hour day month weekday
+        5 => Ok(format!("0 {expression}")),
+        // crate-native syntax includes seconds (+ optional year)
+        6 | 7 => Ok(expression.to_string()),
+        _ => anyhow::bail!(
+            "Invalid cron expression: {expression} (expected 5, 6, or 7 fields, got {field_count})"
+        ),
+    }
+}
+
+/// Parse an RFC3339 timestamp stored in the cron DB, normalizing to UTC.
+// Restored the `DateTime<Utc>` return-type parameter stripped in transit.
+fn parse_rfc3339(raw: &str) -> Result<DateTime<Utc>> {
+    let parsed = DateTime::parse_from_rfc3339(raw)
+        .with_context(|| format!("Invalid RFC3339 timestamp in cron DB: {raw}"))?;
+    Ok(parsed.with_timezone(&Utc))
+}
+
+/// Open (and lazily initialize) the cron jobs database, then run `f`.
+///
+/// The schema is created idempotently on every call, so callers never need a
+/// separate migration step. Generic over the closure's result type.
+// Restored the `<T>` generic stripped in transit — callers return (), usize,
+// and Vec<CronJob> through this function.
+fn with_connection<T>(config: &Config, f: impl FnOnce(&Connection) -> Result<T>) -> Result<T> {
+    let db_path = config.workspace_dir.join("cron").join("jobs.db");
+    if let Some(parent) = db_path.parent() {
+        std::fs::create_dir_all(parent)
+            .with_context(|| format!("Failed to create cron directory: {}", parent.display()))?;
+    }
+
+    let conn = Connection::open(&db_path)
+        .with_context(|| format!("Failed to open cron DB: {}", db_path.display()))?;
+
+    conn.execute_batch(
+        "CREATE TABLE IF NOT EXISTS cron_jobs (
+            id TEXT PRIMARY KEY,
+            expression TEXT NOT NULL,
+            command TEXT NOT NULL,
+            created_at TEXT NOT NULL,
+            next_run TEXT NOT NULL,
+            last_run TEXT,
+            last_status TEXT,
+            last_output TEXT
+        );
+        CREATE INDEX IF NOT EXISTS idx_cron_jobs_next_run ON cron_jobs(next_run);",
+    )
+    .context("Failed to initialize cron schema")?;
+
+    f(&conn)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::config::Config;
+    use chrono::Duration as ChronoDuration;
+    use tempfile::TempDir;
+
+    /// Build an isolated Config whose workspace lives inside `tmp`, so each
+    /// test gets its own cron DB.
+    fn test_config(tmp: &TempDir) -> Config {
+        let mut config = Config::default();
+        config.workspace_dir = tmp.path().join("workspace");
+        config.config_path = tmp.path().join("config.toml");
+        std::fs::create_dir_all(&config.workspace_dir).unwrap();
+        config
+    }
+
+    #[test]
+    fn add_job_accepts_five_field_expression() {
+        let tmp = TempDir::new().unwrap();
+        let config = test_config(&tmp);
+
+        let job = add_job(&config, "*/5 * * * *", "echo ok").unwrap();
+
+        // The expression is stored verbatim (normalization happens at parse time).
+        assert_eq!(job.expression, "*/5 * * * *");
+        assert_eq!(job.command, "echo ok");
+    }
+
+    #[test]
+    fn add_job_rejects_invalid_field_count() {
+        let tmp = TempDir::new().unwrap();
+        let config = test_config(&tmp);
+
+        let err = add_job(&config, "* * * *", "echo bad").unwrap_err();
+        assert!(err.to_string().contains("expected 5, 6, or 7 fields"));
+    }
+
+    #[test]
+    fn add_list_remove_roundtrip() {
+        let tmp = TempDir::new().unwrap();
+        let config = test_config(&tmp);
+
+        let job = add_job(&config, "*/10 * * * *", "echo roundtrip").unwrap();
+        let listed = list_jobs(&config).unwrap();
+        assert_eq!(listed.len(), 1);
+        assert_eq!(listed[0].id, job.id);
+
+        remove_job(&config, &job.id).unwrap();
+        assert!(list_jobs(&config).unwrap().is_empty());
+    }
+
+    #[test]
+    fn due_jobs_filters_by_timestamp() {
+        let tmp = TempDir::new().unwrap();
+        let config = test_config(&tmp);
+
+        let _job = add_job(&config, "* * * * *", "echo due").unwrap();
+
+        // NOTE(review): potentially flaky — if the wall clock crosses a minute
+        // boundary between add_job and this check, the job becomes due.
+        let due_now = due_jobs(&config, Utc::now()).unwrap();
+        assert!(due_now.is_empty(), "new job should not be due immediately");
+
+        let far_future = Utc::now() + ChronoDuration::days(365);
+        let due_future = due_jobs(&config, far_future).unwrap();
+        assert_eq!(due_future.len(), 1, "job should be due in far future");
+    }
+
+    #[test]
+    fn reschedule_after_run_persists_last_status_and_last_run() {
+        let tmp = TempDir::new().unwrap();
+        let config = test_config(&tmp);
+
+        let job = add_job(&config, "*/15 * * * *", "echo run").unwrap();
+        reschedule_after_run(&config, &job, false, "failed output").unwrap();
+
+        let listed = list_jobs(&config).unwrap();
+        let stored = listed.iter().find(|j| j.id == job.id).unwrap();
+        assert_eq!(stored.last_status.as_deref(), Some("error"));
+        assert!(stored.last_run.is_some());
    }
}
diff --git a/src/cron/scheduler.rs b/src/cron/scheduler.rs
new file mode 100644
index 0000000..973fbee
--- /dev/null
+++ b/src/cron/scheduler.rs
@@ -0,0 +1,297 @@
+use crate::config::Config;
+use crate::cron::{due_jobs, reschedule_after_run, CronJob};
+use crate::security::SecurityPolicy;
+use anyhow::Result;
+use chrono::Utc;
+use tokio::process::Command;
+use tokio::time::{self, Duration};
+
+// Lower bound on the polling cadence so a misconfigured (tiny) value
+// cannot busy-loop the DB.
+const MIN_POLL_SECONDS: u64 = 5;
+
+/// Scheduler supervision loop: polls the cron DB and executes due jobs.
+///
+/// Runs indefinitely; individual failures are recorded in the health
+/// registry and logged, never fatal to the loop.
+pub async fn run(config: Config) -> Result<()> {
+    let poll_secs = config.reliability.scheduler_poll_secs.max(MIN_POLL_SECONDS);
+    let mut interval = time::interval(Duration::from_secs(poll_secs));
+    let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);
+
+    crate::health::mark_component_ok("scheduler");
+
+    loop {
+        interval.tick().await;
+
+        let jobs = match due_jobs(&config, Utc::now()) {
+            Ok(jobs) => jobs,
+            Err(e) => {
+                // DB errors are treated as transient: mark unhealthy and
+                // retry on the next tick instead of exiting the loop.
+                crate::health::mark_component_error("scheduler", e.to_string());
+                tracing::warn!("Scheduler query failed: {e}");
+                continue;
+            }
+        };
+
+        for job in jobs {
+            // Re-mark healthy before each job so an earlier job's error
+            // state doesn't mask subsequent progress.
+            crate::health::mark_component_ok("scheduler");
+            let (success, output) = execute_job_with_retry(&config, &security, &job).await;
+
+            if !success {
+                crate::health::mark_component_error("scheduler", format!("job {} failed", job.id));
+            }
+
+            // Always reschedule, even after failure — a failing job keeps
+            // its cadence rather than being retried immediately forever.
+            if let Err(e) = reschedule_after_run(&config, &job, success, &output) {
+                crate::health::mark_component_error("scheduler", e.to_string());
+                tracing::warn!("Failed to persist scheduler run result: {e}");
+            }
+        }
+    }
+}
+
+/// Run a job, retrying with exponential backoff + jitter on failure.
+///
+/// Security-policy rejections are deterministic and returned immediately
+/// without retrying. Returns `(success, combined_output)` of the last
+/// attempt.
+async fn execute_job_with_retry(
+    config: &Config,
+    security: &SecurityPolicy,
+    job: &CronJob,
+) -> (bool, String) {
+    let mut last_output = String::new();
+    let retries = config.reliability.scheduler_retries;
+    // Floor the base backoff so a zero-configured value still yields a delay.
+    let mut backoff_ms = config.reliability.provider_backoff_ms.max(200);
+
+    for attempt in 0..=retries {
+        let (success, output) = run_job_command(config, security, job).await;
+        last_output = output;
+
+        if success {
+            return (true, last_output);
+        }
+
+        // This prefix is emitted by run_job_command for policy rejections.
+        if last_output.starts_with("blocked by security policy:") {
+            // Deterministic policy violations are not retryable.
+            return (false, last_output);
+        }
+
+        if attempt < retries {
+            // Cheap jitter (0..250ms) from the wall clock; the u32 -> u64
+            // cast is lossless.
+            let jitter_ms = (Utc::now().timestamp_subsec_millis() % 250) as u64;
+            time::sleep(Duration::from_millis(backoff_ms + jitter_ms)).await;
+            // Double the backoff each round, capped at 30s.
+            backoff_ms = (backoff_ms.saturating_mul(2)).min(30_000);
+        }
+    }
+
+    (false, last_output)
+}
+
+/// True when `word` looks like a leading `NAME=value` environment
+/// assignment: it contains '=' and starts with an ASCII letter or '_'.
+fn is_env_assignment(word: &str) -> bool {
+    if !word.contains('=') {
+        return false;
+    }
+    match word.chars().next() {
+        Some(first) => first.is_ascii_alphabetic() || first == '_',
+        None => false,
+    }
+}
+
+/// Remove any leading/trailing single or double quotes from a token.
+fn strip_wrapping_quotes(token: &str) -> &str {
+    token.trim_matches(|c: char| matches!(c, '"' | '\''))
+}
+
+/// Best-effort scan of a shell command line for path-like arguments that the
+/// security policy forbids.
+///
+/// The command is split on shell separators (`&&`, `||`, `;`, `|`, newline),
+/// leading `NAME=value` env assignments and the executable token are skipped,
+/// and any remaining token that looks like a filesystem path is checked
+/// against the policy. Returns the first forbidden path found, if any.
+///
+/// NOTE(review): heuristic, not a shell parser — quoted/escaped separators
+/// are not understood; this is a defense-in-depth layer on top of
+/// `is_command_allowed`, not a substitute for it.
+// Restored the `Option<String>` return-type parameter stripped in transit.
+fn forbidden_path_argument(security: &SecurityPolicy, command: &str) -> Option<String> {
+    let mut normalized = command.to_string();
+    for sep in ["&&", "||"] {
+        normalized = normalized.replace(sep, "\x00");
+    }
+    for sep in ['\n', ';', '|'] {
+        normalized = normalized.replace(sep, "\x00");
+    }
+
+    for segment in normalized.split('\x00') {
+        let tokens: Vec<&str> = segment.split_whitespace().collect();
+        if tokens.is_empty() {
+            continue;
+        }
+
+        // Skip leading env assignments and the executable token.
+        let mut idx = 0;
+        while idx < tokens.len() && is_env_assignment(tokens[idx]) {
+            idx += 1;
+        }
+        if idx >= tokens.len() {
+            continue;
+        }
+        idx += 1;
+
+        for token in &tokens[idx..] {
+            let raw = strip_wrapping_quotes(token);
+            // `--flag=/path` embeds a path after '='; check the value part
+            // instead of skipping the whole token (previously a bypass).
+            let candidate = if raw.starts_with('-') {
+                match raw.split_once('=') {
+                    Some((_, value)) => strip_wrapping_quotes(value),
+                    None => continue,
+                }
+            } else {
+                raw
+            };
+            if candidate.is_empty() || candidate.contains("://") {
+                continue;
+            }
+
+            let looks_like_path = candidate.starts_with('/')
+                || candidate.starts_with("./")
+                || candidate.starts_with("../")
+                || candidate.starts_with("~/")
+                || candidate.contains('/');
+
+            if looks_like_path && !security.is_path_allowed(candidate) {
+                return Some(candidate.to_string());
+            }
+        }
+    }
+
+    None
+}
+
+/// Execute one cron job through `sh -lc` inside the workspace directory.
+///
+/// Both security gates run BEFORE any process is spawned: first the command
+/// allowlist, then the path-argument scan. Returns `(success, output)` where
+/// output embeds the exit status plus captured stdout and stderr.
+async fn run_job_command(
+    config: &Config,
+    security: &SecurityPolicy,
+    job: &CronJob,
+) -> (bool, String) {
+    if !security.is_command_allowed(&job.command) {
+        // NOTE: this exact "blocked by security policy:" prefix is matched by
+        // execute_job_with_retry to skip retries — keep it in sync.
+        return (
+            false,
+            format!(
+                "blocked by security policy: command not allowed: {}",
+                job.command
+            ),
+        );
+    }
+
+    if let Some(path) = forbidden_path_argument(security, &job.command) {
+        return (
+            false,
+            format!("blocked by security policy: forbidden path argument: {path}"),
+        );
+    }
+
+    // `sh -lc` gives login-shell semantics (PATH etc.) for the job command.
+    let output = Command::new("sh")
+        .arg("-lc")
+        .arg(&job.command)
+        .current_dir(&config.workspace_dir)
+        .output()
+        .await;
+
+    match output {
+        Ok(output) => {
+            let stdout = String::from_utf8_lossy(&output.stdout);
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            let combined = format!(
+                "status={}\nstdout:\n{}\nstderr:\n{}",
+                output.status,
+                stdout.trim(),
+                stderr.trim()
+            );
+            (output.status.success(), combined)
+        }
+        Err(e) => (false, format!("spawn error: {e}")),
+    }
+}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::Config;
    use crate::security::SecurityPolicy;
    use tempfile::TempDir;

    // Build a Config rooted in a fresh temp dir, with the workspace
    // directory actually created on disk so commands can run inside it.
    fn test_config(tmp: &TempDir) -> Config {
        let mut config = Config::default();
        config.workspace_dir = tmp.path().join("workspace");
        config.config_path = tmp.path().join("config.toml");
        std::fs::create_dir_all(&config.workspace_dir).unwrap();
        config
    }

    // Minimal CronJob fixture: every-minute schedule, never run before.
    fn test_job(command: &str) -> CronJob {
        CronJob {
            id: "test-job".into(),
            expression: "* * * * *".into(),
            command: command.into(),
            next_run: Utc::now(),
            last_run: None,
            last_status: None,
        }
    }

    // Happy path: an allowed command runs, succeeds, and the transcript
    // carries both its stdout and the formatted exit status.
    #[tokio::test]
    async fn run_job_command_success() {
        let tmp = TempDir::new().unwrap();
        let config = test_config(&tmp);
        let job = test_job("echo scheduler-ok");
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        let (success, output) = run_job_command(&config, &security, &job).await;
        assert!(success);
        assert!(output.contains("scheduler-ok"));
        // NOTE: "exit status: 0" is std::process::ExitStatus's Unix
        // Display format; this assertion is platform-sensitive.
        assert!(output.contains("status=exit status: 0"));
    }

    // A command that exits non-zero is reported as a failure, with the
    // offending stderr captured in the transcript.
    #[tokio::test]
    async fn run_job_command_failure() {
        let tmp = TempDir::new().unwrap();
        let config = test_config(&tmp);
        let job = test_job("ls definitely_missing_file_for_scheduler_test");
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        let (success, output) = run_job_command(&config, &security, &job).await;
        assert!(!success);
        assert!(output.contains("definitely_missing_file_for_scheduler_test"));
        assert!(output.contains("status=exit status:"));
    }

    // A command outside the allowlist is refused before anything spawns.
    #[tokio::test]
    async fn run_job_command_blocks_disallowed_command() {
        let tmp = TempDir::new().unwrap();
        let mut config = test_config(&tmp);
        config.autonomy.allowed_commands = vec!["echo".into()];
        let job = test_job("curl https://evil.example");
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        let (success, output) = run_job_command(&config, &security, &job).await;
        assert!(!success);
        assert!(output.contains("blocked by security policy"));
        assert!(output.contains("command not allowed"));
    }

    // An allowed command with a path argument outside the workspace is
    // refused by the path gate, naming the forbidden path.
    #[tokio::test]
    async fn run_job_command_blocks_forbidden_path_argument() {
        let tmp = TempDir::new().unwrap();
        let mut config = test_config(&tmp);
        config.autonomy.allowed_commands = vec!["cat".into()];
        let job = test_job("cat /etc/passwd");
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        let (success, output) = run_job_command(&config, &security, &job).await;
        assert!(!success);
        assert!(output.contains("blocked by security policy"));
        assert!(output.contains("forbidden path argument"));
        assert!(output.contains("/etc/passwd"));
    }

    // Retry path: the script fails once (leaving a flag file), then
    // succeeds on the retry — the overall result must be success.
    #[tokio::test]
    async fn execute_job_with_retry_recovers_after_first_failure() {
        let tmp = TempDir::new().unwrap();
        let mut config = test_config(&tmp);
        config.reliability.scheduler_retries = 1;
        config.reliability.provider_backoff_ms = 1;
        config.autonomy.allowed_commands = vec!["sh".into()];
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        std::fs::write(
            config.workspace_dir.join("retry-once.sh"),
            "#!/bin/sh\nif [ -f retry-ok.flag ]; then\n echo recovered\n exit 0\nfi\ntouch retry-ok.flag\nexit 1\n",
        )
        .unwrap();
        let job = test_job("sh ./retry-once.sh");

        let (success, output) = execute_job_with_retry(&config, &security, &job).await;
        assert!(success);
        assert!(output.contains("recovered"));
    }

    // Retry path: a command that always fails exhausts its attempts and
    // surfaces the final failure transcript.
    #[tokio::test]
    async fn execute_job_with_retry_exhausts_attempts() {
        let tmp = TempDir::new().unwrap();
        let mut config = test_config(&tmp);
        config.reliability.scheduler_retries = 1;
        config.reliability.provider_backoff_ms = 1;
        let security = SecurityPolicy::from_config(&config.autonomy, &config.workspace_dir);

        let job = test_job("ls always_missing_for_retry_test");

        let (success, output) = execute_job_with_retry(&config, &security, &job).await;
        assert!(!success);
        assert!(output.contains("always_missing_for_retry_test"));
    }
}
diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs
new file mode 100644
index 0000000..db374bc
--- /dev/null
+++ b/src/daemon/mod.rs
@@ -0,0 +1,287 @@
+use crate::config::Config;
+use anyhow::Result;
+use chrono::Utc;
+use std::future::Future;
+use std::path::PathBuf;
+use tokio::task::JoinHandle;
+use tokio::time::Duration;
+
/// Interval (seconds) at which the state writer flushes the health
/// snapshot to `daemon_state.json`.
const STATUS_FLUSH_SECONDS: u64 = 5;
+
+pub async fn run(config: Config, host: String, port: u16) -> Result<()> {
+ let initial_backoff = config.reliability.channel_initial_backoff_secs.max(1);
+ let max_backoff = config
+ .reliability
+ .channel_max_backoff_secs
+ .max(initial_backoff);
+
+ crate::health::mark_component_ok("daemon");
+
+ if config.heartbeat.enabled {
+ let _ =
+ crate::heartbeat::engine::HeartbeatEngine::ensure_heartbeat_file(&config.workspace_dir)
+ .await;
+ }
+
+ let mut handles: Vec> = vec![spawn_state_writer(config.clone())];
+
+ {
+ let gateway_cfg = config.clone();
+ let gateway_host = host.clone();
+ handles.push(spawn_component_supervisor(
+ "gateway",
+ initial_backoff,
+ max_backoff,
+ move || {
+ let cfg = gateway_cfg.clone();
+ let host = gateway_host.clone();
+ async move { crate::gateway::run_gateway(&host, port, cfg).await }
+ },
+ ));
+ }
+
+ {
+ if has_supervised_channels(&config) {
+ let channels_cfg = config.clone();
+ handles.push(spawn_component_supervisor(
+ "channels",
+ initial_backoff,
+ max_backoff,
+ move || {
+ let cfg = channels_cfg.clone();
+ async move { crate::channels::start_channels(cfg).await }
+ },
+ ));
+ } else {
+ crate::health::mark_component_ok("channels");
+ tracing::info!("No real-time channels configured; channel supervisor disabled");
+ }
+ }
+
+ if config.heartbeat.enabled {
+ let heartbeat_cfg = config.clone();
+ handles.push(spawn_component_supervisor(
+ "heartbeat",
+ initial_backoff,
+ max_backoff,
+ move || {
+ let cfg = heartbeat_cfg.clone();
+ async move { run_heartbeat_worker(cfg).await }
+ },
+ ));
+ }
+
+ {
+ let scheduler_cfg = config.clone();
+ handles.push(spawn_component_supervisor(
+ "scheduler",
+ initial_backoff,
+ max_backoff,
+ move || {
+ let cfg = scheduler_cfg.clone();
+ async move { crate::cron::scheduler::run(cfg).await }
+ },
+ ));
+ }
+
+ println!("🧠 ZeroClaw daemon started");
+ println!(" Gateway: http://{host}:{port}");
+ println!(" Components: gateway, channels, heartbeat, scheduler");
+ println!(" Ctrl+C to stop");
+
+ tokio::signal::ctrl_c().await?;
+ crate::health::mark_component_error("daemon", "shutdown requested");
+
+ for handle in &handles {
+ handle.abort();
+ }
+ for handle in handles {
+ let _ = handle.await;
+ }
+
+ Ok(())
+}
+
+pub fn state_file_path(config: &Config) -> PathBuf {
+ config
+ .config_path
+ .parent()
+ .map_or_else(|| PathBuf::from("."), PathBuf::from)
+ .join("daemon_state.json")
+}
+
+fn spawn_state_writer(config: Config) -> JoinHandle<()> {
+ tokio::spawn(async move {
+ let path = state_file_path(&config);
+ if let Some(parent) = path.parent() {
+ let _ = tokio::fs::create_dir_all(parent).await;
+ }
+
+ let mut interval = tokio::time::interval(Duration::from_secs(STATUS_FLUSH_SECONDS));
+ loop {
+ interval.tick().await;
+ let mut json = crate::health::snapshot_json();
+ if let Some(obj) = json.as_object_mut() {
+ obj.insert(
+ "written_at".into(),
+ serde_json::json!(Utc::now().to_rfc3339()),
+ );
+ }
+ let data = serde_json::to_vec_pretty(&json).unwrap_or_else(|_| b"{}".to_vec());
+ let _ = tokio::fs::write(&path, data).await;
+ }
+ })
+}
+
+fn spawn_component_supervisor(
+ name: &'static str,
+ initial_backoff_secs: u64,
+ max_backoff_secs: u64,
+ mut run_component: F,
+) -> JoinHandle<()>
+where
+ F: FnMut() -> Fut + Send + 'static,
+ Fut: Future