Merge remote-tracking branch 'origin/main' into feat/glm-provider

Resolved conflicts in:
- Cargo.toml: kept both `ring` (JWT auth) and `prost` (protobuf) dependencies
- src/onboard/wizard.rs: accepted main branch version
- src/providers/mod.rs: accepted main branch version
- Cargo.lock: accepted main branch version

Note: The custom `glm::GlmProvider` from this PR was replaced with
main's OpenAiCompatibleProvider approach for GLM, which uses base URLs.
The main purpose of this PR is Windows daemon support via Task Scheduler.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
argenis de la rosa 2026-02-17 13:27:58 -05:00
commit 34af6a223a
269 changed files with 68574 additions and 2541 deletions

5
.cargo/config.toml Normal file
View file

@ -0,0 +1,5 @@
# Force fully static linking for musl targets so release binaries are
# self-contained (no runtime libc dependency) — required for the portable
# Linux builds.
[target.x86_64-unknown-linux-musl]
rustflags = ["-C", "link-arg=-static"]
[target.aarch64-unknown-linux-musl]
rustflags = ["-C", "link-arg=-static"]

70
.env.example Normal file
View file

@ -0,0 +1,70 @@
# ZeroClaw Environment Variables
# Copy this file to `.env` and fill in your local values.
# Never commit `.env` or any real secrets.
# ── Core Runtime ──────────────────────────────────────────────
# Provider key resolution at runtime:
# 1) explicit key passed from config/CLI
# 2) provider-specific env var (OPENROUTER_API_KEY, OPENAI_API_KEY, ...)
# 3) generic fallback env vars below
# Generic fallback API key (used when provider-specific key is absent)
API_KEY=your-api-key-here
# ZEROCLAW_API_KEY=your-api-key-here
# Default provider/model (can be overridden by CLI flags)
PROVIDER=openrouter
# ZEROCLAW_PROVIDER=openrouter
# ZEROCLAW_MODEL=anthropic/claude-sonnet-4-20250514
# ZEROCLAW_TEMPERATURE=0.7
# Workspace directory override
# ZEROCLAW_WORKSPACE=/path/to/workspace
# ── Provider-Specific API Keys ────────────────────────────────
# OpenRouter
# OPENROUTER_API_KEY=sk-or-v1-...
# Anthropic
# ANTHROPIC_OAUTH_TOKEN=...
# ANTHROPIC_API_KEY=sk-ant-...
# OpenAI / Gemini
# OPENAI_API_KEY=sk-...
# GEMINI_API_KEY=...
# GOOGLE_API_KEY=...
# Other supported providers
# VENICE_API_KEY=...
# GROQ_API_KEY=...
# MISTRAL_API_KEY=...
# DEEPSEEK_API_KEY=...
# XAI_API_KEY=...
# TOGETHER_API_KEY=...
# FIREWORKS_API_KEY=...
# PERPLEXITY_API_KEY=...
# COHERE_API_KEY=...
# MOONSHOT_API_KEY=...
# GLM_API_KEY=...
# MINIMAX_API_KEY=...
# QIANFAN_API_KEY=...
# DASHSCOPE_API_KEY=...
# ZAI_API_KEY=...
# SYNTHETIC_API_KEY=...
# OPENCODE_API_KEY=...
# VERCEL_API_KEY=...
# CLOUDFLARE_API_KEY=...
# ── Gateway ──────────────────────────────────────────────────
# ZEROCLAW_GATEWAY_PORT=3000
# ZEROCLAW_GATEWAY_HOST=127.0.0.1
# ZEROCLAW_ALLOW_PUBLIC_BIND=false
# ── Optional Integrations ────────────────────────────────────
# Pushover notifications (`pushover` tool)
# PUSHOVER_TOKEN=your-pushover-app-token
# PUSHOVER_USER_KEY=your-pushover-user-key
# ── Docker Compose ───────────────────────────────────────────
# Host port mapping (used by docker-compose.yml)
# HOST_PORT=3000

8
.githooks/pre-commit Executable file
View file

@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Pre-commit hook: scan staged changes for leaked secrets before they enter
# git history.
set -euo pipefail
# Run gitleaks only when it is installed; otherwise emit a warning and allow
# the commit, so contributors without the tool are not hard-blocked.
if command -v gitleaks >/dev/null 2>&1; then
gitleaks protect --staged --redact
else
echo "warning: gitleaks not found; skipping staged secret scan" >&2
fi

View file

@ -6,21 +6,46 @@
set -euo pipefail set -euo pipefail
echo "==> pre-push: checking formatting..." echo "==> pre-push: running rust quality gate..."
cargo fmt -- --check || { ./scripts/ci/rust_quality_gate.sh || {
echo "FAIL: cargo fmt -- --check found unformatted code." echo "FAIL: rust quality gate failed."
echo "Run 'cargo fmt' and try again."
exit 1 exit 1
} }
echo "==> pre-push: running clippy..." if [ "${ZEROCLAW_STRICT_LINT:-0}" = "1" ]; then
cargo clippy -- -D warnings || { echo "==> pre-push: running strict clippy warnings gate (ZEROCLAW_STRICT_LINT=1)..."
echo "FAIL: clippy reported warnings." ./scripts/ci/rust_quality_gate.sh --strict || {
exit 1 echo "FAIL: strict clippy warnings gate reported issues."
} exit 1
}
fi
if [ "${ZEROCLAW_STRICT_DELTA_LINT:-0}" = "1" ]; then
echo "==> pre-push: running strict delta lint gate (ZEROCLAW_STRICT_DELTA_LINT=1)..."
./scripts/ci/rust_strict_delta_gate.sh || {
echo "FAIL: strict delta lint gate reported issues."
exit 1
}
fi
if [ "${ZEROCLAW_DOCS_LINT:-0}" = "1" ]; then
echo "==> pre-push: running docs quality gate (ZEROCLAW_DOCS_LINT=1)..."
./scripts/ci/docs_quality_gate.sh || {
echo "FAIL: docs quality gate reported issues."
exit 1
}
fi
if [ "${ZEROCLAW_DOCS_LINKS:-0}" = "1" ]; then
echo "==> pre-push: running docs links gate (ZEROCLAW_DOCS_LINKS=1)..."
./scripts/ci/docs_links_gate.sh || {
echo "FAIL: docs links gate reported issues."
exit 1
}
fi
echo "==> pre-push: running tests..." echo "==> pre-push: running tests..."
cargo test || { cargo test --locked || {
echo "FAIL: some tests did not pass." echo "FAIL: some tests did not pass."
exit 1 exit 1
} }

28
.github/CODEOWNERS vendored Normal file
View file

@ -0,0 +1,28 @@
# Default owner for all files
* @theonlyhennygod
# High-risk surfaces
/src/security/** @willsarg
/src/runtime/** @theonlyhennygod
/src/memory/** @theonlyhennygod @chumyin
/.github/** @theonlyhennygod
/Cargo.toml @theonlyhennygod
/Cargo.lock @theonlyhennygod
# CI
/.github/workflows/** @theonlyhennygod @willsarg
/.github/codeql/** @willsarg
/.github/dependabot.yml @willsarg
# Docs & governance
/docs/** @chumyin
/AGENTS.md @chumyin
/CLAUDE.md @chumyin
/CONTRIBUTING.md @chumyin
/docs/pr-workflow.md @chumyin
/docs/reviewer-playbook.md @chumyin
# Security / CI-CD governance overrides (last-match wins)
/SECURITY.md @willsarg
/docs/actions-source-policy.md @willsarg
/docs/ci-map.md @willsarg

148
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
View file

@ -0,0 +1,148 @@
name: Bug Report
description: Report a reproducible defect in ZeroClaw
title: "[Bug]: "
labels:
- bug
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to report a bug.
Please provide a minimal reproducible case so maintainers can triage quickly.
Do not include personal/sensitive data; redact and anonymize all logs/payloads.
- type: input
id: summary
attributes:
label: Summary
description: One-line description of the problem.
placeholder: zeroclaw daemon exits immediately when ...
validations:
required: true
- type: dropdown
id: component
attributes:
label: Affected component
options:
- runtime/daemon
- provider
- channel
- memory
- security/sandbox
- tooling/ci
- docs
- unknown
validations:
required: true
- type: dropdown
id: severity
attributes:
label: Severity
options:
- S0 - data loss / security risk
- S1 - workflow blocked
- S2 - degraded behavior
- S3 - minor issue
validations:
required: true
- type: textarea
id: current
attributes:
label: Current behavior
description: What is happening now?
placeholder: The process exits with ...
validations:
required: true
- type: textarea
id: expected
attributes:
label: Expected behavior
description: What should happen instead?
placeholder: The daemon should stay alive and ...
validations:
required: true
- type: textarea
id: reproduce
attributes:
label: Steps to reproduce
description: Please provide exact commands/config.
placeholder: |
1. zeroclaw onboard --interactive
2. zeroclaw daemon
3. Observe crash in logs
render: bash
validations:
required: true
- type: textarea
id: impact
attributes:
label: Impact
description: Who is affected, how often, and practical consequences.
placeholder: |
Affected users: ...
Frequency: always/intermittent
Consequence: ...
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs / stack traces
description: Paste relevant logs (redact secrets, personal identifiers, and sensitive data).
render: text
validations:
required: false
- type: input
id: version
attributes:
label: ZeroClaw version
placeholder: v0.1.0 / commit SHA
validations:
required: true
- type: input
id: rust
attributes:
label: Rust version
placeholder: rustc 1.xx.x
validations:
required: true
- type: input
id: os
attributes:
label: Operating system
placeholder: Ubuntu 24.04 / macOS 15 / Windows 11
validations:
required: true
- type: dropdown
id: regression
attributes:
label: Regression?
options:
- Unknown
- Yes, it worked before
- No, first-time setup
validations:
required: true
- type: checkboxes
id: checks
attributes:
label: Pre-flight checks
options:
- label: I reproduced this on the latest main branch or latest release.
required: true
- label: I redacted secrets/tokens from logs.
required: true
- label: I removed personal identifiers and replaced identity-specific data with neutral placeholders.
required: true

11
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View file

@ -0,0 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: Security vulnerability report
url: https://github.com/zeroclaw-labs/zeroclaw/security/policy
about: Please report security vulnerabilities privately via SECURITY.md policy.
- name: Contribution guide
url: https://github.com/zeroclaw-labs/zeroclaw/blob/main/CONTRIBUTING.md
about: Please read contribution and PR requirements before opening an issue.
- name: PR workflow & reviewer expectations
url: https://github.com/zeroclaw-labs/zeroclaw/blob/main/docs/pr-workflow.md
about: Read risk-based PR tracks, CI gates, and merge criteria before filing feature requests.

View file

@ -0,0 +1,107 @@
name: Feature Request
description: Propose an improvement or new capability
title: "[Feature]: "
labels:
- enhancement
body:
- type: markdown
attributes:
value: |
Thanks for sharing your idea.
Please focus on user value, constraints, and rollout safety.
Do not include personal/sensitive data; use neutral project-scoped placeholders.
- type: input
id: summary
attributes:
label: Summary
description: One-line statement of the requested capability.
placeholder: Add a provider-level retry budget override for long-running channels.
validations:
required: true
- type: textarea
id: problem
attributes:
label: Problem statement
description: What user pain does this solve and why is current behavior insufficient?
placeholder: Teams operating in unstable networks cannot tune retries per provider...
validations:
required: true
- type: textarea
id: proposal
attributes:
label: Proposed solution
description: Describe preferred behavior and interfaces.
placeholder: Add `[provider.retry]` config and enforce bounds in config validation.
validations:
required: true
- type: textarea
id: non_goals
attributes:
label: Non-goals / out of scope
description: Clarify what should not be included in the first iteration.
placeholder: No UI changes, no cross-provider dynamic adaptation in v1.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Alternatives considered
description: What alternatives did you evaluate?
placeholder: Keep current behavior, use wrapper scripts, etc.
validations:
required: false
- type: textarea
id: acceptance
attributes:
label: Acceptance criteria
description: What outcomes would make this request complete?
placeholder: |
- Config key is documented and validated
- Runtime path uses configured retry budget
- Regression tests cover fallback and invalid config
validations:
required: true
- type: textarea
id: architecture
attributes:
label: Architecture impact
description: Which subsystem(s) are affected?
placeholder: providers/, channels/, memory/, runtime/, security/, docs/ ...
validations:
required: true
- type: textarea
id: risk
attributes:
label: Risk and rollback
description: Main risk + how to disable/revert quickly.
placeholder: Risk is ... rollback is ...
validations:
required: true
- type: dropdown
id: breaking
attributes:
label: Breaking change?
options:
- No
- Yes
validations:
required: true
- type: checkboxes
id: hygiene
attributes:
label: Data hygiene checks
options:
- label: I removed personal/sensitive data from examples, payloads, and logs.
required: true
- label: I used neutral, project-focused wording and placeholders.
required: true

4
.github/actionlint.yaml vendored Normal file
View file

@ -0,0 +1,4 @@
self-hosted-runner:
labels:
- lxc-ci
- blacksmith-2vcpu-ubuntu-2404

8
.github/codeql/codeql-config.yml vendored Normal file
View file

@ -0,0 +1,8 @@
# CodeQL configuration for ZeroClaw
#
# We intentionally ignore integration tests under `tests/` because they often
# contain security-focused fixtures (example secrets, malformed payloads, etc.)
# that can trigger false positives in security queries.
paths-ignore:
- tests/**

35
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,35 @@
version: 2
updates:
- package-ecosystem: cargo
directory: "/"
schedule:
interval: weekly
target-branch: main
open-pull-requests-limit: 5
labels:
- "dependencies"
groups:
rust-minor-patch:
patterns:
- "*"
update-types:
- minor
- patch
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
target-branch: main
open-pull-requests-limit: 3
labels:
- "ci"
- "dependencies"
groups:
actions-minor-patch:
patterns:
- "*"
update-types:
- minor
- patch

21
.github/label-policy.json vendored Normal file
View file

@ -0,0 +1,21 @@
{
"contributor_tier_color": "2ED9FF",
"contributor_tiers": [
{
"label": "distinguished contributor",
"min_merged_prs": 50
},
{
"label": "principal contributor",
"min_merged_prs": 20
},
{
"label": "experienced contributor",
"min_merged_prs": 10
},
{
"label": "trusted contributor",
"min_merged_prs": 5
}
]
}

147
.github/labeler.yml vendored Normal file
View file

@ -0,0 +1,147 @@
"docs":
- changed-files:
- any-glob-to-any-file:
- "docs/**"
- "**/*.md"
- "**/*.mdx"
- "LICENSE"
- ".markdownlint-cli2.yaml"
"dependencies":
- changed-files:
- any-glob-to-any-file:
- "Cargo.toml"
- "Cargo.lock"
- "deny.toml"
- ".github/dependabot.yml"
"ci":
- changed-files:
- any-glob-to-any-file:
- ".github/**"
- ".githooks/**"
"core":
- changed-files:
- any-glob-to-any-file:
- "src/*.rs"
"agent":
- changed-files:
- any-glob-to-any-file:
- "src/agent/**"
"channel":
- changed-files:
- any-glob-to-any-file:
- "src/channels/**"
"gateway":
- changed-files:
- any-glob-to-any-file:
- "src/gateway/**"
"config":
- changed-files:
- any-glob-to-any-file:
- "src/config/**"
"cron":
- changed-files:
- any-glob-to-any-file:
- "src/cron/**"
"daemon":
- changed-files:
- any-glob-to-any-file:
- "src/daemon/**"
"doctor":
- changed-files:
- any-glob-to-any-file:
- "src/doctor/**"
"health":
- changed-files:
- any-glob-to-any-file:
- "src/health/**"
"heartbeat":
- changed-files:
- any-glob-to-any-file:
- "src/heartbeat/**"
"integration":
- changed-files:
- any-glob-to-any-file:
- "src/integrations/**"
"memory":
- changed-files:
- any-glob-to-any-file:
- "src/memory/**"
"security":
- changed-files:
- any-glob-to-any-file:
- "src/security/**"
"runtime":
- changed-files:
- any-glob-to-any-file:
- "src/runtime/**"
"onboard":
- changed-files:
- any-glob-to-any-file:
- "src/onboard/**"
"provider":
- changed-files:
- any-glob-to-any-file:
- "src/providers/**"
"service":
- changed-files:
- any-glob-to-any-file:
- "src/service/**"
"skillforge":
- changed-files:
- any-glob-to-any-file:
- "src/skillforge/**"
"skills":
- changed-files:
- any-glob-to-any-file:
- "src/skills/**"
"tool":
- changed-files:
- any-glob-to-any-file:
- "src/tools/**"
"tunnel":
- changed-files:
- any-glob-to-any-file:
- "src/tunnel/**"
"observability":
- changed-files:
- any-glob-to-any-file:
- "src/observability/**"
"tests":
- changed-files:
- any-glob-to-any-file:
- "tests/**"
"scripts":
- changed-files:
- any-glob-to-any-file:
- "scripts/**"
"dev":
- changed-files:
- any-glob-to-any-file:
- "dev/**"

109
.github/pull_request_template.md vendored Normal file
View file

@ -0,0 +1,109 @@
## Summary
Describe this PR in 2-5 bullets:
- Problem:
- Why it matters:
- What changed:
- What did **not** change (scope boundary):
## Label Snapshot (required)
- Risk label (`risk: low|medium|high`):
- Size label (`size: XS|S|M|L|XL`, auto-managed/read-only):
- Scope labels (`core|agent|channel|config|cron|daemon|doctor|gateway|health|heartbeat|integration|memory|observability|onboard|provider|runtime|security|service|skillforge|skills|tool|tunnel|docs|dependencies|ci|tests|scripts|dev`, comma-separated):
- Module labels (`<module>:<component>`, for example `channel:telegram`, `provider:kimi`, `tool:shell`):
- Contributor tier label (`trusted contributor|experienced contributor|principal contributor|distinguished contributor`, auto-managed/read-only; author merged PRs >=5/10/20/50):
- If any auto-label is incorrect, note requested correction:
## Change Metadata
- Change type (`bug|feature|refactor|docs|security|chore`):
- Primary scope (`runtime|provider|channel|memory|security|ci|docs|multi`):
## Linked Issue
- Closes #
- Related #
- Depends on # (if stacked)
- Supersedes # (if replacing older PR)
## Supersede Attribution (required when `Supersedes #` is used)
- Superseded PRs + authors (`#<pr> by @<author>`, one per line):
- Integrated scope by source PR (what was materially carried forward):
- `Co-authored-by` trailers added for materially incorporated contributors? (`Yes/No`)
- If `No`, explain why (for example: inspiration-only, no direct code/design carry-over):
- Trailer format check (separate lines, no escaped `\n`): (`Pass/Fail`)
## Validation Evidence (required)
Commands and result summary:
```bash
cargo fmt --all -- --check
cargo clippy --all-targets -- -D warnings
cargo test
```
- Evidence provided (test/log/trace/screenshot/perf):
- If any command is intentionally skipped, explain why:
## Security Impact (required)
- New permissions/capabilities? (`Yes/No`)
- New external network calls? (`Yes/No`)
- Secrets/tokens handling changed? (`Yes/No`)
- File system access scope changed? (`Yes/No`)
- If any `Yes`, describe risk and mitigation:
## Privacy and Data Hygiene (required)
- Data-hygiene status (`pass|needs-follow-up`):
- Redaction/anonymization notes:
- Neutral wording confirmation (use ZeroClaw/project-native labels if identity-like wording is needed):
## Compatibility / Migration
- Backward compatible? (`Yes/No`)
- Config/env changes? (`Yes/No`)
- Migration needed? (`Yes/No`)
- If yes, exact upgrade steps:
## Human Verification (required)
What was personally validated beyond CI:
- Verified scenarios:
- Edge cases checked:
- What was not verified:
## Side Effects / Blast Radius (required)
- Affected subsystems/workflows:
- Potential unintended effects:
- Guardrails/monitoring for early detection:
## Agent Collaboration Notes (recommended)
- Agent tools used (if any):
- Workflow/plan summary (if any):
- Verification focus:
- Confirmation: naming + architecture boundaries followed (`AGENTS.md` + `CONTRIBUTING.md`):
## Rollback Plan (required)
- Fast rollback command/path:
- Feature flags or config toggles (if any):
- Observable failure symptoms:
## Risks and Mitigations
List real risks in this PR (or write `None`).
- Risk:
- Mitigation:

285
.github/workflows/auto-response.yml vendored Normal file
View file

@ -0,0 +1,285 @@
name: PR Auto Responder
on:
issues:
types: [opened, reopened, labeled, unlabeled]
pull_request_target:
types: [opened, labeled, unlabeled]
permissions: {}
jobs:
contributor-tier-issues:
if: >-
(github.event_name == 'issues' &&
(github.event.action == 'opened' || github.event.action == 'reopened' || github.event.action == 'labeled' || github.event.action == 'unlabeled')) ||
(github.event_name == 'pull_request_target' &&
(github.event.action == 'labeled' || github.event.action == 'unlabeled'))
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
issues: write
pull-requests: write
steps:
- name: Apply contributor tier label for issue author
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
const owner = context.repo.owner;
const repo = context.repo.repo;
const issue = context.payload.issue;
const pullRequest = context.payload.pull_request;
const target = issue ?? pullRequest;
async function loadContributorTierPolicy() {
const fallback = {
contributorTierColor: "2ED9FF",
contributorTierRules: [
{ label: "distinguished contributor", minMergedPRs: 50 },
{ label: "principal contributor", minMergedPRs: 20 },
{ label: "experienced contributor", minMergedPRs: 10 },
{ label: "trusted contributor", minMergedPRs: 5 },
],
};
try {
const { data } = await github.rest.repos.getContent({
owner,
repo,
path: ".github/label-policy.json",
ref: context.payload.repository?.default_branch || "main",
});
const json = JSON.parse(Buffer.from(data.content, "base64").toString("utf8"));
const contributorTierRules = (json.contributor_tiers || []).map((entry) => ({
label: String(entry.label || "").trim(),
minMergedPRs: Number(entry.min_merged_prs || 0),
}));
const contributorTierColor = String(json.contributor_tier_color || "").toUpperCase();
if (!contributorTierColor || contributorTierRules.length === 0) {
return fallback;
}
return { contributorTierColor, contributorTierRules };
} catch (error) {
core.warning(`failed to load .github/label-policy.json, using fallback policy: ${error.message}`);
return fallback;
}
}
const { contributorTierColor, contributorTierRules } = await loadContributorTierPolicy();
const contributorTierLabels = contributorTierRules.map((rule) => rule.label);
const managedContributorLabels = new Set(contributorTierLabels);
const action = context.payload.action;
const changedLabel = context.payload.label?.name;
if (!target) return;
if ((action === "labeled" || action === "unlabeled") && !managedContributorLabels.has(changedLabel)) {
return;
}
const author = target.user;
if (!author || author.type === "Bot") return;
function contributorTierDescription(rule) {
return `Contributor with ${rule.minMergedPRs}+ merged PRs.`;
}
async function ensureContributorTierLabels() {
for (const rule of contributorTierRules) {
const label = rule.label;
const expectedDescription = contributorTierDescription(rule);
try {
const { data: existing } = await github.rest.issues.getLabel({ owner, repo, name: label });
const currentColor = (existing.color || "").toUpperCase();
const currentDescription = (existing.description || "").trim();
if (currentColor !== contributorTierColor || currentDescription !== expectedDescription) {
await github.rest.issues.updateLabel({
owner,
repo,
name: label,
new_name: label,
color: contributorTierColor,
description: expectedDescription,
});
}
} catch (error) {
if (error.status !== 404) throw error;
await github.rest.issues.createLabel({
owner,
repo,
name: label,
color: contributorTierColor,
description: expectedDescription,
});
}
}
}
function selectContributorTier(mergedCount) {
const matchedTier = contributorTierRules.find((rule) => mergedCount >= rule.minMergedPRs);
return matchedTier ? matchedTier.label : null;
}
let contributorTierLabel = null;
try {
const { data: mergedSearch } = await github.rest.search.issuesAndPullRequests({
q: `repo:${owner}/${repo} is:pr is:merged author:${author.login}`,
per_page: 1,
});
const mergedCount = mergedSearch.total_count || 0;
contributorTierLabel = selectContributorTier(mergedCount);
} catch (error) {
core.warning(`failed to evaluate contributor tier status: ${error.message}`);
return;
}
await ensureContributorTierLabels();
const { data: currentLabels } = await github.rest.issues.listLabelsOnIssue({
owner,
repo,
issue_number: target.number,
});
const keepLabels = currentLabels
.map((label) => label.name)
.filter((label) => !contributorTierLabels.includes(label));
if (contributorTierLabel) {
keepLabels.push(contributorTierLabel);
}
await github.rest.issues.setLabels({
owner,
repo,
issue_number: target.number,
labels: [...new Set(keepLabels)],
});
first-interaction:
if: github.event.action == 'opened'
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
issues: write
pull-requests: write
steps:
- name: Greet first-time contributors
uses: actions/first-interaction@2ec0f0fd78838633cd1c1342e4536d49ef72be54 # v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: |
Thanks for opening this issue.
Before maintainers triage it, please confirm:
- Repro steps are complete and run on latest `main`
- Environment details are included (OS, Rust version, ZeroClaw version)
- Sensitive values are redacted
This helps us keep issue throughput high and response latency low.
pr-message: |
Thanks for contributing to ZeroClaw.
For faster review, please ensure:
- PR template sections are fully completed
- `cargo fmt --all -- --check`, `cargo clippy --all-targets -- -D warnings`, and `cargo test` are included
- If automation/agents were used heavily, add brief workflow notes
- Scope is focused (prefer one concern per PR)
See `CONTRIBUTING.md` and `docs/pr-workflow.md` for full collaboration rules.
labeled-routes:
if: github.event.action == 'labeled'
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
issues: write
pull-requests: write
steps:
- name: Handle label-driven responses
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
const label = context.payload.label?.name;
if (!label) return;
const issue = context.payload.issue;
const pullRequest = context.payload.pull_request;
const target = issue ?? pullRequest;
if (!target) return;
const isIssue = Boolean(issue);
const issueNumber = target.number;
const owner = context.repo.owner;
const repo = context.repo.repo;
const rules = [
{
label: "r:support",
close: true,
closeIssuesOnly: true,
closeReason: "not_planned",
message:
"This looks like a usage/support request. Please use README + docs first, then open a focused bug with repro details if behavior is incorrect.",
},
{
label: "r:needs-repro",
close: false,
message:
"Thanks for the report. Please add deterministic repro steps, exact environment, and redacted logs so maintainers can triage quickly.",
},
{
label: "invalid",
close: true,
closeIssuesOnly: true,
closeReason: "not_planned",
message:
"Closing as invalid based on current information. If this is still relevant, open a new issue with updated evidence and reproducible steps.",
},
{
label: "duplicate",
close: true,
closeIssuesOnly: true,
closeReason: "not_planned",
message:
"Closing as duplicate. Please continue discussion in the canonical linked issue/PR.",
},
];
const rule = rules.find((entry) => entry.label === label);
if (!rule) return;
const marker = `<!-- auto-response:${rule.label} -->`;
const comments = await github.paginate(github.rest.issues.listComments, {
owner,
repo,
issue_number: issueNumber,
per_page: 100,
});
const alreadyCommented = comments.some((comment) =>
(comment.body || "").includes(marker)
);
if (!alreadyCommented) {
await github.rest.issues.createComment({
owner,
repo,
issue_number: issueNumber,
body: `${rule.message}\n\n${marker}`,
});
}
if (!rule.close) return;
if (rule.closeIssuesOnly && !isIssue) return;
if (target.state === "closed") return;
if (isIssue) {
await github.rest.issues.update({
owner,
repo,
issue_number: issueNumber,
state: "closed",
state_reason: rule.closeReason || "not_planned",
});
} else {
await github.rest.issues.update({
owner,
repo,
issue_number: issueNumber,
state: "closed",
});
}

View file

@ -1,45 +1,531 @@
name: CI name: CI
on: on:
push: push:
branches: [main, develop] branches: [main]
pull_request: pull_request:
branches: [main] branches: [main]
concurrency:
group: ci-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
permissions:
contents: read
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
jobs: jobs:
test: changes:
name: Test name: Detect Change Scope
runs-on: ubuntu-latest runs-on: blacksmith-2vcpu-ubuntu-2404
continue-on-error: true # Don't block PRs outputs:
steps: docs_only: ${{ steps.scope.outputs.docs_only }}
- uses: actions/checkout@v4 docs_changed: ${{ steps.scope.outputs.docs_changed }}
- uses: dtolnay/rust-toolchain@stable rust_changed: ${{ steps.scope.outputs.rust_changed }}
- uses: Swatinem/rust-cache@v2 workflow_changed: ${{ steps.scope.outputs.workflow_changed }}
- name: Run tests docs_files: ${{ steps.scope.outputs.docs_files }}
run: cargo test --verbose base_sha: ${{ steps.scope.outputs.base_sha }}
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
build: - name: Detect docs-only changes
name: Build id: scope
runs-on: ${{ matrix.os }} shell: bash
continue-on-error: true # Don't block PRs run: |
strategy: set -euo pipefail
matrix:
include:
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
- os: macos-latest
target: x86_64-apple-darwin
- os: macos-latest
target: aarch64-apple-darwin
- os: windows-latest
target: x86_64-pc-windows-msvc
steps: write_empty_docs_files() {
- uses: actions/checkout@v4 {
- uses: dtolnay/rust-toolchain@stable echo "docs_files<<EOF"
- uses: Swatinem/rust-cache@v2 echo "EOF"
- name: Build } >> "$GITHUB_OUTPUT"
run: cargo build --release --verbose }
if [ "${{ github.event_name }}" = "pull_request" ]; then
BASE="${{ github.event.pull_request.base.sha }}"
else
BASE="${{ github.event.before }}"
fi
if [ -z "$BASE" ] || ! git cat-file -e "$BASE^{commit}" 2>/dev/null; then
{
echo "docs_only=false"
echo "docs_changed=false"
echo "rust_changed=true"
echo "workflow_changed=false"
echo "base_sha="
} >> "$GITHUB_OUTPUT"
write_empty_docs_files
exit 0
fi
CHANGED="$(git diff --name-only "$BASE" HEAD || true)"
if [ -z "$CHANGED" ]; then
{
echo "docs_only=false"
echo "docs_changed=false"
echo "rust_changed=false"
echo "workflow_changed=false"
echo "base_sha=$BASE"
} >> "$GITHUB_OUTPUT"
write_empty_docs_files
exit 0
fi
docs_only=true
docs_changed=false
rust_changed=false
workflow_changed=false
docs_files=()
while IFS= read -r file; do
[ -z "$file" ] && continue
if [[ "$file" == .github/workflows/* ]]; then
workflow_changed=true
fi
if [[ "$file" == docs/* ]] \
|| [[ "$file" == *.md ]] \
|| [[ "$file" == *.mdx ]] \
|| [[ "$file" == "LICENSE" ]] \
|| [[ "$file" == ".markdownlint-cli2.yaml" ]] \
|| [[ "$file" == .github/ISSUE_TEMPLATE/* ]] \
|| [[ "$file" == .github/pull_request_template.md ]]; then
if [[ "$file" == *.md ]] \
|| [[ "$file" == *.mdx ]] \
|| [[ "$file" == "LICENSE" ]] \
|| [[ "$file" == .github/pull_request_template.md ]]; then
docs_changed=true
docs_files+=("$file")
fi
continue
fi
docs_only=false
if [[ "$file" == src/* ]] \
|| [[ "$file" == tests/* ]] \
|| [[ "$file" == "Cargo.toml" ]] \
|| [[ "$file" == "Cargo.lock" ]] \
|| [[ "$file" == "deny.toml" ]]; then
rust_changed=true
fi
done <<< "$CHANGED"
{
echo "docs_only=$docs_only"
echo "docs_changed=$docs_changed"
echo "rust_changed=$rust_changed"
echo "workflow_changed=$workflow_changed"
echo "base_sha=$BASE"
echo "docs_files<<EOF"
printf '%s\n' "${docs_files[@]}"
echo "EOF"
} >> "$GITHUB_OUTPUT"
lint:
name: Lint Gate (Format + Clippy)
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
components: rustfmt, clippy
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Run rust quality gate
run: ./scripts/ci/rust_quality_gate.sh
lint-strict-delta:
name: Lint Gate (Strict Delta)
needs: [changes]
if: needs.changes.outputs.rust_changed == 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 25
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
components: clippy
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Run strict lint delta gate
env:
BASE_SHA: ${{ needs.changes.outputs.base_sha }}
run: ./scripts/ci/rust_strict_delta_gate.sh
test:
name: Test
needs: [changes, lint, lint-strict-delta]
if: needs.changes.outputs.rust_changed == 'true' && needs.lint.result == 'success' && needs.lint-strict-delta.result == 'success'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 30
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Run tests
run: cargo test --locked --verbose
build:
name: Build (Smoke)
needs: [changes, lint, lint-strict-delta]
if: needs.changes.outputs.rust_changed == 'true' && needs.lint.result == 'success' && needs.lint-strict-delta.result == 'success'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: 1.92.0
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Build release binary
run: cargo build --release --locked --verbose
docs-only:
name: Docs-Only Fast Path
needs: [changes]
if: needs.changes.outputs.docs_only == 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- name: Skip heavy jobs for docs-only change
run: echo "Docs-only change detected. Rust lint/test/build skipped."
non-rust:
name: Non-Rust Fast Path
needs: [changes]
if: needs.changes.outputs.docs_only != 'true' && needs.changes.outputs.rust_changed != 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- name: Skip Rust jobs for non-Rust change scope
run: echo "No Rust-impacting files changed. Rust lint/test/build skipped."
docs-quality:
name: Docs Quality
needs: [changes]
if: needs.changes.outputs.docs_changed == 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 15
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
- name: Markdown lint (changed lines only)
env:
BASE_SHA: ${{ needs.changes.outputs.base_sha }}
DOCS_FILES: ${{ needs.changes.outputs.docs_files }}
run: ./scripts/ci/docs_quality_gate.sh
- name: Collect added links
id: collect_links
shell: bash
env:
BASE_SHA: ${{ needs.changes.outputs.base_sha }}
DOCS_FILES: ${{ needs.changes.outputs.docs_files }}
run: |
set -euo pipefail
python3 ./scripts/ci/collect_changed_links.py \
--base "$BASE_SHA" \
--docs-files "$DOCS_FILES" \
--output .ci-added-links.txt
count=$(wc -l < .ci-added-links.txt | tr -d ' ')
echo "count=$count" >> "$GITHUB_OUTPUT"
if [ "$count" -gt 0 ]; then
echo "Added links queued for check:"
cat .ci-added-links.txt
else
echo "No added links found in changed docs lines."
fi
- name: Link check (offline, added links only)
if: steps.collect_links.outputs.count != '0'
uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0 # v2
with:
fail: true
args: >-
--offline
--no-progress
--format detailed
.ci-added-links.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Skip link check (no added links)
if: steps.collect_links.outputs.count == '0'
run: echo "No added links in changed docs lines. Link check skipped."
# lint-feedback: maintains a single "sticky" PR comment summarizing failed fast
# gates (lint, strict-delta lint, docs quality). The comment is updated in place
# on repeat failures and deleted once all gates pass.
lint-feedback:
name: Lint Feedback
if: github.event_name == 'pull_request'
needs: [changes, lint, lint-strict-delta, docs-quality]
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
contents: read
pull-requests: write
issues: write
steps:
- name: Post actionable lint failure summary
# always(): run even when a needed job failed, since reporting failures is the point.
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
RUST_CHANGED: ${{ needs.changes.outputs.rust_changed }}
DOCS_CHANGED: ${{ needs.changes.outputs.docs_changed }}
LINT_RESULT: ${{ needs.lint.result }}
LINT_DELTA_RESULT: ${{ needs.lint-strict-delta.result }}
DOCS_RESULT: ${{ needs.docs-quality.result }}
with:
script: |
const owner = context.repo.owner;
const repo = context.repo.repo;
const issueNumber = context.payload.pull_request?.number;
if (!issueNumber) return;
// Hidden HTML marker lets later runs find this bot comment among all PR comments.
const marker = "<!-- ci-lint-feedback -->";
const rustChanged = process.env.RUST_CHANGED === "true";
const docsChanged = process.env.DOCS_CHANGED === "true";
const lintResult = process.env.LINT_RESULT || "skipped";
const lintDeltaResult = process.env.LINT_DELTA_RESULT || "skipped";
const docsResult = process.env.DOCS_RESULT || "skipped";
// A gate counts as failed only when the relevant paths changed AND the job
// finished in a state other than success/skipped (i.e. failure or cancelled).
const failures = [];
if (rustChanged && !["success", "skipped"].includes(lintResult)) {
failures.push("`Lint Gate (Format + Clippy)` failed.");
}
if (rustChanged && !["success", "skipped"].includes(lintDeltaResult)) {
failures.push("`Lint Gate (Strict Delta)` failed.");
}
if (docsChanged && !["success", "skipped"].includes(docsResult)) {
failures.push("`Docs Quality` failed.");
}
// Scan all PR comments (paginated) for a previous sticky comment.
const comments = await github.paginate(github.rest.issues.listComments, {
owner,
repo,
issue_number: issueNumber,
per_page: 100,
});
const existing = comments.find((comment) => (comment.body || "").includes(marker));
if (failures.length === 0) {
// All gates green: remove any stale feedback comment instead of leaving it behind.
if (existing) {
await github.rest.issues.deleteComment({
owner,
repo,
comment_id: existing.id,
});
}
core.info("No lint/docs gate failures. No feedback comment required.");
return;
}
const runUrl = `${context.serverUrl}/${owner}/${repo}/actions/runs/${context.runId}`;
const body = [
marker,
"### CI lint feedback",
"",
"This PR failed one or more fast lint/documentation gates:",
"",
...failures.map((item) => `- ${item}`),
"",
"Open the failing logs in this run:",
`- ${runUrl}`,
"",
"Local fix commands:",
"- `./scripts/ci/rust_quality_gate.sh`",
"- `./scripts/ci/rust_strict_delta_gate.sh`",
"- `./scripts/ci/docs_quality_gate.sh`",
"",
"After fixes, push a new commit and CI will re-run automatically.",
].join("\n");
// Update the existing sticky comment if present; otherwise create a new one.
if (existing) {
await github.rest.issues.updateComment({
owner,
repo,
comment_id: existing.id,
body,
});
} else {
await github.rest.issues.createComment({
owner,
repo,
issue_number: issueNumber,
body,
});
}
# workflow-owner-approval: when a PR touches .github/workflows/**, require an
# approving review from a login in the WORKFLOW_OWNER_LOGINS allowlist
# (repository variable, with a hardcoded fallback pair of maintainers).
workflow-owner-approval:
name: Workflow Owner Approval
needs: [changes]
if: github.event_name == 'pull_request' && needs.changes.outputs.workflow_changed == 'true'
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
contents: read
pull-requests: read
steps:
- name: Require owner approval for workflow file changes
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
env:
WORKFLOW_OWNER_LOGINS: ${{ vars.WORKFLOW_OWNER_LOGINS || 'theonlyhennygod,willsarg' }}
with:
script: |
const owner = context.repo.owner;
const repo = context.repo.repo;
const prNumber = context.payload.pull_request?.number;
if (!prNumber) {
core.setFailed("Missing pull_request context.");
return;
}
// Allowlist is a comma-separated list of logins, compared case-insensitively.
const ownerAllowlist = (process.env.WORKFLOW_OWNER_LOGINS || "")
.split(",")
.map((login) => login.trim().toLowerCase())
.filter(Boolean);
if (ownerAllowlist.length === 0) {
core.setFailed("WORKFLOW_OWNER_LOGINS is empty. Set a repository variable or use a fallback value.");
return;
}
// Re-derive the changed-file list from the API rather than trusting upstream outputs.
const files = await github.paginate(github.rest.pulls.listFiles, {
owner,
repo,
pull_number: prNumber,
per_page: 100,
});
const workflowFiles = files
.map((file) => file.filename)
.filter((name) => name.startsWith(".github/workflows/"));
if (workflowFiles.length === 0) {
core.info("No workflow files changed in this PR.");
return;
}
core.info(`Workflow files changed:\n- ${workflowFiles.join("\n- ")}`);
const reviews = await github.paginate(github.rest.pulls.listReviews, {
owner,
repo,
pull_number: prNumber,
per_page: 100,
});
// Keep only each user's most recent review state (later entries overwrite earlier).
// NOTE(review): a later COMMENTED/CHANGES_REQUESTED review by an approver will
// supersede their earlier APPROVED state here — presumably intentional (conservative);
// confirm this matches the desired policy.
const latestReviewByUser = new Map();
for (const review of reviews) {
const login = review.user?.login;
if (!login) continue;
latestReviewByUser.set(login.toLowerCase(), review.state);
}
const approvedUsers = [...latestReviewByUser.entries()]
.filter(([, state]) => state === "APPROVED")
.map(([login]) => login);
if (approvedUsers.length === 0) {
core.setFailed("Workflow files changed but no approving review is present.");
return;
}
// At least one current approver must be on the owner allowlist.
const ownerApprover = approvedUsers.find((login) => ownerAllowlist.includes(login));
if (!ownerApprover) {
core.setFailed(
`Workflow files changed. Approvals found (${approvedUsers.join(", ")}), but none match WORKFLOW_OWNER_LOGINS.`,
);
return;
}
core.info(`Workflow owner approval present: @${ownerApprover}`);
# ci-required: single aggregate status for branch protection. Runs always() and
# decides pass/fail from the results of the path-filtered jobs, so the required
# check is green even when heavy jobs were legitimately skipped (fast paths).
ci-required:
name: CI Required Gate
if: always()
needs: [changes, lint, lint-strict-delta, test, build, docs-only, non-rust, docs-quality, lint-feedback, workflow-owner-approval]
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- name: Enforce required status
shell: bash
run: |
set -euo pipefail
docs_changed="${{ needs.changes.outputs.docs_changed }}"
rust_changed="${{ needs.changes.outputs.rust_changed }}"
workflow_changed="${{ needs.changes.outputs.workflow_changed }}"
docs_result="${{ needs.docs-quality.result }}"
workflow_owner_result="${{ needs.workflow-owner-approval.result }}"
# NOTE(review): workflow-owner-approval only runs for pull_request events, so its
# result is "skipped" on push; if workflow_changed can be "true" on push events,
# the checks below would fail that push — confirm the `changes` job's outputs
# for non-PR events.
# Fast path 1: docs-only change — only docs-quality (and the owner gate, if
# workflows changed) must pass.
if [ "${{ needs.changes.outputs.docs_only }}" = "true" ]; then
echo "docs=${docs_result}"
echo "workflow_owner_approval=${workflow_owner_result}"
if [ "$workflow_changed" = "true" ] && [ "$workflow_owner_result" != "success" ]; then
echo "Workflow files changed but workflow owner approval gate did not pass."
exit 1
fi
if [ "$docs_changed" = "true" ] && [ "$docs_result" != "success" ]; then
echo "Docs-only change touched markdown docs, but docs-quality did not pass."
exit 1
fi
echo "Docs-only fast path passed."
exit 0
fi
# Fast path 2: no Rust changes — skip the lint/test/build requirements.
if [ "$rust_changed" != "true" ]; then
echo "rust_changed=false (non-rust fast path)"
echo "docs=${docs_result}"
echo "workflow_owner_approval=${workflow_owner_result}"
if [ "$workflow_changed" = "true" ] && [ "$workflow_owner_result" != "success" ]; then
echo "Workflow files changed but workflow owner approval gate did not pass."
exit 1
fi
if [ "$docs_changed" = "true" ] && [ "$docs_result" != "success" ]; then
echo "Docs changed but docs-quality did not pass."
exit 1
fi
echo "Non-rust fast path passed."
exit 0
fi
# Full path: Rust changed, so all four core gates must have succeeded
# (a skipped gate counts as a failure here, deliberately).
lint_result="${{ needs.lint.result }}"
lint_strict_delta_result="${{ needs.lint-strict-delta.result }}"
test_result="${{ needs.test.result }}"
build_result="${{ needs.build.result }}"
echo "lint=${lint_result}"
echo "lint_strict_delta=${lint_strict_delta_result}"
echo "test=${test_result}"
echo "build=${build_result}"
echo "docs=${docs_result}"
echo "workflow_owner_approval=${workflow_owner_result}"
if [ "$lint_result" != "success" ] || [ "$lint_strict_delta_result" != "success" ] || [ "$test_result" != "success" ] || [ "$build_result" != "success" ]; then
echo "Required CI jobs did not pass."
exit 1
fi
if [ "$workflow_changed" = "true" ] && [ "$workflow_owner_result" != "success" ]; then
echo "Workflow files changed but workflow owner approval gate did not pass."
exit 1
fi
if [ "$docs_changed" = "true" ] && [ "$docs_result" != "success" ]; then
echo "Docs changed but docs-quality did not pass."
exit 1
fi
echo "All required CI jobs passed."

39
.github/workflows/codeql.yml vendored Normal file
View file

@ -0,0 +1,39 @@
# CodeQL security analysis for the Rust workspace. Runs on a schedule (not per
# PR) plus manual dispatch; results go to the Security tab via security-events: write.
name: CodeQL Analysis
on:
schedule:
- cron: "0 6,18 * * *" # Twice daily at 6am and 6pm UTC
workflow_dispatch:
concurrency:
group: codeql-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
security-events: write
actions: read
jobs:
codeql:
name: CodeQL Analysis
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
# NOTE(review): codeql-action and dtolnay/rust-toolchain below are pinned to a
# tag/branch, while other workflows in this repo pin actions to full commit
# SHAs — consider SHA-pinning these for consistency and supply-chain safety.
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
with:
languages: rust
config-file: ./.github/codeql/codeql-config.yml
- name: Set up Rust
uses: dtolnay/rust-toolchain@stable
# Build happens between init and analyze so CodeQL can observe the compilation.
- name: Build
run: cargo build --workspace --all-targets
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4

View file

@ -1,65 +1,110 @@
name: Docker
on:
push:
branches: [main]
tags: ["v*"]
pull_request:
branches: [main]
paths:
- "Dockerfile"
- "docker-compose.yml"
- "dev/docker-compose.yml"
- "dev/sandbox/**"
- ".github/workflows/docker.yml"
workflow_dispatch:
concurrency:
group: docker-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-push: pr-smoke:
name: Build and Push Docker Image name: PR Docker Smoke
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
- name: Verify image (PR only)
if: github.event_name == 'pull_request'
run: | runs-on: blacksmith-2vcpu-ubuntu-2404
docker build -t zeroclaw-test . timeout-minutes: 25
docker run --rm zeroclaw-test --version permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Setup Blacksmith Builder
uses: useblacksmith/setup-docker-builder@ef12d5b165b596e3aa44ea8198d8fde563eab402 # v1
- name: Extract metadata (tags, labels)
id: meta
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=pr
- name: Build smoke image
uses: useblacksmith/build-push-action@30c71162f16ea2c27c3e21523255d209b8b538c1 # v2
with:
context: .
push: false
load: true
tags: zeroclaw-pr-smoke:latest
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64
- name: Verify image
run: docker run --rm zeroclaw-pr-smoke:latest --version
publish:
name: Build and Push Docker Image
if: github.event_name == 'push'
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 25
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Setup Blacksmith Builder
uses: useblacksmith/setup-docker-builder@ef12d5b165b596e3aa44ea8198d8fde563eab402 # v1
- name: Log in to Container Registry
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Compute tags
id: meta
shell: bash
run: |
set -euo pipefail
IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}"
SHA_TAG="${IMAGE}:sha-${GITHUB_SHA::12}"
if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
TAG_NAME="${GITHUB_REF#refs/tags/}"
TAGS="${IMAGE}:${TAG_NAME},${SHA_TAG}"
elif [[ "${GITHUB_REF}" == "refs/heads/main" ]]; then
TAGS="${IMAGE}:latest,${SHA_TAG}"
else
BRANCH_NAME="${GITHUB_REF#refs/heads/}"
BRANCH_NAME="${BRANCH_NAME//\//-}"
TAGS="${IMAGE}:${BRANCH_NAME},${SHA_TAG}"
fi
echo "tags=${TAGS}" >> "$GITHUB_OUTPUT"
- name: Build and push Docker image
uses: useblacksmith/build-push-action@30c71162f16ea2c27c3e21523255d209b8b538c1 # v2
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
platforms: ${{ startsWith(github.ref, 'refs/tags/') && 'linux/amd64,linux/arm64' || 'linux/amd64' }}

View file

@ -0,0 +1,74 @@
# Label Policy Sanity: validates .github/label-policy.json and checks that the
# labeler/auto-response workflows consume the shared policy file instead of
# hardcoding tier colors. Triggered only when the policy or those workflows change.
name: Label Policy Sanity
on:
pull_request:
paths:
- ".github/label-policy.json"
- ".github/workflows/labeler.yml"
- ".github/workflows/auto-response.yml"
push:
paths:
- ".github/label-policy.json"
- ".github/workflows/labeler.yml"
- ".github/workflows/auto-response.yml"
concurrency:
group: label-policy-sanity-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
permissions:
contents: read
jobs:
contributor-tier-consistency:
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Verify shared label policy and workflow wiring
shell: bash
run: |
set -euo pipefail
# Inline Python validator; any SystemExit with a message fails the step.
python3 - <<'PY'
import json
import re
from pathlib import Path
policy_path = Path('.github/label-policy.json')
policy = json.loads(policy_path.read_text(encoding='utf-8'))
# Color is upper-cased before matching, so the policy file may use either case.
color = str(policy.get('contributor_tier_color', '')).upper()
rules = policy.get('contributor_tiers', [])
if not re.fullmatch(r'[0-9A-F]{6}', color):
    raise SystemExit('invalid contributor_tier_color in .github/label-policy.json')
if not rules:
    raise SystemExit('contributor_tiers must not be empty in .github/label-policy.json')
labels = set()
prev_min = None
for entry in rules:
    label = str(entry.get('label', '')).strip().lower()
    min_merged = int(entry.get('min_merged_prs', 0))
    if not label.endswith('contributor'):
        raise SystemExit(f'invalid contributor tier label: {label}')
    if label in labels:
        raise SystemExit(f'duplicate contributor tier label: {label}')
    # Non-strict descending order check: equal thresholds are tolerated,
    # only an increase (min_merged > previous) is rejected.
    if prev_min is not None and min_merged > prev_min:
        raise SystemExit('contributor_tiers must be sorted descending by min_merged_prs')
    labels.add(label)
    prev_min = min_merged
# Consumer workflows must reference the shared policy file and must not
# hardcode a contributorTierColor literal.
workflow_paths = [
    Path('.github/workflows/labeler.yml'),
    Path('.github/workflows/auto-response.yml'),
]
for workflow in workflow_paths:
    text = workflow.read_text(encoding='utf-8')
    if '.github/label-policy.json' not in text:
        raise SystemExit(f'{workflow} must load .github/label-policy.json')
    if re.search(r'contributorTierColor\s*=\s*"[0-9A-Fa-f]{6}"', text):
        raise SystemExit(f'{workflow} contains hardcoded contributorTierColor')
print('label policy file is valid and workflow consumers are wired to shared policy')
PY

841
.github/workflows/labeler.yml vendored Normal file
View file

@ -0,0 +1,841 @@
# PR Labeler: applies/repairs managed size, risk, module, and contributor-tier
# labels. Manual dispatch supports an audit mode (report drift) and a repair
# mode (fix label metadata drift).
name: PR Labeler
on:
# NOTE(review): pull_request_target runs with the base repository's token and the
# write permissions below, including for fork PRs — ensure no step in this
# workflow checks out or executes code from the PR head.
pull_request_target:
types: [opened, reopened, synchronize, edited, labeled, unlabeled]
workflow_dispatch:
inputs:
mode:
description: "Run mode for managed-label governance"
required: true
default: "audit"
type: choice
options:
- audit
- repair
concurrency:
group: pr-labeler-${{ github.event.pull_request.number || github.run_id }}
cancel-in-progress: true
permissions:
contents: read
pull-requests: write
issues: write
jobs:
label:
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 10
steps:
- name: Apply path labels
if: github.event_name == 'pull_request_target'
uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5
continue-on-error: true
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
sync-labels: true
- name: Apply size/risk/module labels
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
continue-on-error: true
with:
script: |
const pr = context.payload.pull_request;
const owner = context.repo.owner;
const repo = context.repo.repo;
const action = context.payload.action;
const changedLabel = context.payload.label?.name;
const sizeLabels = ["size: XS", "size: S", "size: M", "size: L", "size: XL"];
const computedRiskLabels = ["risk: low", "risk: medium", "risk: high"];
const manualRiskOverrideLabel = "risk: manual";
const managedEnforcedLabels = new Set([
...sizeLabels,
manualRiskOverrideLabel,
...computedRiskLabels,
]);
if ((action === "labeled" || action === "unlabeled") && !managedEnforcedLabels.has(changedLabel)) {
core.info(`skip non-size/risk label event: ${changedLabel || "unknown"}`);
return;
}
async function loadContributorTierPolicy() {
const fallback = {
contributorTierColor: "2ED9FF",
contributorTierRules: [
{ label: "distinguished contributor", minMergedPRs: 50 },
{ label: "principal contributor", minMergedPRs: 20 },
{ label: "experienced contributor", minMergedPRs: 10 },
{ label: "trusted contributor", minMergedPRs: 5 },
],
};
try {
const { data } = await github.rest.repos.getContent({
owner,
repo,
path: ".github/label-policy.json",
ref: context.payload.repository?.default_branch || "main",
});
const json = JSON.parse(Buffer.from(data.content, "base64").toString("utf8"));
const contributorTierRules = (json.contributor_tiers || []).map((entry) => ({
label: String(entry.label || "").trim(),
minMergedPRs: Number(entry.min_merged_prs || 0),
}));
const contributorTierColor = String(json.contributor_tier_color || "").toUpperCase();
if (!contributorTierColor || contributorTierRules.length === 0) {
return fallback;
}
return { contributorTierColor, contributorTierRules };
} catch (error) {
core.warning(`failed to load .github/label-policy.json, using fallback policy: ${error.message}`);
return fallback;
}
}
const { contributorTierColor, contributorTierRules } = await loadContributorTierPolicy();
const contributorTierLabels = contributorTierRules.map((rule) => rule.label);
const managedPathLabels = [
"docs",
"dependencies",
"ci",
"core",
"agent",
"channel",
"config",
"cron",
"daemon",
"doctor",
"gateway",
"health",
"heartbeat",
"integration",
"memory",
"observability",
"onboard",
"provider",
"runtime",
"security",
"service",
"skillforge",
"skills",
"tool",
"tunnel",
"tests",
"scripts",
"dev",
];
const managedPathLabelSet = new Set(managedPathLabels);
const moduleNamespaceRules = [
{ root: "src/agent/", prefix: "agent", coreEntries: new Set(["mod.rs"]) },
{ root: "src/channels/", prefix: "channel", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/config/", prefix: "config", coreEntries: new Set(["mod.rs", "schema.rs"]) },
{ root: "src/cron/", prefix: "cron", coreEntries: new Set(["mod.rs"]) },
{ root: "src/daemon/", prefix: "daemon", coreEntries: new Set(["mod.rs"]) },
{ root: "src/doctor/", prefix: "doctor", coreEntries: new Set(["mod.rs"]) },
{ root: "src/gateway/", prefix: "gateway", coreEntries: new Set(["mod.rs"]) },
{ root: "src/health/", prefix: "health", coreEntries: new Set(["mod.rs"]) },
{ root: "src/heartbeat/", prefix: "heartbeat", coreEntries: new Set(["mod.rs"]) },
{ root: "src/integrations/", prefix: "integration", coreEntries: new Set(["mod.rs", "registry.rs"]) },
{ root: "src/memory/", prefix: "memory", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/observability/", prefix: "observability", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/onboard/", prefix: "onboard", coreEntries: new Set(["mod.rs"]) },
{ root: "src/providers/", prefix: "provider", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/runtime/", prefix: "runtime", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/security/", prefix: "security", coreEntries: new Set(["mod.rs"]) },
{ root: "src/service/", prefix: "service", coreEntries: new Set(["mod.rs"]) },
{ root: "src/skillforge/", prefix: "skillforge", coreEntries: new Set(["mod.rs"]) },
{ root: "src/skills/", prefix: "skills", coreEntries: new Set(["mod.rs"]) },
{ root: "src/tools/", prefix: "tool", coreEntries: new Set(["mod.rs", "traits.rs"]) },
{ root: "src/tunnel/", prefix: "tunnel", coreEntries: new Set(["mod.rs"]) },
];
const managedModulePrefixes = [...new Set(moduleNamespaceRules.map((rule) => `${rule.prefix}:`))];
const orderedOtherLabelStyles = [
{ label: "health", color: "8EC9B8" },
{ label: "tool", color: "7FC4B6" },
{ label: "agent", color: "86C4A2" },
{ label: "memory", color: "8FCB99" },
{ label: "channel", color: "7EB6F2" },
{ label: "service", color: "95C7B6" },
{ label: "integration", color: "8DC9AE" },
{ label: "tunnel", color: "9FC8B3" },
{ label: "config", color: "AABCD0" },
{ label: "observability", color: "84C9D0" },
{ label: "docs", color: "8FBBE0" },
{ label: "dev", color: "B9C1CC" },
{ label: "tests", color: "9DC8C7" },
{ label: "skills", color: "BFC89B" },
{ label: "skillforge", color: "C9C39B" },
{ label: "provider", color: "958DF0" },
{ label: "runtime", color: "A3ADD8" },
{ label: "heartbeat", color: "C0C88D" },
{ label: "daemon", color: "C8C498" },
{ label: "doctor", color: "C1CF9D" },
{ label: "onboard", color: "D2BF86" },
{ label: "cron", color: "D2B490" },
{ label: "ci", color: "AEB4CE" },
{ label: "dependencies", color: "9FB1DE" },
{ label: "gateway", color: "B5A8E5" },
{ label: "security", color: "E58D85" },
{ label: "core", color: "C8A99B" },
{ label: "scripts", color: "C9B49F" },
];
const otherLabelDisplayOrder = orderedOtherLabelStyles.map((entry) => entry.label);
const modulePrefixSet = new Set(moduleNamespaceRules.map((rule) => rule.prefix));
const modulePrefixPriority = otherLabelDisplayOrder.filter((label) => modulePrefixSet.has(label));
const pathLabelPriority = [...otherLabelDisplayOrder];
const riskDisplayOrder = ["risk: high", "risk: medium", "risk: low", "risk: manual"];
const sizeDisplayOrder = ["size: XS", "size: S", "size: M", "size: L", "size: XL"];
const contributorDisplayOrder = [
"distinguished contributor",
"principal contributor",
"experienced contributor",
"trusted contributor",
];
const modulePrefixPriorityIndex = new Map(
modulePrefixPriority.map((prefix, index) => [prefix, index])
);
const pathLabelPriorityIndex = new Map(
pathLabelPriority.map((label, index) => [label, index])
);
const riskPriorityIndex = new Map(
riskDisplayOrder.map((label, index) => [label, index])
);
const sizePriorityIndex = new Map(
sizeDisplayOrder.map((label, index) => [label, index])
);
const contributorPriorityIndex = new Map(
contributorDisplayOrder.map((label, index) => [label, index])
);
const otherLabelColors = Object.fromEntries(
orderedOtherLabelStyles.map((entry) => [entry.label, entry.color])
);
const staticLabelColors = {
"size: XS": "E7CDD3",
"size: S": "E1BEC7",
"size: M": "DBB0BB",
"size: L": "D4A2AF",
"size: XL": "CE94A4",
"risk: low": "97D3A6",
"risk: medium": "E4C47B",
"risk: high": "E98E88",
"risk: manual": "B7A4E0",
...otherLabelColors,
};
const staticLabelDescriptions = {
"size: XS": "Auto size: <=80 non-doc changed lines.",
"size: S": "Auto size: 81-250 non-doc changed lines.",
"size: M": "Auto size: 251-500 non-doc changed lines.",
"size: L": "Auto size: 501-1000 non-doc changed lines.",
"size: XL": "Auto size: >1000 non-doc changed lines.",
"risk: low": "Auto risk: docs/chore-only paths.",
"risk: medium": "Auto risk: src/** or dependency/config changes.",
"risk: high": "Auto risk: security/runtime/gateway/tools/workflows.",
"risk: manual": "Maintainer override: keep selected risk label.",
docs: "Auto scope: docs/markdown/template files changed.",
dependencies: "Auto scope: dependency manifest/lock/policy changed.",
ci: "Auto scope: CI/workflow/hook files changed.",
core: "Auto scope: root src/*.rs files changed.",
agent: "Auto scope: src/agent/** changed.",
channel: "Auto scope: src/channels/** changed.",
config: "Auto scope: src/config/** changed.",
cron: "Auto scope: src/cron/** changed.",
daemon: "Auto scope: src/daemon/** changed.",
doctor: "Auto scope: src/doctor/** changed.",
gateway: "Auto scope: src/gateway/** changed.",
health: "Auto scope: src/health/** changed.",
heartbeat: "Auto scope: src/heartbeat/** changed.",
integration: "Auto scope: src/integrations/** changed.",
memory: "Auto scope: src/memory/** changed.",
observability: "Auto scope: src/observability/** changed.",
onboard: "Auto scope: src/onboard/** changed.",
provider: "Auto scope: src/providers/** changed.",
runtime: "Auto scope: src/runtime/** changed.",
security: "Auto scope: src/security/** changed.",
service: "Auto scope: src/service/** changed.",
skillforge: "Auto scope: src/skillforge/** changed.",
skills: "Auto scope: src/skills/** changed.",
tool: "Auto scope: src/tools/** changed.",
tunnel: "Auto scope: src/tunnel/** changed.",
tests: "Auto scope: tests/** changed.",
scripts: "Auto scope: scripts/** changed.",
dev: "Auto scope: dev/** changed.",
};
for (const label of contributorTierLabels) {
staticLabelColors[label] = contributorTierColor;
const rule = contributorTierRules.find((entry) => entry.label === label);
if (rule) {
staticLabelDescriptions[label] = `Contributor with ${rule.minMergedPRs}+ merged PRs.`;
}
}
const modulePrefixColors = Object.fromEntries(
modulePrefixPriority.map((prefix) => [
`${prefix}:`,
otherLabelColors[prefix] || "BFDADC",
])
);
const providerKeywordHints = [
"deepseek",
"moonshot",
"kimi",
"qwen",
"mistral",
"doubao",
"baichuan",
"yi",
"siliconflow",
"vertex",
"azure",
"perplexity",
"venice",
"vercel",
"cloudflare",
"synthetic",
"opencode",
"zai",
"glm",
"minimax",
"bedrock",
"qianfan",
"groq",
"together",
"fireworks",
"cohere",
"openai",
"openrouter",
"anthropic",
"gemini",
"ollama",
];
const channelKeywordHints = [
"telegram",
"discord",
"slack",
"whatsapp",
"matrix",
"irc",
"imessage",
"email",
"cli",
];
function isDocsLike(path) {
return (
path.startsWith("docs/") ||
path.endsWith(".md") ||
path.endsWith(".mdx") ||
path === "LICENSE" ||
path === ".markdownlint-cli2.yaml" ||
path === ".github/pull_request_template.md" ||
path.startsWith(".github/ISSUE_TEMPLATE/")
);
}
function normalizeLabelSegment(segment) {
return (segment || "")
.toLowerCase()
.replace(/\.rs$/g, "")
.replace(/[^a-z0-9_-]+/g, "-")
.replace(/^[-_]+|[-_]+$/g, "")
.slice(0, 40);
}
function containsKeyword(text, keyword) {
const escaped = keyword.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
const pattern = new RegExp(`(^|[^a-z0-9_])${escaped}([^a-z0-9_]|$)`, "i");
return pattern.test(text);
}
function formatModuleLabel(prefix, segment) {
return `${prefix}: ${segment}`;
}
function parseModuleLabel(label) {
if (typeof label !== "string") return null;
const match = label.match(/^([^:]+):\s*(.+)$/);
if (!match) return null;
const prefix = match[1].trim().toLowerCase();
const segment = (match[2] || "").trim().toLowerCase();
if (!prefix || !segment) return null;
return { prefix, segment };
}
function sortByPriority(labels, priorityIndex) {
return [...new Set(labels)].sort((left, right) => {
const leftPriority = priorityIndex.has(left) ? priorityIndex.get(left) : Number.MAX_SAFE_INTEGER;
const rightPriority = priorityIndex.has(right)
? priorityIndex.get(right)
: Number.MAX_SAFE_INTEGER;
if (leftPriority !== rightPriority) return leftPriority - rightPriority;
return left.localeCompare(right);
});
}
function sortModuleLabels(labels) {
return [...new Set(labels)].sort((left, right) => {
const leftParsed = parseModuleLabel(left);
const rightParsed = parseModuleLabel(right);
if (!leftParsed || !rightParsed) return left.localeCompare(right);
const leftPrefixPriority = modulePrefixPriorityIndex.has(leftParsed.prefix)
? modulePrefixPriorityIndex.get(leftParsed.prefix)
: Number.MAX_SAFE_INTEGER;
const rightPrefixPriority = modulePrefixPriorityIndex.has(rightParsed.prefix)
? modulePrefixPriorityIndex.get(rightParsed.prefix)
: Number.MAX_SAFE_INTEGER;
if (leftPrefixPriority !== rightPrefixPriority) {
return leftPrefixPriority - rightPrefixPriority;
}
if (leftParsed.prefix !== rightParsed.prefix) {
return leftParsed.prefix.localeCompare(rightParsed.prefix);
}
const leftIsCore = leftParsed.segment === "core";
const rightIsCore = rightParsed.segment === "core";
if (leftIsCore !== rightIsCore) return leftIsCore ? 1 : -1;
return leftParsed.segment.localeCompare(rightParsed.segment);
});
}
function refineModuleLabels(rawLabels) {
const refined = new Set(rawLabels);
const segmentsByPrefix = new Map();
for (const label of rawLabels) {
const parsed = parseModuleLabel(label);
if (!parsed) continue;
if (!segmentsByPrefix.has(parsed.prefix)) {
segmentsByPrefix.set(parsed.prefix, new Set());
}
segmentsByPrefix.get(parsed.prefix).add(parsed.segment);
}
for (const [prefix, segments] of segmentsByPrefix) {
const hasSpecificSegment = [...segments].some((segment) => segment !== "core");
if (hasSpecificSegment) {
refined.delete(formatModuleLabel(prefix, "core"));
}
}
return refined;
}
function compactModuleLabels(labels) {
const groupedSegments = new Map();
const compactedModuleLabels = new Set();
const forcePathPrefixes = new Set();
for (const label of labels) {
const parsed = parseModuleLabel(label);
if (!parsed) {
compactedModuleLabels.add(label);
continue;
}
if (!groupedSegments.has(parsed.prefix)) {
groupedSegments.set(parsed.prefix, new Set());
}
groupedSegments.get(parsed.prefix).add(parsed.segment);
}
for (const [prefix, segments] of groupedSegments) {
const uniqueSegments = [...new Set([...segments].filter(Boolean))];
if (uniqueSegments.length === 0) continue;
if (uniqueSegments.length === 1) {
compactedModuleLabels.add(formatModuleLabel(prefix, uniqueSegments[0]));
} else {
forcePathPrefixes.add(prefix);
}
}
return {
moduleLabels: compactedModuleLabels,
forcePathPrefixes,
};
}
function colorForLabel(label) {
if (staticLabelColors[label]) return staticLabelColors[label];
const matchedPrefix = Object.keys(modulePrefixColors).find((prefix) => label.startsWith(prefix));
if (matchedPrefix) return modulePrefixColors[matchedPrefix];
return "BFDADC";
}
function descriptionForLabel(label) {
if (staticLabelDescriptions[label]) return staticLabelDescriptions[label];
const parsed = parseModuleLabel(label);
if (parsed) {
if (parsed.segment === "core") {
return `Auto module: ${parsed.prefix} core files changed.`;
}
return `Auto module: ${parsed.prefix}/${parsed.segment} changed.`;
}
return "Auto-managed label.";
}
async function ensureLabel(name, existing = null) {
const expectedColor = colorForLabel(name);
const expectedDescription = descriptionForLabel(name);
try {
const current = existing || (await github.rest.issues.getLabel({ owner, repo, name })).data;
const currentColor = (current.color || "").toUpperCase();
const currentDescription = (current.description || "").trim();
if (currentColor !== expectedColor || currentDescription !== expectedDescription) {
await github.rest.issues.updateLabel({
owner,
repo,
name,
new_name: name,
color: expectedColor,
description: expectedDescription,
});
}
} catch (error) {
if (error.status !== 404) throw error;
await github.rest.issues.createLabel({
owner,
repo,
name,
color: expectedColor,
description: expectedDescription,
});
}
}
function isManagedLabel(label) {
if (label === manualRiskOverrideLabel) return true;
if (sizeLabels.includes(label) || computedRiskLabels.includes(label)) return true;
if (managedPathLabelSet.has(label)) return true;
if (contributorTierLabels.includes(label)) return true;
if (managedModulePrefixes.some((prefix) => label.startsWith(prefix))) return true;
return false;
}
async function ensureManagedRepoLabelsMetadata() {
const repoLabels = await github.paginate(github.rest.issues.listLabelsForRepo, {
owner,
repo,
per_page: 100,
});
for (const existingLabel of repoLabels) {
const labelName = existingLabel.name || "";
if (!isManagedLabel(labelName)) continue;
await ensureLabel(labelName, existingLabel);
}
}
function selectContributorTier(mergedCount) {
const matchedTier = contributorTierRules.find((rule) => mergedCount >= rule.minMergedPRs);
return matchedTier ? matchedTier.label : null;
}
// Manual invocation path: audit (or repair) metadata drift across all
// managed repository labels, then return without touching any PR.
if (context.eventName === "workflow_dispatch") {
  // "mode" input is either "audit" (default, report-only) or "repair" (auto-fix).
  const mode = (context.payload.inputs?.mode || "audit").toLowerCase();
  const shouldRepair = mode === "repair";
  const repoLabels = await github.paginate(github.rest.issues.listLabelsForRepo, {
    owner,
    repo,
    per_page: 100,
  });
  let managedScanned = 0;
  const drifts = [];
  for (const existingLabel of repoLabels) {
    const labelName = existingLabel.name || "";
    if (!isManagedLabel(labelName)) continue;
    managedScanned += 1;
    const expectedColor = colorForLabel(labelName);
    const expectedDescription = descriptionForLabel(labelName);
    // GitHub returns hex colors lowercase; normalize before comparing.
    // NOTE(review): assumes colorForLabel() emits uppercase hex — confirm.
    const currentColor = (existingLabel.color || "").toUpperCase();
    const currentDescription = (existingLabel.description || "").trim();
    if (currentColor !== expectedColor || currentDescription !== expectedDescription) {
      drifts.push({
        name: labelName,
        currentColor,
        expectedColor,
        currentDescription,
        expectedDescription,
      });
      if (shouldRepair) {
        await ensureLabel(labelName, existingLabel);
      }
    }
  }
  // Publish the audit result to the workflow-run summary page.
  core.summary
    .addHeading("Managed Label Governance", 2)
    .addRaw(`Mode: ${shouldRepair ? "repair" : "audit"}`)
    .addEOL()
    .addRaw(`Managed labels scanned: ${managedScanned}`)
    .addEOL()
    .addRaw(`Drifts found: ${drifts.length}`)
    .addEOL();
  if (drifts.length > 0) {
    // Cap the table at 30 rows to keep the summary readable.
    const sample = drifts.slice(0, 30).map((entry) => [
      entry.name,
      `${entry.currentColor} -> ${entry.expectedColor}`,
      `${entry.currentDescription || "(blank)"} -> ${entry.expectedDescription}`,
    ]);
    core.summary.addTable([
      [{ data: "Label", header: true }, { data: "Color", header: true }, { data: "Description", header: true }],
      ...sample,
    ]);
    if (drifts.length > sample.length) {
      core.summary
        .addRaw(`Additional drifts not shown: ${drifts.length - sample.length}`)
        .addEOL();
    }
  }
  await core.summary.write();
  // Audit mode only logs; it never fails the run.
  if (!shouldRepair && drifts.length > 0) {
    core.info(`Managed-label metadata drifts detected: ${drifts.length}. Re-run with mode=repair to auto-fix.`);
  } else if (shouldRepair) {
    core.info(`Managed-label metadata repair applied to ${drifts.length} labels.`);
  } else {
    core.info("No managed-label metadata drift detected.");
  }
  return;
}
// Pull the PR's full changed-file list (paginated) to drive label detection.
const files = await github.paginate(github.rest.pulls.listFiles, {
  owner,
  repo,
  pull_number: pr.number,
  per_page: 100,
});
// Derive module-namespace labels from touched paths.
const detectedModuleLabels = new Set();
for (const file of files) {
  const path = (file.filename || "").toLowerCase();
  for (const rule of moduleNamespaceRules) {
    if (!path.startsWith(rule.root)) continue;
    const relative = path.slice(rule.root.length);
    if (!relative) continue;
    // The first path segment under the rule root names the module; strip a
    // ".rs" suffix so "foo.rs" and "foo/" collapse to the same label.
    const first = relative.split("/")[0];
    const firstStem = first.endsWith(".rs") ? first.slice(0, -3) : first;
    let segment = firstStem;
    // Entries listed as "core" files map to the synthetic "core" segment.
    if (rule.coreEntries.has(first) || rule.coreEntries.has(firstStem)) {
      segment = "core";
    }
    segment = normalizeLabelSegment(segment);
    if (!segment) continue;
    detectedModuleLabels.add(formatModuleLabel(rule.prefix, segment));
  }
}
// Keyword-based provider labels: only scan provider-adjacent directories to
// avoid false positives from unrelated files.
const providerRelevantFiles = files.filter((file) => {
  const path = file.filename || "";
  return (
    path.startsWith("src/providers/") ||
    path.startsWith("src/integrations/") ||
    path.startsWith("src/onboard/") ||
    path.startsWith("src/config/")
  );
});
if (providerRelevantFiles.length > 0) {
  // Match keywords against title, body, filenames, and patch hunks.
  const searchableText = [
    pr.title || "",
    pr.body || "",
    ...providerRelevantFiles.map((file) => file.filename || ""),
    ...providerRelevantFiles.map((file) => file.patch || ""),
  ]
    .join("\n")
    .toLowerCase();
  for (const keyword of providerKeywordHints) {
    if (containsKeyword(searchableText, keyword)) {
      detectedModuleLabels.add(formatModuleLabel("provider", keyword));
    }
  }
}
// Same keyword strategy for channel labels, scoped to channel-adjacent dirs.
const channelRelevantFiles = files.filter((file) => {
  const path = file.filename || "";
  return (
    path.startsWith("src/channels/") ||
    path.startsWith("src/onboard/") ||
    path.startsWith("src/config/")
  );
});
if (channelRelevantFiles.length > 0) {
  const searchableText = [
    pr.title || "",
    pr.body || "",
    ...channelRelevantFiles.map((file) => file.filename || ""),
    ...channelRelevantFiles.map((file) => file.patch || ""),
  ]
    .join("\n")
    .toLowerCase();
  for (const keyword of channelKeywordHints) {
    if (containsKeyword(searchableText, keyword)) {
      detectedModuleLabels.add(formatModuleLabel("channel", keyword));
    }
  }
}
// Refine then compact the detected module labels; compaction may demand
// broader path-prefix labels instead of many fine-grained module labels.
const refinedModuleLabels = refineModuleLabels(detectedModuleLabels);
const compactedModuleState = compactModuleLabels(refinedModuleLabels);
const selectedModuleLabels = compactedModuleState.moduleLabels;
const forcePathPrefixes = compactedModuleState.forcePathPrefixes;
// Prefixes that already have a specific module label (used to drop the
// redundant generic path label below).
const modulePrefixesWithLabels = new Set(
  [...selectedModuleLabels]
    .map((label) => parseModuleLabel(label)?.prefix)
    .filter(Boolean)
);
const { data: currentLabels } = await github.rest.issues.listLabelsOnIssue({
  owner,
  repo,
  issue_number: pr.number,
});
const currentLabelNames = currentLabels.map((label) => label.name);
const currentPathLabels = currentLabelNames.filter((label) => managedPathLabelSet.has(label));
const candidatePathLabels = new Set([...currentPathLabels, ...forcePathPrefixes]);
// Keep a path label when it is "core", explicitly forced, or has no more
// specific module label covering the same prefix.
const dedupedPathLabels = [...candidatePathLabels].filter((label) => {
  if (label === "core") return true;
  if (forcePathPrefixes.has(label)) return true;
  return !modulePrefixesWithLabels.has(label);
});
// Size is measured on non-docs, non-lockfile churn only.
const excludedLockfiles = new Set(["Cargo.lock"]);
const changedLines = files.reduce((total, file) => {
  const path = file.filename || "";
  if (isDocsLike(path) || excludedLockfiles.has(path)) {
    return total;
  }
  return total + (file.additions || 0) + (file.deletions || 0);
}, 0);
// Size buckets: XS <=80, S <=250, M <=500, L <=1000, XL otherwise.
let sizeLabel = "size: XL";
if (changedLines <= 80) sizeLabel = "size: XS";
else if (changedLines <= 250) sizeLabel = "size: S";
else if (changedLines <= 500) sizeLabel = "size: M";
else if (changedLines <= 1000) sizeLabel = "size: L";
// High risk: security, runtime, gateway, tools, or CI workflow changes.
const hasHighRiskPath = files.some((file) => {
  const path = file.filename || "";
  return (
    path.startsWith("src/security/") ||
    path.startsWith("src/runtime/") ||
    path.startsWith("src/gateway/") ||
    path.startsWith("src/tools/") ||
    path.startsWith(".github/workflows/")
  );
});
// Medium risk: any other source, manifest, lockfile, policy, or hook change.
const hasMediumRiskPath = files.some((file) => {
  const path = file.filename || "";
  return (
    path.startsWith("src/") ||
    path === "Cargo.toml" ||
    path === "Cargo.lock" ||
    path === "deny.toml" ||
    path.startsWith(".githooks/")
  );
});
// High wins over medium; everything else is low risk.
let riskLabel = "risk: low";
if (hasHighRiskPath) {
  riskLabel = "risk: high";
} else if (hasMediumRiskPath) {
  riskLabel = "risk: medium";
}
// Repair metadata on existing managed labels, then make sure every label we
// might assign exists with the expected color/description.
await ensureManagedRepoLabelsMetadata();
const labelsToEnsure = new Set([
  ...sizeLabels,
  ...computedRiskLabels,
  manualRiskOverrideLabel,
  ...managedPathLabels,
  ...contributorTierLabels,
  ...selectedModuleLabels,
]);
for (const label of labelsToEnsure) {
  await ensureLabel(label);
}
// Contributor tier is computed from the author's merged-PR count; bots are
// excluded. A failed search only warns — labeling must not fail on it.
let contributorTierLabel = null;
const authorLogin = pr.user?.login;
if (authorLogin && pr.user?.type !== "Bot") {
  try {
    const { data: mergedSearch } = await github.rest.search.issuesAndPullRequests({
      q: `repo:${owner}/${repo} is:pr is:merged author:${authorLogin}`,
      per_page: 1,
    });
    // Only total_count is needed, hence per_page: 1.
    const mergedCount = mergedSearch.total_count || 0;
    contributorTierLabel = selectContributorTier(mergedCount);
  } catch (error) {
    core.warning(`failed to compute contributor tier label: ${error.message}`);
  }
}
const hasManualRiskOverride = currentLabelNames.includes(manualRiskOverrideLabel);
// Labels not managed by this automation are preserved verbatim (the manual
// risk-override marker is kept too; managed families are recomputed below).
const keepNonManagedLabels = currentLabelNames.filter((label) => {
  if (label === manualRiskOverrideLabel) return true;
  if (contributorTierLabels.includes(label)) return false;
  if (sizeLabels.includes(label) || computedRiskLabels.includes(label)) return false;
  if (managedPathLabelSet.has(label)) return false;
  if (managedModulePrefixes.some((prefix) => label.startsWith(prefix))) return false;
  return true;
});
// When a maintainer set the override, keep whatever risk label is currently
// applied; otherwise fall back to the computed one.
const manualRiskSelection =
  currentLabelNames.find((label) => computedRiskLabels.includes(label)) || riskLabel;
const moduleLabelList = sortModuleLabels([...selectedModuleLabels]);
const contributorLabelList = contributorTierLabel ? [contributorTierLabel] : [];
const selectedRiskLabels = hasManualRiskOverride
  ? sortByPriority([manualRiskSelection, manualRiskOverrideLabel], riskPriorityIndex)
  : sortByPriority([riskLabel], riskPriorityIndex);
const selectedSizeLabels = sortByPriority([sizeLabel], sizePriorityIndex);
const sortedContributorLabels = sortByPriority(contributorLabelList, contributorPriorityIndex);
const sortedPathLabels = sortByPriority(dedupedPathLabels, pathLabelPriorityIndex);
const sortedKeepNonManagedLabels = [...new Set(keepNonManagedLabels)].sort((left, right) =>
  left.localeCompare(right)
);
// Final ordering: risk, size, tier, module, path, then everything else.
// setLabels replaces the full label set atomically.
const nextLabels = [
  ...new Set([
    ...selectedRiskLabels,
    ...selectedSizeLabels,
    ...sortedContributorLabels,
    ...moduleLabelList,
    ...sortedPathLabels,
    ...sortedKeepNonManagedLabels,
  ]),
];
await github.rest.issues.setLabels({
  owner,
  repo,
  issue_number: pr.number,
  labels: nextLabels,
});

184
.github/workflows/pr-hygiene.yml vendored Normal file
View file

@ -0,0 +1,184 @@
name: PR Hygiene
on:
schedule:
- cron: "15 */12 * * *"
workflow_dispatch:
permissions: {}
concurrency:
group: pr-hygiene
cancel-in-progress: true
jobs:
nudge-stale-prs:
runs-on: blacksmith-2vcpu-ubuntu-2404
permissions:
contents: read
pull-requests: write
issues: write
env:
STALE_HOURS: "48"
steps:
- name: Nudge PRs that need rebase or CI refresh
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
// Scheduled hygiene sweep: nudge open PRs whose head commit is stale AND
// which are behind main or missing/failing the required CI gate.
const staleHours = Number(process.env.STALE_HOURS || "48");
// Opt-out labels: any of these suppresses nudging entirely.
const ignoreLabels = new Set(["no-stale", "stale", "maintainer", "no-pr-hygiene"]);
// Hidden HTML marker used to find and update our own sticky comment.
const marker = "<!-- pr-hygiene-nudge -->";
const owner = context.repo.owner;
const repo = context.repo.repo;
const openPrs = await github.paginate(github.rest.pulls.list, {
  owner,
  repo,
  state: "open",
  per_page: 100,
});
// Drafts and opted-out PRs are excluded up front.
const activePrs = openPrs.filter((pr) => {
  if (pr.draft) {
    return false;
  }
  const labels = new Set((pr.labels || []).map((label) => label.name));
  return ![...ignoreLabels].some((label) => labels.has(label));
});
core.info(`Scanning ${activePrs.length} open PR(s) for hygiene nudges.`);
let nudged = 0;
let skipped = 0;
for (const pr of activePrs) {
  // Staleness is measured from the head commit's timestamp, not PR activity.
  const { data: headCommit } = await github.rest.repos.getCommit({
    owner,
    repo,
    ref: pr.head.sha,
  });
  const headCommitAt =
    headCommit.commit?.committer?.date || headCommit.commit?.author?.date;
  if (!headCommitAt) {
    skipped += 1;
    core.info(`#${pr.number}: missing head commit timestamp, skipping.`);
    continue;
  }
  // 3600000 ms = 1 hour.
  const ageHours = (Date.now() - new Date(headCommitAt).getTime()) / 3600000;
  if (ageHours < staleHours) {
    skipped += 1;
    continue;
  }
  // mergeable_state requires the detail endpoint (list omits it).
  const { data: prDetail } = await github.rest.pulls.get({
    owner,
    repo,
    pull_number: pr.number,
  });
  const isBehindBase = prDetail.mergeable_state === "behind";
  const { data: checkRunsData } = await github.rest.checks.listForRef({
    owner,
    repo,
    ref: pr.head.sha,
    per_page: 100,
  });
  // Find the most recent "CI Required Gate" run for the head SHA.
  const ciGateRuns = (checkRunsData.check_runs || [])
    .filter((run) => run.name === "CI Required Gate")
    .sort((a, b) => {
      const aTime = new Date(a.started_at || a.completed_at || a.created_at).getTime();
      const bTime = new Date(b.started_at || b.completed_at || b.created_at).getTime();
      return bTime - aTime;
    });
  // ciState: "missing" | "in_progress" | "success" | <failure conclusion>.
  let ciState = "missing";
  if (ciGateRuns.length > 0) {
    const latest = ciGateRuns[0];
    if (latest.status !== "completed") {
      ciState = "in_progress";
    } else if (["success", "neutral", "skipped"].includes(latest.conclusion || "")) {
      ciState = "success";
    } else {
      ciState = String(latest.conclusion || "failure");
    }
  }
  const ciMissing = ciState === "missing";
  const ciFailing = !["success", "in_progress", "missing"].includes(ciState);
  // Nothing to nudge about: branch current and CI fine or still running.
  if (!isBehindBase && !ciMissing && !ciFailing) {
    skipped += 1;
    continue;
  }
  const reasons = [];
  if (isBehindBase) {
    reasons.push("- Branch is behind `main` (please rebase or merge the latest base branch).");
  }
  if (ciMissing) {
    reasons.push("- No `CI Required Gate` run was found for the current head commit.");
  }
  if (ciFailing) {
    reasons.push(`- Latest \`CI Required Gate\` result is \`${ciState}\`.`);
  }
  const shortSha = pr.head.sha.slice(0, 12);
  const body = [
    marker,
    `Hi @${pr.user.login}, friendly automation nudge from PR hygiene.`,
    "",
    `This PR has had no new commits for **${Math.floor(ageHours)}h** and still needs an update before merge:`,
    "",
    ...reasons,
    "",
    "### Recommended next steps",
    "1. Rebase your branch on `main`.",
    "2. Push the updated branch and re-run checks (or use **Re-run failed jobs**).",
    "3. Post fresh validation output in this PR thread.",
    "",
    "Maintainers: apply `no-stale` to opt out for accepted-but-blocked work.",
    `Head SHA: \`${shortSha}\``,
  ].join("\n");
  // Sticky-comment behavior: update our previous bot comment instead of
  // posting duplicates; skip entirely when the text is unchanged.
  const { data: comments } = await github.rest.issues.listComments({
    owner,
    repo,
    issue_number: pr.number,
    per_page: 100,
  });
  const existing = comments.find(
    (comment) => comment.user?.type === "Bot" && comment.body?.includes(marker),
  );
  if (existing) {
    if (existing.body === body) {
      skipped += 1;
      continue;
    }
    await github.rest.issues.updateComment({
      owner,
      repo,
      comment_id: existing.id,
      body,
    });
  } else {
    await github.rest.issues.createComment({
      owner,
      repo,
      issue_number: pr.number,
      body,
    });
  }
  nudged += 1;
  core.info(`#${pr.number}: hygiene nudge posted/updated.`);
}
core.info(`Done. Nudged=${nudged}, skipped=${skipped}`);

179
.github/workflows/pr-intake-sanity.yml vendored Normal file
View file

@ -0,0 +1,179 @@
name: PR Intake Sanity
on:
pull_request_target:
types: [opened, reopened, synchronize, edited, ready_for_review]
concurrency:
group: pr-intake-sanity-${{ github.event.pull_request.number || github.run_id }}
cancel-in-progress: true
permissions:
contents: read
pull-requests: write
issues: write
jobs:
intake:
name: Intake Sanity
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 10
steps:
- name: Run safe PR intake checks
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
with:
script: |
// Fast, safe PR intake checks that run before full CI: template completeness
// plus a scan of added patch lines for formatting/safety problems.
const owner = context.repo.owner;
const repo = context.repo.repo;
const pr = context.payload.pull_request;
if (!pr) return;
// Hidden marker identifying this workflow's sticky comment.
const marker = "<!-- pr-intake-sanity -->";
const requiredSections = [
  "## Summary",
  "## Validation Evidence (required)",
  "## Security Impact (required)",
  "## Privacy and Data Hygiene (required)",
  "## Rollback Plan (required)",
];
const body = pr.body || "";
const missingSections = requiredSections.filter((section) => !body.includes(section));
const missingFields = [];
// Each entry: [human-readable field name, regex that the PR body must match].
const requiredFieldChecks = [
  ["summary problem", /- Problem:\s*\S+/m],
  ["summary why it matters", /- Why it matters:\s*\S+/m],
  ["summary what changed", /- What changed:\s*\S+/m],
  ["validation commands", /Commands and result summary:\s*[\s\S]*```/m],
  ["security risk/mitigation", /- New permissions\/capabilities\?\s*\(`Yes\/No`\):\s*\S+/m],
  ["privacy status", /- Data-hygiene status\s*\(`pass\|needs-follow-up`\):\s*\S+/m],
  ["rollback plan", /- Fast rollback command\/path:\s*\S+/m],
];
for (const [name, pattern] of requiredFieldChecks) {
  if (!pattern.test(body)) {
    missingFields.push(name);
  }
}
const files = await github.paginate(github.rest.pulls.listFiles, {
  owner,
  repo,
  pull_number: pr.number,
  per_page: 100,
});
// Scan only ADDED lines of each patch (lines starting "+" but not "+++").
const formatProblems = [];
for (const file of files) {
  const patch = file.patch || "";
  if (!patch) continue;
  const lines = patch.split("\n");
  for (let idx = 0; idx < lines.length; idx += 1) {
    const line = lines[idx];
    if (!line.startsWith("+") || line.startsWith("+++")) continue;
    const added = line.slice(1);
    // NOTE: lineNo is the index within the patch text, not the file line.
    const lineNo = idx + 1;
    if (/\t/.test(added)) {
      formatProblems.push(`${file.filename}:patch#${lineNo} contains tab characters`);
    }
    if (/[ \t]+$/.test(added)) {
      formatProblems.push(`${file.filename}:patch#${lineNo} contains trailing whitespace`);
    }
    if (/^(<<<<<<<|=======|>>>>>>>)/.test(added)) {
      formatProblems.push(`${file.filename}:patch#${lineNo} contains merge conflict markers`);
    }
  }
}
const workflowFilesChanged = files
  .map((file) => file.filename)
  .filter((name) => name.startsWith(".github/workflows/"));
const failures = [];
if (missingSections.length > 0) {
  failures.push(`Missing required PR template sections: ${missingSections.join(", ")}`);
}
if (missingFields.length > 0) {
  failures.push(`Incomplete required PR template fields: ${missingFields.join(", ")}`);
}
if (formatProblems.length > 0) {
  failures.push(`Formatting/safety issues in added lines (${formatProblems.length})`);
}
const comments = await github.paginate(github.rest.issues.listComments, {
  owner,
  repo,
  issue_number: pr.number,
  per_page: 100,
});
const existing = comments.find((comment) => (comment.body || "").includes(marker));
// On success, remove any previous failure comment and exit quietly.
if (failures.length === 0) {
  if (existing) {
    await github.rest.issues.deleteComment({
      owner,
      repo,
      comment_id: existing.id,
    });
  }
  core.info("PR intake sanity checks passed.");
  return;
}
const runUrl = `${context.serverUrl}/${owner}/${repo}/actions/runs/${context.runId}`;
// Show at most 20 line-level issues in the comment.
const details = [];
if (formatProblems.length > 0) {
  details.push(...formatProblems.slice(0, 20).map((entry) => `- ${entry}`));
  if (formatProblems.length > 20) {
    details.push(`- ...and ${formatProblems.length - 20} more issue(s)`);
  }
}
// Extra reminder block when workflow files were touched (owner approval gate).
const ownerApprovalNote = workflowFilesChanged.length > 0
  ? [
      "",
      "Workflow files changed in this PR:",
      ...workflowFilesChanged.map((name) => `- \`${name}\``),
      "",
      "Reminder: workflow changes require owner approval via `CI Required Gate`.",
    ].join("\n")
  : "";
const commentBody = [
  marker,
  "### PR intake checks failed",
  "",
  "Fast safe checks ran before full CI and found issues:",
  ...failures.map((entry) => `- ${entry}`),
  "",
  "Action items:",
  "1. Complete the required PR template sections/fields.",
  "2. Remove tabs, trailing whitespace, and conflict markers from added lines.",
  "3. Re-run local checks before pushing:",
  "   - `./scripts/ci/rust_quality_gate.sh`",
  "   - `./scripts/ci/rust_strict_delta_gate.sh`",
  "   - `./scripts/ci/docs_quality_gate.sh`",
  "",
  `Run logs: ${runUrl}`,
  "",
  "Detected line issues (sample):",
  ...(details.length > 0 ? details : ["- none"]),
  ownerApprovalNote,
].join("\n");
// Sticky-comment update/create, then fail the job so the status is red.
if (existing) {
  await github.rest.issues.updateComment({
    owner,
    repo,
    comment_id: existing.id,
    body: commentBody,
  });
} else {
  await github.rest.issues.createComment({
    owner,
    repo,
    issue_number: pr.number,
    body: commentBody,
  });
}
core.setFailed("PR intake sanity checks failed. See sticky comment for details.");

View file

@ -1,90 +1,117 @@
name: Release name: Release
on: on:
push: push:
tags: ["v*"] tags: ["v*"]
permissions: permissions:
contents: write contents: write
id-token: write # Required for cosign keyless signing via OIDC
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
jobs: jobs:
build-release: build-release:
name: Build ${{ matrix.target }} name: Build ${{ matrix.target }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: timeout-minutes: 40
matrix: strategy:
include: fail-fast: false
- os: ubuntu-latest matrix:
target: x86_64-unknown-linux-gnu include:
artifact: zeroclaw - os: ubuntu-latest
- os: macos-latest target: blacksmith-2vcpu-ubuntu-2404
target: x86_64-apple-darwin artifact: zeroclaw
artifact: zeroclaw - os: macos-latest
- os: macos-latest target: x86_64-apple-darwin
target: aarch64-apple-darwin artifact: zeroclaw
artifact: zeroclaw - os: macos-latest
- os: windows-latest target: aarch64-apple-darwin
target: x86_64-pc-windows-msvc artifact: zeroclaw
artifact: zeroclaw.exe - os: windows-latest
target: x86_64-pc-windows-msvc
artifact: zeroclaw.exe
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: dtolnay/rust-toolchain@stable - uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with: with:
targets: ${{ matrix.target }} targets: ${{ matrix.target }}
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Build release - name: Build release
run: cargo build --release --target ${{ matrix.target }} run: cargo build --release --locked --target ${{ matrix.target }}
- name: Check binary size (Unix) - name: Check binary size (Unix)
if: runner.os != 'Windows' if: runner.os != 'Windows'
run: | run: |
SIZE=$(stat -f%z target/${{ matrix.target }}/release/${{ matrix.artifact }} 2>/dev/null || stat -c%s target/${{ matrix.target }}/release/${{ matrix.artifact }}) SIZE=$(stat -f%z target/${{ matrix.target }}/release/${{ matrix.artifact }} 2>/dev/null || stat -c%s target/${{ matrix.target }}/release/${{ matrix.artifact }})
echo "Binary size: $((SIZE / 1024 / 1024))MB ($SIZE bytes)" echo "Binary size: $((SIZE / 1024 / 1024))MB ($SIZE bytes)"
if [ "$SIZE" -gt 5242880 ]; then if [ "$SIZE" -gt 5242880 ]; then
echo "::warning::Binary exceeds 5MB target" echo "::warning::Binary exceeds 5MB target"
fi fi
- name: Package (Unix) - name: Package (Unix)
if: runner.os != 'Windows' if: runner.os != 'Windows'
run: | run: |
cd target/${{ matrix.target }}/release cd target/${{ matrix.target }}/release
tar czf ../../../zeroclaw-${{ matrix.target }}.tar.gz ${{ matrix.artifact }} tar czf ../../../zeroclaw-${{ matrix.target }}.tar.gz ${{ matrix.artifact }}
- name: Package (Windows) - name: Package (Windows)
if: runner.os == 'Windows' if: runner.os == 'Windows'
run: | run: |
cd target/${{ matrix.target }}/release cd target/${{ matrix.target }}/release
7z a ../../../zeroclaw-${{ matrix.target }}.zip ${{ matrix.artifact }} 7z a ../../../zeroclaw-${{ matrix.target }}.zip ${{ matrix.artifact }}
- name: Upload artifact - name: Upload artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
with: with:
name: zeroclaw-${{ matrix.target }} name: zeroclaw-${{ matrix.target }}
path: zeroclaw-${{ matrix.target }}.* path: zeroclaw-${{ matrix.target }}.*
publish: publish:
name: Publish Release name: Publish Release
needs: build-release needs: build-release
runs-on: ubuntu-latest runs-on: blacksmith-2vcpu-ubuntu-2404
steps: timeout-minutes: 15
- uses: actions/checkout@v4 steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Download all artifacts - name: Download all artifacts
uses: actions/download-artifact@v4 uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
with: with:
path: artifacts path: artifacts
- name: Create GitHub Release - name: Generate SHA256 checksums
uses: softprops/action-gh-release@v2 run: |
with: cd artifacts
generate_release_notes: true find . -type f \( -name '*.tar.gz' -o -name '*.zip' \) -exec sha256sum {} + | sed 's| \./[^/]*/| |' > SHA256SUMS
files: artifacts/**/* echo "Generated checksums:"
env: cat SHA256SUMS
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: Sign artifacts with cosign (keyless)
run: |
for file in artifacts/**/*; do
[ -f "$file" ] || continue
cosign sign-blob --yes \
--oidc-issuer=https://token.actions.githubusercontent.com \
--output-signature="${file}.sig" \
--output-certificate="${file}.pem" \
"$file"
done
- name: Create GitHub Release
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
with:
generate_release_notes: true
files: |
artifacts/**/*
artifacts/SHA256SUMS
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

62
.github/workflows/rust-reusable.yml vendored Normal file
View file

@ -0,0 +1,62 @@
name: Rust Reusable Job
on:
workflow_call:
inputs:
run_command:
description: "Shell command(s) to execute."
required: true
type: string
timeout_minutes:
description: "Job timeout in minutes."
required: false
default: 20
type: number
toolchain:
description: "Rust toolchain channel/version."
required: false
default: "stable"
type: string
components:
description: "Optional rustup components."
required: false
default: ""
type: string
targets:
description: "Optional rustup targets."
required: false
default: ""
type: string
use_cache:
description: "Whether to enable rust-cache."
required: false
default: true
type: boolean
permissions:
contents: read
jobs:
run:
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: ${{ inputs.timeout_minutes }}
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Setup Rust toolchain
uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable
with:
toolchain: ${{ inputs.toolchain }}
components: ${{ inputs.components }}
targets: ${{ inputs.targets }}
- name: Restore Rust cache
if: inputs.use_cache
uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- name: Run command
shell: bash
run: |
set -euo pipefail
${{ inputs.run_command }}

View file

@ -1,37 +1,43 @@
name: Security Audit name: Rust Package Security Audit
on: on:
push: push:
branches: [main] branches: [main]
pull_request: pull_request:
branches: [main] branches: [main]
schedule: schedule:
- cron: "0 6 * * 1" # Weekly on Monday 6am UTC - cron: "0 6 * * 1" # Weekly on Monday 6am UTC
concurrency:
group: security-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
security-events: write
actions: read
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
jobs: jobs:
audit: audit:
name: Security Audit name: Security Audit
runs-on: ubuntu-latest uses: ./.github/workflows/rust-reusable.yml
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo-audit
run: cargo install cargo-audit
- name: Run cargo-audit
run: cargo audit
deny:
name: License & Supply Chain
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: EmbarkStudios/cargo-deny-action@v2
with: with:
command: check advisories licenses sources timeout_minutes: 20
toolchain: stable
run_command: |
cargo install --locked cargo-audit --version 0.22.1
cargo audit
deny:
name: License & Supply Chain
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: EmbarkStudios/cargo-deny-action@3fd3802e88374d3fe9159b834c7714ec57d6c979 # v2
with:
command: check advisories licenses sources

44
.github/workflows/stale.yml vendored Normal file
View file

@ -0,0 +1,44 @@
name: Stale
on:
schedule:
- cron: "20 2 * * *"
workflow_dispatch:
permissions: {}
jobs:
stale:
permissions:
issues: write
pull-requests: write
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- name: Mark stale issues and pull requests
uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 21
days-before-issue-close: 7
days-before-pr-stale: 14
days-before-pr-close: 7
stale-issue-label: stale
stale-pr-label: stale
exempt-issue-labels: security,pinned,no-stale,no-pr-hygiene,maintainer
exempt-pr-labels: no-stale,no-pr-hygiene,maintainer
remove-stale-when-updated: true
exempt-all-assignees: true
operations-per-run: 300
stale-issue-message: |
This issue was automatically marked as stale due to inactivity.
Please provide an update, reproduction details, or current status to keep it open.
close-issue-message: |
Closing this issue due to inactivity.
If the problem still exists on the latest `main`, please open a new issue with fresh repro steps.
close-issue-reason: not_planned
stale-pr-message: |
This PR was automatically marked as stale due to inactivity.
Please rebase/update and post the latest validation results.
close-pr-message: |
Closing this PR due to inactivity.
Maintainers can reopen once the branch is updated and validation is provided.

116
.github/workflows/update-notice.yml vendored Normal file
View file

@ -0,0 +1,116 @@
name: Update Contributors NOTICE
on:
workflow_dispatch:
schedule:
# Run every Sunday at 00:00 UTC
- cron: '0 0 * * 0'
concurrency:
group: update-notice-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
pull-requests: write
jobs:
update-notice:
name: Update NOTICE with new contributors
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- name: Checkout repository
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Fetch contributors
id: contributors
env:
GH_TOKEN: ${{ github.token }}
run: |
# Fetch all contributors (excluding bots)
gh api \
--paginate \
"repos/${{ github.repository }}/contributors" \
--jq '.[] | select(.type != "Bot") | .login' > /tmp/contributors_raw.txt
# Sort alphabetically and filter
sort -f < /tmp/contributors_raw.txt > contributors.txt
# Count contributors
count=$(wc -l < contributors.txt | tr -d ' ')
echo "count=$count" >> "$GITHUB_OUTPUT"
- name: Generate new NOTICE file
run: |
cat > NOTICE << 'EOF'
ZeroClaw
Copyright 2025 ZeroClaw Labs
This product includes software developed at ZeroClaw Labs (https://github.com/zeroclaw-labs).
Contributors
============
The following individuals have contributed to ZeroClaw:
EOF
# Append contributors in alphabetical order
sed 's/^/- /' contributors.txt >> NOTICE
# Add third-party dependencies section
cat >> NOTICE << 'EOF'
Third-Party Dependencies
=========================
This project uses the following third-party libraries and components,
each licensed under their respective terms:
See Cargo.lock for a complete list of dependencies and their licenses.
EOF
- name: Check if NOTICE changed
id: check_diff
run: |
if git diff --quiet NOTICE; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Create Pull Request
if: steps.check_diff.outputs.changed == 'true'
env:
GH_TOKEN: ${{ github.token }}
COUNT: ${{ steps.contributors.outputs.count }}
run: |
branch_name="auto/update-notice-$(date +%Y%m%d)"
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git checkout -b "$branch_name"
git add NOTICE
git commit -m "chore(notice): update contributor list"
git push origin "$branch_name"
gh pr create \
--title "chore(notice): update contributor list" \
--body "Auto-generated update to NOTICE file with $COUNT contributors." \
--label "chore" \
--label "docs" \
--draft || true
- name: Summary
run: |
echo "## NOTICE Update Results" >> "$GITHUB_STEP_SUMMARY"
echo "" >> "$GITHUB_STEP_SUMMARY"
if [ "${{ steps.check_diff.outputs.changed }}" = "true" ]; then
echo "✅ PR created to update NOTICE" >> "$GITHUB_STEP_SUMMARY"
else
echo "✓ NOTICE file is up to date" >> "$GITHUB_STEP_SUMMARY"
fi
echo "" >> "$GITHUB_STEP_SUMMARY"
echo "**Contributors:** ${{ steps.contributors.outputs.count }}" >> "$GITHUB_STEP_SUMMARY"

64
.github/workflows/workflow-sanity.yml vendored Normal file
View file

@ -0,0 +1,64 @@
name: Workflow Sanity
on:
pull_request:
paths:
- ".github/workflows/**"
- ".github/*.yml"
- ".github/*.yaml"
push:
paths:
- ".github/workflows/**"
- ".github/*.yml"
- ".github/*.yaml"
concurrency:
group: workflow-sanity-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
permissions:
contents: read
jobs:
no-tabs:
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Fail on tabs in workflow files
shell: bash
run: |
set -euo pipefail
python - <<'PY'
from __future__ import annotations
import pathlib
import sys
root = pathlib.Path(".github/workflows")
bad: list[str] = []
for path in sorted(root.rglob("*.yml")):
if b"\t" in path.read_bytes():
bad.append(str(path))
for path in sorted(root.rglob("*.yaml")):
if b"\t" in path.read_bytes():
bad.append(str(path))
if bad:
print("Tabs found in workflow file(s):")
for path in bad:
print(f"- {path}")
sys.exit(1)
PY
actionlint:
runs-on: blacksmith-2vcpu-ubuntu-2404
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Lint GitHub workflows
uses: rhysd/actionlint@393031adb9afb225ee52ae2ccd7a5af5525e03e8 # v1.7.11

25
.gitignore vendored
View file

@ -1,4 +1,29 @@
/target /target
firmware/*/target
*.db *.db
*.db-journal *.db-journal
.DS_Store .DS_Store
.wt-pr37/
__pycache__/
*.pyc
docker-compose.override.yml
# Environment files (may contain secrets)
.env
# Python virtual environments
.venv/
venv/
# ESP32 build cache (esp-idf-sys managed)
.embuild/
.env.local
.env.*.local
# Secret keys and credentials
.secret_key
*.key
*.pem
credentials.json

15
.markdownlint-cli2.yaml Normal file
View file

@ -0,0 +1,15 @@
config:
default: true
MD013: false
MD007: false
MD031: false
MD032: false
MD033: false
MD040: false
MD041: false
MD060: false
MD024:
allow_different_nesting: true
ignores:
- "target/**"

413
AGENTS.md Normal file
View file

@ -0,0 +1,413 @@
# AGENTS.md — ZeroClaw Agent Engineering Protocol
This file defines the default working protocol for coding agents in this repository.
Scope: entire repository.
## 1) Project Snapshot (Read First)
ZeroClaw is a Rust-first autonomous agent runtime optimized for:
- high performance
- high efficiency
- high stability
- high extensibility
- high sustainability
- high security
Core architecture is trait-driven and modular. Most extension work should be done by implementing traits and registering in factory modules.
Key extension points:
- `src/providers/traits.rs` (`Provider`)
- `src/channels/traits.rs` (`Channel`)
- `src/tools/traits.rs` (`Tool`)
- `src/memory/traits.rs` (`Memory`)
- `src/observability/traits.rs` (`Observer`)
- `src/runtime/traits.rs` (`RuntimeAdapter`)
- `src/peripherals/traits.rs` (`Peripheral`) — hardware boards (STM32, RPi GPIO)
## 2) Deep Architecture Observations (Why This Protocol Exists)
These codebase realities should drive every design decision:
1. **Trait + factory architecture is the stability backbone**
- Extension points are intentionally explicit and swappable.
- Most features should be added via trait implementation + factory registration, not cross-cutting rewrites.
2. **Security-critical surfaces are first-class and internet-adjacent**
- `src/gateway/`, `src/security/`, `src/tools/`, `src/runtime/` carry high blast radius.
- Defaults already lean secure-by-default (pairing, bind safety, limits, secret handling); keep it that way.
3. **Performance and binary size are product goals, not nice-to-have**
- `Cargo.toml` release profile and dependency choices optimize for size and determinism.
- Convenience dependencies and broad abstractions can silently regress these goals.
4. **Config and runtime contracts are user-facing API**
- `src/config/schema.rs` and CLI commands are effectively public interfaces.
- Backward compatibility and explicit migration matter.
5. **The project now runs in high-concurrency collaboration mode**
- CI + docs governance + label routing are part of the product delivery system.
- PR throughput is a design constraint; not just a maintainer inconvenience.
## 3) Engineering Principles (Normative)
These principles are mandatory by default. They are not slogans; they are implementation constraints.
### 3.1 KISS (Keep It Simple, Stupid)
**Why here:** Runtime + security behavior must stay auditable under pressure.
Required:
- Prefer straightforward control flow over clever meta-programming.
- Prefer explicit match branches and typed structs over hidden dynamic behavior.
- Keep error paths obvious and localized.
### 3.2 YAGNI (You Aren't Gonna Need It)
**Why here:** Premature features increase attack surface and maintenance burden.
Required:
- Do not add new config keys, trait methods, feature flags, or workflow branches without a concrete accepted use case.
- Do not introduce speculative “future-proof” abstractions without at least one current caller.
- Keep unsupported paths explicit (error out) rather than adding partial fake support.
### 3.3 DRY + Rule of Three
**Why here:** Naive DRY can create brittle shared abstractions across providers/channels/tools.
Required:
- Duplicate small, local logic when it preserves clarity.
- Extract shared utilities only after repeated, stable patterns (rule-of-three).
- When extracting, preserve module boundaries and avoid hidden coupling.
### 3.4 SRP + ISP (Single Responsibility + Interface Segregation)
**Why here:** Trait-driven architecture already encodes subsystem boundaries.
Required:
- Keep each module focused on one concern.
- Extend behavior by implementing existing narrow traits whenever possible.
- Avoid fat interfaces and “god modules” that mix policy + transport + storage.
### 3.5 Fail Fast + Explicit Errors
**Why here:** Silent fallback in agent runtimes can create unsafe or costly behavior.
Required:
- Prefer explicit `bail!`/errors for unsupported or unsafe states.
- Never silently broaden permissions/capabilities.
- Document fallback behavior when fallback is intentional and safe.
### 3.6 Secure by Default + Least Privilege
**Why here:** Gateway/tools/runtime can execute actions with real-world side effects.
Required:
- Deny-by-default for access and exposure boundaries.
- Never log secrets, raw tokens, or sensitive payloads.
- Keep network/filesystem/shell scope as narrow as possible unless explicitly justified.
### 3.7 Determinism + Reproducibility
**Why here:** Reliable CI and low-latency triage depend on deterministic behavior.
Required:
- Prefer reproducible commands and locked dependency behavior in CI-sensitive paths.
- Keep tests deterministic (no flaky timing/network dependence without guardrails).
- Ensure local validation commands map to CI expectations.
### 3.8 Reversibility + Rollback-First Thinking
**Why here:** Fast recovery is mandatory under high PR volume.
Required:
- Keep changes easy to revert (small scope, clear blast radius).
- For risky changes, define rollback path before merge.
- Avoid mixed mega-patches that block safe rollback.
## 4) Repository Map (High-Level)
- `src/main.rs` — CLI entrypoint and command routing
- `src/lib.rs` — module exports and shared command enums
- `src/config/` — schema + config loading/merging
- `src/agent/` — orchestration loop
- `src/gateway/` — webhook/gateway server
- `src/security/` — policy, pairing, secret store
- `src/memory/` — markdown/sqlite memory backends + embeddings/vector merge
- `src/providers/` — model providers and resilient wrapper
- `src/channels/` — Telegram/Discord/Slack/etc channels
- `src/tools/` — tool execution surface (shell, file, memory, browser)
- `src/peripherals/` — hardware peripherals (STM32, RPi GPIO); see `docs/hardware-peripherals-design.md`
- `src/runtime/` — runtime adapters (currently native)
- `docs/` — architecture + process docs
- `.github/` — CI, templates, automation workflows
## 5) Risk Tiers by Path (Review Depth Contract)
Use these tiers when deciding validation depth and review rigor.
- **Low risk**: docs/chore/tests-only changes
- **Medium risk**: most `src/**` behavior changes without boundary/security impact
- **High risk**: `src/security/**`, `src/runtime/**`, `src/gateway/**`, `src/tools/**`, `.github/workflows/**`, access-control boundaries
When uncertain, classify as higher risk.
## 6) Agent Workflow (Required)
1. **Read before write**
- Inspect existing module, factory wiring, and adjacent tests before editing.
2. **Define scope boundary**
- One concern per PR; avoid mixed feature+refactor+infra patches.
3. **Implement minimal patch**
- Apply KISS/YAGNI/DRY rule-of-three explicitly.
4. **Validate by risk tier**
- Docs-only: lightweight checks.
- Code/risky changes: full relevant checks and focused scenarios.
5. **Document impact**
- Update docs/PR notes for behavior, risk, side effects, and rollback.
6. **Respect queue hygiene**
- If stacked PR: declare `Depends on #...`.
- If replacing old PR: declare `Supersedes #...`.
### 6.3 Branch / Commit / PR Flow (Required)
All contributors (human or agent) must follow the same collaboration flow:
- Create and work from a non-`main` branch.
- Commit changes to that branch with clear, scoped commit messages.
- Open a PR to `main`; do not push directly to `main`.
- Wait for required checks and review outcomes before merging.
- Merge via PR controls (squash/rebase/merge as repository policy allows).
- Branch deletion after merge is optional; long-lived branches are allowed when intentionally maintained.
### 6.4 Worktree Workflow (Required for Multi-Track Agent Work)
Use Git worktrees to isolate concurrent agent/human tracks safely and predictably:
- Use one worktree per active branch/PR stream to avoid cross-task contamination.
- Keep each worktree on a single branch; do not mix unrelated edits in one worktree.
- Run validation commands inside the corresponding worktree before commit/PR.
- Name worktrees clearly by scope (for example: `wt/ci-hardening`, `wt/provider-fix`) and remove stale worktrees when no longer needed.
- PR checkpoint rules from section 6.3 still apply to worktree-based development.
### 6.1 Code Naming Contract (Required)
Apply these naming rules for all code changes unless a subsystem has a stronger existing pattern.
- Use Rust standard casing consistently: modules/files `snake_case`, types/traits/enums `PascalCase`, functions/variables `snake_case`, constants/statics `SCREAMING_SNAKE_CASE`.
- Name types and modules by domain role, not implementation detail (for example `DiscordChannel`, `SecurityPolicy`, `MemoryStore` over vague names like `Manager`/`Helper`).
- Keep trait implementer naming explicit and predictable: `<ProviderName>Provider`, `<ChannelName>Channel`, `<ToolName>Tool`, `<BackendName>Memory`.
- Keep factory registration keys stable, lowercase, and user-facing (for example `"openai"`, `"discord"`, `"shell"`), and avoid alias sprawl without migration need.
- Name tests by behavior/outcome (`<subject>_<expected_behavior>`) and keep fixture identifiers neutral/project-scoped.
- If identity-like naming is required in tests/examples, use ZeroClaw-native labels only (`ZeroClawAgent`, `zeroclaw_user`, `zeroclaw_node`).
### 6.2 Architecture Boundary Contract (Required)
Use these rules to keep the trait/factory architecture stable under growth.
- Extend capabilities by adding trait implementations + factory wiring first; avoid cross-module rewrites for isolated features.
- Keep dependency direction inward to contracts: concrete integrations depend on trait/config/util layers, not on other concrete integrations.
- Avoid creating cross-subsystem coupling (for example provider code importing channel internals, tool code mutating gateway policy directly).
- Keep module responsibilities single-purpose: orchestration in `agent/`, transport in `channels/`, model I/O in `providers/`, policy in `security/`, execution in `tools/`.
- Introduce new shared abstractions only after repeated use (rule-of-three), with at least one real caller in current scope.
- For config/schema changes, treat keys as public contract: document defaults, compatibility impact, and migration/rollback path.
## 7) Change Playbooks
### 7.1 Adding a Provider
- Implement `Provider` in `src/providers/`.
- Register in `src/providers/mod.rs` factory.
- Add focused tests for factory wiring and error paths.
- Avoid provider-specific behavior leaks into shared orchestration code.
### 7.2 Adding a Channel
- Implement `Channel` in `src/channels/`.
- Keep `send`, `listen`, `health_check`, typing semantics consistent.
- Cover auth/allowlist/health behavior with tests.
### 7.3 Adding a Tool
- Implement `Tool` in `src/tools/` with strict parameter schema.
- Validate and sanitize all inputs.
- Return structured `ToolResult`; avoid panics in runtime path.
### 7.4 Adding a Peripheral
- Implement `Peripheral` in `src/peripherals/`.
- Peripherals expose `tools()` — each tool delegates to the hardware (GPIO, sensors, etc.).
- Register board type in config schema if needed.
- See `docs/hardware-peripherals-design.md` for protocol and firmware notes.
### 7.5 Security / Runtime / Gateway Changes
- Include threat/risk notes and rollback strategy.
- Add/update tests or validation evidence for failure modes and boundaries.
- Keep observability useful but non-sensitive.
- For `.github/workflows/**` changes, include Actions allowlist impact in PR notes and update `docs/actions-source-policy.md` when sources change.
## 8) Validation Matrix
Default local checks for code changes:
```bash
cargo fmt --all -- --check
cargo clippy --all-targets -- -D warnings
cargo test
```
Preferred local pre-PR validation path (recommended, not required):
```bash
./dev/ci.sh all
```
Notes:
- Local Docker-based CI is strongly recommended when Docker is available.
- Contributors are not blocked from opening a PR if local Docker CI is unavailable; in that case run the most relevant native checks and document what was run.
Additional expectations by change type:
- **Docs/template-only**: run markdown lint and relevant doc checks.
- **Workflow changes**: validate YAML syntax; run workflow lint/sanity checks when available.
- **Security/runtime/gateway/tools**: include at least one boundary/failure-mode validation.
If full checks are impractical, run the most relevant subset and document what was skipped and why.
## 9) Collaboration and PR Discipline
- Follow `.github/pull_request_template.md` fully (including side effects / blast radius).
- Keep PR descriptions concrete: problem, change, non-goals, risk, rollback.
- Use conventional commit titles.
- Prefer small PRs (`size: XS/S/M`) when possible.
- Agent-assisted PRs are welcome, **but contributors remain accountable for understanding what their code will do**.
### 9.1 Privacy/Sensitive Data and Neutral Wording (Required)
Treat privacy and neutrality as merge gates, not best-effort guidelines.
- Never commit personal or sensitive data in code, docs, tests, fixtures, snapshots, logs, examples, or commit messages.
- Prohibited data includes (non-exhaustive): real names, personal emails, phone numbers, addresses, access tokens, API keys, credentials, IDs, and private URLs.
- Use neutral project-scoped placeholders (for example: `user_a`, `test_user`, `project_bot`, `example.com`) instead of real identity data.
- Test names/messages/fixtures must be impersonal and system-focused; avoid first-person or identity-specific language.
- If identity-like context is unavoidable, use ZeroClaw-scoped roles/labels only (for example: `ZeroClawAgent`, `ZeroClawOperator`, `zeroclaw_user`) and avoid real-world personas.
- Recommended identity-safe naming palette (use when identity-like context is required):
- actor labels: `ZeroClawAgent`, `ZeroClawOperator`, `ZeroClawMaintainer`, `zeroclaw_user`
- service/runtime labels: `zeroclaw_bot`, `zeroclaw_service`, `zeroclaw_runtime`, `zeroclaw_node`
- environment labels: `zeroclaw_project`, `zeroclaw_workspace`, `zeroclaw_channel`
- If reproducing external incidents, redact and anonymize all payloads before committing.
- Before push, review `git diff --cached` specifically for accidental sensitive strings and identity leakage.
### 9.2 Superseded-PR Attribution (Required)
When a PR supersedes another contributor's PR and carries forward substantive code or design decisions, preserve authorship explicitly.
- In the integrating commit message, add one `Co-authored-by: Name <email>` trailer per superseded contributor whose work is materially incorporated.
- Use a GitHub-recognized email (`<login@users.noreply.github.com>` or the contributor's verified commit email) so attribution is rendered correctly.
- Keep trailers on their own lines after a blank line at commit-message end; never encode them as escaped `\\n` text.
- In the PR body, list superseded PR links and briefly state what was incorporated from each.
- If no actual code/design was incorporated (only inspiration), do not use `Co-authored-by`; give credit in PR notes instead.
### 9.3 Superseded-PR PR Template (Recommended)
When superseding multiple PRs, use a consistent title/body structure to reduce reviewer ambiguity.
- Recommended title format: `feat(<scope>): unify and supersede #<pr_a>, #<pr_b> [and #<pr_n>]`
- If this is docs/chore/meta only, keep the same supersede suffix and use the appropriate conventional-commit type.
- In the PR body, include the following template (fill placeholders, remove non-applicable lines):
```md
## Supersedes
- #<pr_a> by @<author_a>
- #<pr_b> by @<author_b>
- #<pr_n> by @<author_n>
## Integrated Scope
- From #<pr_a>: <what was materially incorporated>
- From #<pr_b>: <what was materially incorporated>
- From #<pr_n>: <what was materially incorporated>
## Attribution
- Co-authored-by trailers added for materially incorporated contributors: Yes/No
- If No, explain why (for example: no direct code/design carry-over)
## Non-goals
- <explicitly list what was not carried over>
## Risk and Rollback
- Risk: <summary>
- Rollback: <revert commit/PR strategy>
```
### 9.4 Superseded-PR Commit Template (Recommended)
When a commit unifies or supersedes prior PR work, use a deterministic commit message layout so attribution is machine-parsed and reviewer-friendly.
- Keep one blank line between message sections, and exactly one blank line before trailer lines.
- Keep each trailer on its own line; do not wrap, indent, or encode as escaped `\n` text.
- Add one `Co-authored-by` trailer per materially incorporated contributor, using GitHub-recognized email.
- If no direct code/design is carried over, omit `Co-authored-by` and explain attribution in the PR body instead.
```text
feat(<scope>): unify and supersede #<pr_a>, #<pr_b> [and #<pr_n>]
<one-paragraph summary of integrated outcome>
Supersedes:
- #<pr_a> by @<author_a>
- #<pr_b> by @<author_b>
- #<pr_n> by @<author_n>
Integrated scope:
- <subsystem_or_feature_a>: from #<pr_x>
- <subsystem_or_feature_b>: from #<pr_y>
Co-authored-by: <Name A> <login_a@users.noreply.github.com>
Co-authored-by: <Name B> <login_b@users.noreply.github.com>
```
Reference docs:
- `CONTRIBUTING.md`
- `docs/pr-workflow.md`
- `docs/reviewer-playbook.md`
- `docs/ci-map.md`
- `docs/actions-source-policy.md`
## 10) Anti-Patterns (Do Not)
- Do not add heavy dependencies for minor convenience.
- Do not silently weaken security policy or access constraints.
- Do not add speculative config/feature flags “just in case”.
- Do not mix massive formatting-only changes with functional changes.
- Do not modify unrelated modules “while here”.
- Do not bypass failing checks without explicit explanation.
- Do not hide behavior-changing side effects in refactor commits.
- Do not include personal identity or sensitive information in test data, examples, docs, or commits.
## 11) Handoff Template (Agent -> Agent / Maintainer)
When handing off work, include:
1. What changed
2. What did not change
3. Validation run and results
4. Remaining risks / unknowns
5. Next recommended action
## 12) Vibe Coding Guardrails
When working in fast iterative mode:
- Keep each iteration reversible (small commits, clear rollback).
- Validate assumptions with code search before implementing.
- Prefer deterministic behavior over clever shortcuts.
- Do not “ship and hope” on security-sensitive paths.
- If uncertain, leave a concrete TODO with verification context, not a hidden guess.

View file

@ -23,7 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `enc:` prefix for encrypted secrets — Use `enc2:` (ChaCha20-Poly1305) instead. - `enc:` prefix for encrypted secrets — Use `enc2:` (ChaCha20-Poly1305) instead.
Legacy values are still decrypted for backward compatibility but should be migrated. Legacy values are still decrypted for backward compatibility but should be migrated.
## [0.1.0] - 2025-02-13 ## [0.1.0] - 2026-02-13
### Added ### Added
- **Core Architecture**: Trait-based pluggable system for Provider, Channel, Observer, RuntimeAdapter, Tool - **Core Architecture**: Trait-based pluggable system for Provider, Channel, Observer, RuntimeAdapter, Tool

413
CLAUDE.md Normal file
View file

@ -0,0 +1,413 @@
# CLAUDE.md — ZeroClaw Agent Engineering Protocol
This file defines the default working protocol for Claude Code in this repository.
Scope: entire repository.
## 1) Project Snapshot (Read First)
ZeroClaw is a Rust-first autonomous agent runtime optimized for:
- high performance
- high efficiency
- high stability
- high extensibility
- high sustainability
- high security
Core architecture is trait-driven and modular. Most extension work should be done by implementing traits and registering in factory modules.
Key extension points:
- `src/providers/traits.rs` (`Provider`)
- `src/channels/traits.rs` (`Channel`)
- `src/tools/traits.rs` (`Tool`)
- `src/memory/traits.rs` (`Memory`)
- `src/observability/traits.rs` (`Observer`)
- `src/runtime/traits.rs` (`RuntimeAdapter`)
- `src/peripherals/traits.rs` (`Peripheral`) — hardware boards (STM32, RPi GPIO)
## 2) Deep Architecture Observations (Why This Protocol Exists)
These codebase realities should drive every design decision:
1. **Trait + factory architecture is the stability backbone**
- Extension points are intentionally explicit and swappable.
- Most features should be added via trait implementation + factory registration, not cross-cutting rewrites.
2. **Security-critical surfaces are first-class and internet-adjacent**
- `src/gateway/`, `src/security/`, `src/tools/`, `src/runtime/` carry high blast radius.
- Defaults already lean secure-by-default (pairing, bind safety, limits, secret handling); keep it that way.
3. **Performance and binary size are product goals, not nice-to-have**
- `Cargo.toml` release profile and dependency choices optimize for size and determinism.
- Convenience dependencies and broad abstractions can silently regress these goals.
4. **Config and runtime contracts are user-facing API**
- `src/config/schema.rs` and CLI commands are effectively public interfaces.
- Backward compatibility and explicit migration matter.
5. **The project now runs in high-concurrency collaboration mode**
- CI + docs governance + label routing are part of the product delivery system.
- PR throughput is a design constraint; not just a maintainer inconvenience.
## 3) Engineering Principles (Normative)
These principles are mandatory by default. They are not slogans; they are implementation constraints.
### 3.1 KISS (Keep It Simple, Stupid)
**Why here:** Runtime + security behavior must stay auditable under pressure.
Required:
- Prefer straightforward control flow over clever meta-programming.
- Prefer explicit match branches and typed structs over hidden dynamic behavior.
- Keep error paths obvious and localized.
### 3.2 YAGNI (You Aren't Gonna Need It)
**Why here:** Premature features increase attack surface and maintenance burden.
Required:
- Do not add new config keys, trait methods, feature flags, or workflow branches without a concrete accepted use case.
- Do not introduce speculative “future-proof” abstractions without at least one current caller.
- Keep unsupported paths explicit (error out) rather than adding partial fake support.
### 3.3 DRY + Rule of Three
**Why here:** Naive DRY can create brittle shared abstractions across providers/channels/tools.
Required:
- Duplicate small, local logic when it preserves clarity.
- Extract shared utilities only after repeated, stable patterns (rule-of-three).
- When extracting, preserve module boundaries and avoid hidden coupling.
### 3.4 SRP + ISP (Single Responsibility + Interface Segregation)
**Why here:** Trait-driven architecture already encodes subsystem boundaries.
Required:
- Keep each module focused on one concern.
- Extend behavior by implementing existing narrow traits whenever possible.
- Avoid fat interfaces and “god modules” that mix policy + transport + storage.
### 3.5 Fail Fast + Explicit Errors
**Why here:** Silent fallback in agent runtimes can create unsafe or costly behavior.
Required:
- Prefer explicit `bail!`/errors for unsupported or unsafe states.
- Never silently broaden permissions/capabilities.
- Document fallback behavior when fallback is intentional and safe.
### 3.6 Secure by Default + Least Privilege
**Why here:** Gateway/tools/runtime can execute actions with real-world side effects.
Required:
- Deny-by-default for access and exposure boundaries.
- Never log secrets, raw tokens, or sensitive payloads.
- Keep network/filesystem/shell scope as narrow as possible unless explicitly justified.
### 3.7 Determinism + Reproducibility
**Why here:** Reliable CI and low-latency triage depend on deterministic behavior.
Required:
- Prefer reproducible commands and locked dependency behavior in CI-sensitive paths.
- Keep tests deterministic (no flaky timing/network dependence without guardrails).
- Ensure local validation commands map to CI expectations.
### 3.8 Reversibility + Rollback-First Thinking
**Why here:** Fast recovery is mandatory under high PR volume.
Required:
- Keep changes easy to revert (small scope, clear blast radius).
- For risky changes, define rollback path before merge.
- Avoid mixed mega-patches that block safe rollback.
## 4) Repository Map (High-Level)
- `src/main.rs` — CLI entrypoint and command routing
- `src/lib.rs` — module exports and shared command enums
- `src/config/` — schema + config loading/merging
- `src/agent/` — orchestration loop
- `src/gateway/` — webhook/gateway server
- `src/security/` — policy, pairing, secret store
- `src/memory/` — markdown/sqlite memory backends + embeddings/vector merge
- `src/providers/` — model providers and resilient wrapper
- `src/channels/` — Telegram/Discord/Slack/etc channels
- `src/tools/` — tool execution surface (shell, file, memory, browser)
- `src/peripherals/` — hardware peripherals (STM32, RPi GPIO); see `docs/hardware-peripherals-design.md`
- `src/runtime/` — runtime adapters (currently native)
- `docs/` — architecture + process docs
- `.github/` — CI, templates, automation workflows
## 5) Risk Tiers by Path (Review Depth Contract)
Use these tiers when deciding validation depth and review rigor.
- **Low risk**: docs/chore/tests-only changes
- **Medium risk**: most `src/**` behavior changes without boundary/security impact
- **High risk**: `src/security/**`, `src/runtime/**`, `src/gateway/**`, `src/tools/**`, `.github/workflows/**`, access-control boundaries
When uncertain, classify as higher risk.
## 6) Agent Workflow (Required)
1. **Read before write**
- Inspect existing module, factory wiring, and adjacent tests before editing.
2. **Define scope boundary**
- One concern per PR; avoid mixed feature+refactor+infra patches.
3. **Implement minimal patch**
- Apply KISS/YAGNI/DRY rule-of-three explicitly.
4. **Validate by risk tier**
- Docs-only: lightweight checks.
- Code/risky changes: full relevant checks and focused scenarios.
5. **Document impact**
- Update docs/PR notes for behavior, risk, side effects, and rollback.
6. **Respect queue hygiene**
- If stacked PR: declare `Depends on #...`.
- If replacing old PR: declare `Supersedes #...`.
### 6.3 Branch / Commit / PR Flow (Required)
All contributors (human or agent) must follow the same collaboration flow:
- Create and work from a non-`main` branch.
- Commit changes to that branch with clear, scoped commit messages.
- Open a PR to `main`; do not push directly to `main`.
- Wait for required checks and review outcomes before merging.
- Merge via PR controls (squash/rebase/merge as repository policy allows).
- Branch deletion after merge is optional; long-lived branches are allowed when intentionally maintained.
### 6.4 Worktree Workflow (Required for Multi-Track Agent Work)
Use Git worktrees to isolate concurrent agent/human tracks safely and predictably:
- Use one worktree per active branch/PR stream to avoid cross-task contamination.
- Keep each worktree on a single branch; do not mix unrelated edits in one worktree.
- Run validation commands inside the corresponding worktree before commit/PR.
- Name worktrees clearly by scope (for example: `wt/ci-hardening`, `wt/provider-fix`) and remove stale worktrees when no longer needed.
- PR checkpoint rules from section 6.3 still apply to worktree-based development.
### 6.1 Code Naming Contract (Required)
Apply these naming rules for all code changes unless a subsystem has a stronger existing pattern.
- Use Rust standard casing consistently: modules/files `snake_case`, types/traits/enums `PascalCase`, functions/variables `snake_case`, constants/statics `SCREAMING_SNAKE_CASE`.
- Name types and modules by domain role, not implementation detail (for example `DiscordChannel`, `SecurityPolicy`, `MemoryStore` over vague names like `Manager`/`Helper`).
- Keep trait implementer naming explicit and predictable: `<ProviderName>Provider`, `<ChannelName>Channel`, `<ToolName>Tool`, `<BackendName>Memory`.
- Keep factory registration keys stable, lowercase, and user-facing (for example `"openai"`, `"discord"`, `"shell"`), and avoid alias sprawl without migration need.
- Name tests by behavior/outcome (`<subject>_<expected_behavior>`) and keep fixture identifiers neutral/project-scoped.
- If identity-like naming is required in tests/examples, use ZeroClaw-native labels only (`ZeroClawAgent`, `zeroclaw_user`, `zeroclaw_node`).
### 6.2 Architecture Boundary Contract (Required)
Use these rules to keep the trait/factory architecture stable under growth.
- Extend capabilities by adding trait implementations + factory wiring first; avoid cross-module rewrites for isolated features.
- Keep dependency direction inward to contracts: concrete integrations depend on trait/config/util layers, not on other concrete integrations.
- Avoid creating cross-subsystem coupling (for example provider code importing channel internals, tool code mutating gateway policy directly).
- Keep module responsibilities single-purpose: orchestration in `agent/`, transport in `channels/`, model I/O in `providers/`, policy in `security/`, execution in `tools/`.
- Introduce new shared abstractions only after repeated use (rule-of-three), with at least one real caller in current scope.
- For config/schema changes, treat keys as public contract: document defaults, compatibility impact, and migration/rollback path.
## 7) Change Playbooks
### 7.1 Adding a Provider
- Implement `Provider` in `src/providers/`.
- Register in `src/providers/mod.rs` factory.
- Add focused tests for factory wiring and error paths.
- Avoid provider-specific behavior leaks into shared orchestration code.
### 7.2 Adding a Channel
- Implement `Channel` in `src/channels/`.
- Keep `send`, `listen`, `health_check`, typing semantics consistent.
- Cover auth/allowlist/health behavior with tests.
### 7.3 Adding a Tool
- Implement `Tool` in `src/tools/` with strict parameter schema.
- Validate and sanitize all inputs.
- Return structured `ToolResult`; avoid panics in runtime path.
### 7.4 Adding a Peripheral
- Implement `Peripheral` in `src/peripherals/`.
- Peripherals expose `tools()` — each tool delegates to the hardware (GPIO, sensors, etc.).
- Register board type in config schema if needed.
- See `docs/hardware-peripherals-design.md` for protocol and firmware notes.
### 7.5 Security / Runtime / Gateway Changes
- Include threat/risk notes and rollback strategy.
- Add/update tests or validation evidence for failure modes and boundaries.
- Keep observability useful but non-sensitive.
- For `.github/workflows/**` changes, include Actions allowlist impact in PR notes and update `docs/actions-source-policy.md` when sources change.
## 8) Validation Matrix
Default local checks for code changes:
```bash
cargo fmt --all -- --check
cargo clippy --all-targets -- -D warnings
cargo test
```
Preferred local pre-PR validation path (recommended, not required):
```bash
./dev/ci.sh all
```
Notes:
- Local Docker-based CI is strongly recommended when Docker is available.
- Contributors are not blocked from opening a PR if local Docker CI is unavailable; in that case run the most relevant native checks and document what was run.
Additional expectations by change type:
- **Docs/template-only**: run markdown lint and relevant doc checks.
- **Workflow changes**: validate YAML syntax; run workflow lint/sanity checks when available.
- **Security/runtime/gateway/tools**: include at least one boundary/failure-mode validation.
If full checks are impractical, run the most relevant subset and document what was skipped and why.
## 9) Collaboration and PR Discipline
- Follow `.github/pull_request_template.md` fully (including side effects / blast radius).
- Keep PR descriptions concrete: problem, change, non-goals, risk, rollback.
- Use conventional commit titles.
- Prefer small PRs (`size: XS/S/M`) when possible.
- Agent-assisted PRs are welcome, **but contributors remain accountable for understanding what their code will do**.
### 9.1 Privacy/Sensitive Data and Neutral Wording (Required)
Treat privacy and neutrality as merge gates, not best-effort guidelines.
- Never commit personal or sensitive data in code, docs, tests, fixtures, snapshots, logs, examples, or commit messages.
- Prohibited data includes (non-exhaustive): real names, personal emails, phone numbers, addresses, access tokens, API keys, credentials, IDs, and private URLs.
- Use neutral project-scoped placeholders (for example: `user_a`, `test_user`, `project_bot`, `example.com`) instead of real identity data.
- Test names/messages/fixtures must be impersonal and system-focused; avoid first-person or identity-specific language.
- If identity-like context is unavoidable, use ZeroClaw-scoped roles/labels only (for example: `ZeroClawAgent`, `ZeroClawOperator`, `zeroclaw_user`) and avoid real-world personas.
- Recommended identity-safe naming palette (use when identity-like context is required):
- actor labels: `ZeroClawAgent`, `ZeroClawOperator`, `ZeroClawMaintainer`, `zeroclaw_user`
- service/runtime labels: `zeroclaw_bot`, `zeroclaw_service`, `zeroclaw_runtime`, `zeroclaw_node`
- environment labels: `zeroclaw_project`, `zeroclaw_workspace`, `zeroclaw_channel`
- If reproducing external incidents, redact and anonymize all payloads before committing.
- Before push, review `git diff --cached` specifically for accidental sensitive strings and identity leakage.
### 9.2 Superseded-PR Attribution (Required)
When a PR supersedes another contributor's PR and carries forward substantive code or design decisions, preserve authorship explicitly.
- In the integrating commit message, add one `Co-authored-by: Name <email>` trailer per superseded contributor whose work is materially incorporated.
- Use a GitHub-recognized email (`<login@users.noreply.github.com>` or the contributor's verified commit email) so attribution is rendered correctly.
- Keep trailers on their own lines after a blank line at commit-message end; never encode them as escaped `\n` text.
- In the PR body, list superseded PR links and briefly state what was incorporated from each.
- If no actual code/design was incorporated (only inspiration), do not use `Co-authored-by`; give credit in PR notes instead.
### 9.3 Superseded-PR PR Template (Recommended)
When superseding multiple PRs, use a consistent title/body structure to reduce reviewer ambiguity.
- Recommended title format: `feat(<scope>): unify and supersede #<pr_a>, #<pr_b> [and #<pr_n>]`
- If this is docs/chore/meta only, keep the same supersede suffix and use the appropriate conventional-commit type.
- In the PR body, include the following template (fill placeholders, remove non-applicable lines):
```md
## Supersedes
- #<pr_a> by @<author_a>
- #<pr_b> by @<author_b>
- #<pr_n> by @<author_n>
## Integrated Scope
- From #<pr_a>: <what was materially incorporated>
- From #<pr_b>: <what was materially incorporated>
- From #<pr_n>: <what was materially incorporated>
## Attribution
- Co-authored-by trailers added for materially incorporated contributors: Yes/No
- If No, explain why (for example: no direct code/design carry-over)
## Non-goals
- <explicitly list what was not carried over>
## Risk and Rollback
- Risk: <summary>
- Rollback: <revert commit/PR strategy>
```
### 9.4 Superseded-PR Commit Template (Recommended)
When a commit unifies or supersedes prior PR work, use a deterministic commit message layout so attribution is machine-parsed and reviewer-friendly.
- Keep one blank line between message sections, and exactly one blank line before trailer lines.
- Keep each trailer on its own line; do not wrap, indent, or encode as escaped `\n` text.
- Add one `Co-authored-by` trailer per materially incorporated contributor, using GitHub-recognized email.
- If no direct code/design is carried over, omit `Co-authored-by` and explain attribution in the PR body instead.
```text
feat(<scope>): unify and supersede #<pr_a>, #<pr_b> [and #<pr_n>]
<one-paragraph summary of integrated outcome>
Supersedes:
- #<pr_a> by @<author_a>
- #<pr_b> by @<author_b>
- #<pr_n> by @<author_n>
Integrated scope:
- <subsystem_or_feature_a>: from #<pr_x>
- <subsystem_or_feature_b>: from #<pr_y>
Co-authored-by: <Name A> <login_a@users.noreply.github.com>
Co-authored-by: <Name B> <login_b@users.noreply.github.com>
```
Reference docs:
- `CONTRIBUTING.md`
- `docs/pr-workflow.md`
- `docs/reviewer-playbook.md`
- `docs/ci-map.md`
- `docs/actions-source-policy.md`
## 10) Anti-Patterns (Do Not)
- Do not add heavy dependencies for minor convenience.
- Do not silently weaken security policy or access constraints.
- Do not add speculative config/feature flags “just in case”.
- Do not mix massive formatting-only changes with functional changes.
- Do not modify unrelated modules “while here”.
- Do not bypass failing checks without explicit explanation.
- Do not hide behavior-changing side effects in refactor commits.
- Do not include personal identity or sensitive information in test data, examples, docs, or commits.
## 11) Handoff Template (Agent -> Agent / Maintainer)
When handing off work, include:
1. What changed
2. What did not change
3. Validation run and results
4. Remaining risks / unknowns
5. Next recommended action
## 12) Vibe Coding Guardrails
When working in fast iterative mode:
- Keep each iteration reversible (small commits, clear rollback).
- Validate assumptions with code search before implementing.
- Prefer deterministic behavior over clever shortcuts.
- Do not “ship and hope” on security-sensitive paths.
- If uncertain, leave a concrete TODO with verification context, not a hidden guess.

View file

@ -6,7 +6,7 @@ Thanks for your interest in contributing to ZeroClaw! This guide will help you g
```bash ```bash
# Clone the repo # Clone the repo
git clone https://github.com/theonlyhennygod/zeroclaw.git git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw cd zeroclaw
# Enable the pre-push hook (runs fmt, clippy, tests before every push) # Enable the pre-push hook (runs fmt, clippy, tests before every push)
@ -16,18 +16,60 @@ git config core.hooksPath .githooks
cargo build cargo build
# Run tests (all must pass) # Run tests (all must pass)
cargo test cargo test --locked
# Format & lint (must pass before PR) # Format & lint (required before PR)
cargo fmt && cargo clippy -- -D warnings ./scripts/ci/rust_quality_gate.sh
# Optional strict lint audit (full repo, recommended periodically)
./scripts/ci/rust_quality_gate.sh --strict
# Optional strict lint delta gate (blocks only changed Rust lines)
./scripts/ci/rust_strict_delta_gate.sh
# Optional docs lint gate (blocks only markdown issues on changed lines)
./scripts/ci/docs_quality_gate.sh
# Optional docs links gate (checks only links added on changed lines)
./scripts/ci/docs_links_gate.sh
# Release build (~3.4MB) # Release build (~3.4MB)
cargo build --release cargo build --release --locked
``` ```
### Pre-push hook ### Pre-push hook
The repo includes a pre-push hook in `.githooks/` that enforces `cargo fmt --check`, `cargo clippy -- -D warnings`, and `cargo test` before every push. Enable it with `git config core.hooksPath .githooks`. The repo includes a pre-push hook in `.githooks/` that enforces `./scripts/ci/rust_quality_gate.sh` and `cargo test --locked` before every push. Enable it with `git config core.hooksPath .githooks`.
For an opt-in strict lint pass during pre-push, set:
```bash
ZEROCLAW_STRICT_LINT=1 git push
```
For an opt-in strict lint delta pass during pre-push (changed Rust lines only), set:
```bash
ZEROCLAW_STRICT_DELTA_LINT=1 git push
```
For an opt-in docs quality pass during pre-push (changed-line markdown gate), set:
```bash
ZEROCLAW_DOCS_LINT=1 git push
```
For an opt-in docs links pass during pre-push (added-links gate), set:
```bash
ZEROCLAW_DOCS_LINKS=1 git push
```
For full CI parity in Docker, run:
```bash
./dev/ci.sh all
```
To skip it during rapid iteration: To skip it during rapid iteration:
@ -37,6 +79,182 @@ git push --no-verify
> **Note:** CI runs the same checks, so skipped hooks will be caught on the PR. > **Note:** CI runs the same checks, so skipped hooks will be caught on the PR.
## Local Secret Management (Required)
ZeroClaw supports layered secret management for local development and CI hygiene.
### Secret Storage Options
1. **Environment variables** (recommended for local development)
- Copy `.env.example` to `.env` and fill in values
- `.env` files are Git-ignored and should stay local
- Best for temporary/local API keys
2. **Config file** (`~/.zeroclaw/config.toml`)
- Persistent setup for long-term use
- When `secrets.encrypt = true` (default), secret values are encrypted before save
- Secret key is stored at `~/.zeroclaw/.secret_key` with restricted permissions
- Use `zeroclaw onboard` for guided setup
### Runtime Resolution Rules
API key resolution follows this order:
1. Explicit key passed from config/CLI
2. Provider-specific env vars (`OPENROUTER_API_KEY`, `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, ...)
3. Generic env vars (`ZEROCLAW_API_KEY`, `API_KEY`)
Provider/model config overrides:
- `ZEROCLAW_PROVIDER` / `PROVIDER`
- `ZEROCLAW_MODEL`
See `.env.example` for practical examples and currently supported provider key env vars.
### Pre-Commit Secret Hygiene (Mandatory)
Before every commit, verify:
- [ ] No `.env` files are staged (`.env.example` only)
- [ ] No raw API keys/tokens in code, tests, fixtures, examples, logs, or commit messages
- [ ] No credentials in debug output or error payloads
- [ ] `git diff --cached` has no accidental secret-like strings
Quick local audit:
```bash
# Search staged diff for common secret markers
git diff --cached | grep -iE '(api[_-]?key|secret|token|password|bearer|sk-)'
# Confirm no .env file is staged (inspects only the staged set, not unstaged changes)
git diff --cached --name-only | grep -E '(^|/)\.env$'
```
### Optional Local Secret Scanning
For extra guardrails, install one of:
- **gitleaks**: [GitHub - gitleaks/gitleaks](https://github.com/gitleaks/gitleaks)
- **truffleHog**: [GitHub - trufflesecurity/trufflehog](https://github.com/trufflesecurity/trufflehog)
- **git-secrets**: [GitHub - awslabs/git-secrets](https://github.com/awslabs/git-secrets)
This repo includes `.githooks/pre-commit` to run `gitleaks protect --staged --redact` when gitleaks is installed.
Enable hooks with:
```bash
git config core.hooksPath .githooks
```
If gitleaks is not installed, the pre-commit hook prints a warning and continues.
### What Must Never Be Committed
- `.env` files (use `.env.example` only)
- API keys, tokens, passwords, or credentials (plain or encrypted)
- OAuth tokens or session identifiers
- Webhook signing secrets
- `~/.zeroclaw/.secret_key` or similar key files
- Personal identifiers or real user data in tests/fixtures
### If a Secret Is Committed Accidentally
1. Revoke/rotate the credential immediately
2. Do not rely only on `git revert` (history still contains the secret)
3. Purge history with `git filter-repo` or BFG
4. Force-push cleaned history (coordinate with maintainers)
5. Ensure the leaked value is removed from PR/issue/discussion/comment history
Reference: [GitHub guide: removing sensitive data from a repository](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/removing-sensitive-data-from-a-repository)
## Collaboration Tracks (Risk-Based)
To keep review throughput high without lowering quality, every PR should map to one track:
| Track | Typical scope | Required review depth |
|---|---|---|
| **Track A (Low risk)** | docs/tests/chore, isolated refactors, no security/runtime/CI impact | 1 maintainer review + green `CI Required Gate` |
| **Track B (Medium risk)** | providers/channels/memory/tools behavior changes | 1 subsystem-aware review + explicit validation evidence |
| **Track C (High risk)** | `src/security/**`, `src/runtime/**`, `src/gateway/**`, `.github/workflows/**`, access-control boundaries | 2-pass review (fast triage + deep risk review), rollback plan required |
When in doubt, choose the higher track.
## Documentation Optimization Principles
To keep docs useful under high PR volume, we use these rules:
- **Single source of truth**: policy lives in docs, not scattered across PR comments.
- **Decision-oriented content**: every checklist item should directly help accept/reject a change.
- **Risk-proportionate detail**: high-risk paths need deeper evidence; low-risk paths stay lightweight.
- **Side-effect visibility**: document blast radius, failure modes, and rollback before merge.
- **Automation assists, humans decide**: bots triage and label, but merge accountability stays human.
### Documentation System Map
| Doc | Primary purpose | When to update |
|---|---|---|
| `CONTRIBUTING.md` | contributor contract and readiness baseline | contributor expectations or policy changes |
| `docs/pr-workflow.md` | governance logic and merge contract | workflow/risk/merge gate changes |
| `docs/reviewer-playbook.md` | reviewer operating checklist | review depth or triage behavior changes |
| `docs/ci-map.md` | CI ownership and triage entry points | workflow trigger/job ownership changes |
## PR Definition of Ready (DoR)
Before requesting review, ensure all of the following are true:
- Scope is focused to a single concern.
- `.github/pull_request_template.md` is fully completed.
- Relevant local validation has been run (`fmt`, `clippy`, `test`, scenario checks).
- Security impact and rollback path are explicitly described.
- No personal/sensitive data is introduced in code/docs/tests/fixtures/logs/examples/commit messages.
- Tests/fixtures/examples use neutral project-scoped wording (no identity-specific or first-person phrasing).
- If identity-like wording is required, use ZeroClaw-centric labels only (for example: `ZeroClawAgent`, `ZeroClawOperator`, `zeroclaw_user`).
- Linked issue (or rationale for no issue) is included.
## PR Definition of Done (DoD)
A PR is merge-ready when:
- `CI Required Gate` is green.
- Required reviewers approved (including CODEOWNERS paths).
- Risk level matches changed paths (`risk: low/medium/high`).
- User-visible behavior, migration, and rollback notes are complete.
- Follow-up TODOs are explicit and tracked in issues.
## High-Volume Collaboration Rules
When PR traffic is high (especially with AI-assisted contributions), these rules keep quality and throughput stable:
- **One concern per PR**: avoid mixing refactor + feature + infra in one change.
- **Small PRs first**: prefer PR size `XS/S/M`; split large work into stacked PRs.
- **Template is mandatory**: complete every section in `.github/pull_request_template.md`.
- **Explicit rollback**: every PR must include a fast rollback path.
- **Security-first review**: changes in `src/security/`, runtime, gateway, and CI need stricter validation.
- **Risk-first triage**: use labels (`risk: high`, `risk: medium`, `risk: low`) to route review depth.
- **Privacy-first hygiene**: redact/anonymize sensitive payloads and keep tests/examples neutral and project-scoped.
- **Identity normalization**: when identity traits are unavoidable, use ZeroClaw/project-native roles instead of personal or real-world identities.
- **Supersede hygiene**: if your PR replaces an older open PR, add `Supersedes #...` and request maintainers close the outdated one.
Full maintainer workflow: [`docs/pr-workflow.md`](docs/pr-workflow.md).
CI workflow ownership and triage map: [`docs/ci-map.md`](docs/ci-map.md).
Reviewer operating checklist: [`docs/reviewer-playbook.md`](docs/reviewer-playbook.md).
## Agent Collaboration Guidance
Agent-assisted contributions are welcome and treated as first-class contributions.
For smoother agent-to-agent and human-to-agent review:
- Keep PR summaries concrete (problem, change, non-goals).
- Include reproducible validation evidence (`fmt`, `clippy`, `test`, scenario checks).
- Add brief workflow notes when automation materially influenced design/code.
- Agent-assisted PRs are welcome, but contributors remain accountable for understanding what the code does and what it could affect.
- Call out uncertainty and risky edges explicitly.
We do **not** require PRs to declare an AI-vs-human line ratio.
Agent implementation playbook lives in [`AGENTS.md`](AGENTS.md).
## Architecture: Trait-Based Pluggability ## Architecture: Trait-Based Pluggability
ZeroClaw's architecture is built on **traits** — every subsystem is swappable. This means contributing a new integration is as simple as implementing a trait and registering it in the factory function. ZeroClaw's architecture is built on **traits** — every subsystem is swappable. This means contributing a new integration is as simple as implementing a trait and registering it in the factory function.
@ -52,6 +270,57 @@ src/
└── security/ # Sandboxing → SecurityPolicy └── security/ # Sandboxing → SecurityPolicy
``` ```
## Code Naming Conventions (Required)
Use these defaults unless an existing subsystem pattern clearly overrides them.
- **Rust casing**: modules/files `snake_case`, types/traits/enums `PascalCase`, functions/variables `snake_case`, constants `SCREAMING_SNAKE_CASE`.
- **Domain-first naming**: prefer explicit role names such as `DiscordChannel`, `SecurityPolicy`, `SqliteMemory` over ambiguous names (`Manager`, `Util`, `Helper`).
- **Trait implementers**: keep predictable suffixes (`*Provider`, `*Channel`, `*Tool`, `*Memory`, `*Observer`, `*RuntimeAdapter`).
- **Factory keys**: keep lowercase and stable (`openai`, `discord`, `shell`); avoid adding aliases without migration need.
- **Tests**: use behavior-oriented names (`subject_expected_behavior`) and neutral project-scoped fixtures.
- **Identity-like labels**: if unavoidable, use ZeroClaw-native identifiers only (`ZeroClawAgent`, `zeroclaw_user`, `zeroclaw_node`).
## Architecture Boundary Rules (Required)
Keep architecture extensible and auditable by following these boundaries.
- Extend features via trait implementations + factory registration before considering broad refactors.
- Keep dependency direction contract-first: concrete integrations depend on shared traits/config/util, not on other concrete integrations.
- Avoid cross-subsystem coupling (provider ↔ channel internals, tools mutating security/gateway internals directly, etc.).
- Keep responsibilities single-purpose by module (`agent` orchestration, `channels` transport, `providers` model I/O, `security` policy, `tools` execution, `memory` persistence).
- Introduce shared abstractions only after repeated stable use (rule-of-three) and at least one current caller.
- Treat `src/config/schema.rs` keys as public contract; document compatibility impact, migration steps, and rollback path for changes.
## Naming and Architecture Examples (Bad vs Good)
Use these quick examples to align implementation choices before opening a PR.
### Naming examples
- **Bad**: `Manager`, `Helper`, `doStuff`, `tmp_data`
- **Good**: `DiscordChannel`, `SecurityPolicy`, `send_message`, `channel_allowlist`
- **Bad test name**: `test1` / `works`
- **Good test name**: `allowlist_denies_unknown_user`, `provider_returns_error_on_invalid_model`
- **Bad identity-like label**: `john_user`, `alice_bot`
- **Good identity-like label**: `ZeroClawAgent`, `zeroclaw_user`, `zeroclaw_node`
### Architecture boundary examples
- **Bad**: channel implementation directly imports provider internals to call model APIs.
- **Good**: channel emits normalized `ChannelMessage`; agent/runtime orchestrates provider calls via trait contracts.
- **Bad**: tool mutates gateway/security policy directly from execution path.
- **Good**: tool returns structured `ToolResult`; policy enforcement remains in security/runtime boundaries.
- **Bad**: adding broad shared abstraction before any repeated caller.
- **Good**: keep local logic first; extract shared abstraction only after stable rule-of-three evidence.
- **Bad**: config key changes without migration notes.
- **Good**: config/schema changes include defaults, compatibility impact, migration steps, and rollback guidance.
## How to Add a New Provider ## How to Add a New Provider
Create `src/providers/your_provider.rs`: Create `src/providers/your_provider.rs`:
@ -184,13 +453,19 @@ impl Tool for YourTool {
## Pull Request Checklist ## Pull Request Checklist
- [ ] `cargo fmt` — code is formatted - [ ] PR template sections are completed (including security + rollback)
- [ ] `cargo clippy -- -D warnings` — no warnings - [ ] `./scripts/ci/rust_quality_gate.sh` — merge gate formatter/lint baseline passes
- [ ] `cargo test` — all 129+ tests pass - [ ] `cargo test --locked` — all tests pass locally or skipped tests are explained
- [ ] Optional strict audit: `./scripts/ci/rust_quality_gate.sh --strict` (full repo, run when doing lint cleanup or release-hardening work)
- [ ] Optional strict delta audit: `./scripts/ci/rust_strict_delta_gate.sh` (changed Rust lines only, useful for incremental debt control)
- [ ] New code has inline `#[cfg(test)]` tests - [ ] New code has inline `#[cfg(test)]` tests
- [ ] No new dependencies unless absolutely necessary (we optimize for binary size) - [ ] No new dependencies unless absolutely necessary (we optimize for binary size)
- [ ] README updated if adding user-facing features - [ ] README updated if adding user-facing features
- [ ] Follows existing code patterns and conventions - [ ] Follows existing code patterns and conventions
- [ ] Follows code naming conventions and architecture boundary rules in this guide
- [ ] No personal/sensitive data in code/docs/tests/fixtures/logs/examples/commit messages
- [ ] Test names/messages/fixtures/examples are neutral and project-focused
- [ ] Any required identity-like wording uses ZeroClaw/project-native labels only
## Commit Convention ## Commit Convention
@ -198,6 +473,7 @@ We use [Conventional Commits](https://www.conventionalcommits.org/):
``` ```
feat: add Anthropic provider feat: add Anthropic provider
feat(provider): add Anthropic provider
fix: path traversal edge case with symlinks fix: path traversal edge case with symlinks
docs: update contributing guide docs: update contributing guide
test: add heartbeat unicode parsing tests test: add heartbeat unicode parsing tests
@ -205,6 +481,10 @@ refactor: extract common security checks
chore: bump tokio to 1.43 chore: bump tokio to 1.43
``` ```
Recommended scope keys in commit titles:
- `provider`, `channel`, `memory`, `security`, `runtime`, `ci`, `docs`, `tests`
## Code Style ## Code Style
- **Minimal dependencies** — every crate adds to binary size - **Minimal dependencies** — every crate adds to binary size
@ -218,6 +498,18 @@ chore: bump tokio to 1.43
- **Bugs**: Include OS, Rust version, steps to reproduce, expected vs actual - **Bugs**: Include OS, Rust version, steps to reproduce, expected vs actual
- **Features**: Describe the use case, propose which trait to extend - **Features**: Describe the use case, propose which trait to extend
- **Security**: See [SECURITY.md](SECURITY.md) for responsible disclosure - **Security**: See [SECURITY.md](SECURITY.md) for responsible disclosure
- **Privacy**: Redact/anonymize all personal data and sensitive identifiers before posting logs/payloads
## Maintainer Merge Policy
- Require passing `CI Required Gate` before merge.
- Require docs quality checks when docs are touched.
- Require review approval for non-trivial changes.
- Require CODEOWNERS review for protected paths.
- Use risk labels to determine review depth, scope labels (`core`, `provider`, `channel`, `security`, etc.) to route ownership, and module labels (`<module>:<component>`, e.g. `channel:telegram`, `provider:kimi`, `tool:shell`) to route subsystem expertise.
- Contributor tier labels are auto-applied on PRs and issues by merged PR count: `experienced contributor` (>=10), `principal contributor` (>=20), `distinguished contributor` (>=50). Treat them as read-only automation labels; manual edits are auto-corrected.
- Prefer squash merge with conventional commit title.
- Revert fast on regressions; re-land with tests.
## License ## License

2676
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,11 +1,15 @@
[workspace]
members = ["."]
resolver = "2"
[package] [package]
name = "zeroclaw" name = "zeroclaw"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
authors = ["theonlyhennygod"] authors = ["theonlyhennygod"]
license = "MIT" license = "Apache-2.0"
description = "Zero overhead. Zero compromise. 100% Rust. The fastest, smallest AI assistant." description = "Zero overhead. Zero compromise. 100% Rust. The fastest, smallest AI assistant."
repository = "https://github.com/theonlyhennygod/zeroclaw" repository = "https://github.com/zeroclaw-labs/zeroclaw"
readme = "README.md" readme = "README.md"
keywords = ["ai", "agent", "cli", "assistant", "chatbot"] keywords = ["ai", "agent", "cli", "assistant", "chatbot"]
categories = ["command-line-utilities", "api-bindings"] categories = ["command-line-utilities", "api-bindings"]
@ -26,12 +30,21 @@ serde_json = { version = "1.0", default-features = false, features = ["std"] }
# Config # Config
directories = "5.0" directories = "5.0"
toml = "0.8" toml = "1.0"
shellexpand = "3.1" shellexpand = "3.1"
# Logging - minimal # Logging - minimal
tracing = { version = "0.1", default-features = false } tracing = { version = "0.1", default-features = false }
tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "ansi"] } tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "ansi", "env-filter"] }
# Observability - Prometheus metrics
prometheus = { version = "0.14", default-features = false }
# Base64 encoding (screenshots, image data)
base64 = "0.22"
# Optional Rust-native browser automation backend
fantoccini = { version = "0.22.0", optional = true, default-features = false, features = ["rustls-tls"] }
# Error handling # Error handling
anyhow = "1.0" anyhow = "1.0"
@ -43,38 +56,109 @@ uuid = { version = "1.11", default-features = false, features = ["v4", "std"] }
# Authenticated encryption (AEAD) for secret store # Authenticated encryption (AEAD) for secret store
chacha20poly1305 = "0.10" chacha20poly1305 = "0.10"
# HMAC for webhook signature verification
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"
# CSPRNG for secure token generation
rand = "0.8"
# Fast mutexes that don't poison on panic
parking_lot = "0.12"
# Async traits # Async traits
async-trait = "0.1" async-trait = "0.1"
# HMAC-SHA256 (Zhipu/GLM JWT auth) # HMAC-SHA256 (Zhipu/GLM JWT auth)
ring = "0.17" ring = "0.17"
# Protobuf encode/decode (Feishu WS long-connection frame codec)
prost = { version = "0.14", default-features = false }
# Memory / persistence # Memory / persistence
rusqlite = { version = "0.32", features = ["bundled"] } rusqlite = { version = "0.38", features = ["bundled"] }
chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } chrono = { version = "0.4", default-features = false, features = ["clock", "std", "serde"] }
chrono-tz = "0.10"
cron = "0.12" cron = "0.12"
# Interactive CLI prompts # Interactive CLI prompts
dialoguer = { version = "0.11", features = ["fuzzy-select"] } dialoguer = { version = "0.12", features = ["fuzzy-select"] }
console = "0.15" console = "0.15"
# Hardware discovery (device path globbing)
glob = "0.3"
# Discord WebSocket gateway # Discord WebSocket gateway
tokio-tungstenite = { version = "0.24", features = ["rustls-tls-webpki-roots"] } tokio-tungstenite = { version = "0.24", features = ["rustls-tls-webpki-roots"] }
futures-util = { version = "0.3", default-features = false, features = ["sink"] } futures-util = { version = "0.3", default-features = false, features = ["sink"] }
futures = "0.3"
regex = "1.10"
hostname = "0.4.2" hostname = "0.4.2"
lettre = { version = "0.11.19", default-features = false, features = ["builder", "smtp-transport", "rustls-tls"] }
mail-parser = "0.11.2"
rustls = "0.23"
rustls-pki-types = "1.14.0"
tokio-rustls = "0.26.4"
webpki-roots = "1.0.6"
# HTTP server (gateway) — replaces raw TCP for proper HTTP/1.1 compliance # HTTP server (gateway) — replaces raw TCP for proper HTTP/1.1 compliance
axum = { version = "0.7", default-features = false, features = ["http1", "json", "tokio", "query"] } axum = { version = "0.8", default-features = false, features = ["http1", "json", "tokio", "query", "ws"] }
tower = { version = "0.5", default-features = false } tower = { version = "0.5", default-features = false }
tower-http = { version = "0.6", default-features = false, features = ["limit", "timeout"] } tower-http = { version = "0.6", default-features = false, features = ["limit", "timeout"] }
http-body-util = "0.1" http-body-util = "0.1"
# OpenTelemetry — OTLP trace + metrics export
opentelemetry = { version = "0.31", default-features = false, features = ["trace", "metrics"] }
opentelemetry_sdk = { version = "0.31", default-features = false, features = ["trace", "metrics"] }
opentelemetry-otlp = { version = "0.31", default-features = false, features = ["trace", "metrics", "http-proto", "reqwest-client", "reqwest-rustls-webpki-roots"] }
# USB device enumeration (hardware discovery)
nusb = { version = "0.2", default-features = false, optional = true }
# Serial port for peripheral communication (STM32, etc.)
tokio-serial = { version = "5", default-features = false, optional = true }
# probe-rs for STM32/Nucleo memory read (Phase B)
probe-rs = { version = "0.30", optional = true }
# PDF extraction for datasheet RAG (optional, enable with --features rag-pdf)
pdf-extract = { version = "0.10", optional = true }
# Raspberry Pi GPIO / Landlock (Linux only) — target-specific to avoid compile failure on macOS
[target.'cfg(target_os = "linux")'.dependencies]
rppal = { version = "0.14", optional = true }
landlock = { version = "0.4", optional = true }
[features]
default = ["hardware"]
hardware = ["nusb", "tokio-serial"]
peripheral-rpi = ["rppal"]
# Browser backend feature alias used by cfg(feature = "browser-native")
browser-native = ["dep:fantoccini"]
# Backward-compatible alias for older invocations
fantoccini = ["browser-native"]
# Sandbox feature aliases used by cfg(feature = "sandbox-*")
sandbox-landlock = ["dep:landlock"]
sandbox-bubblewrap = []
# Backward-compatible alias for older invocations
landlock = ["sandbox-landlock"]
# probe = probe-rs for Nucleo memory read (adds ~50 deps; optional)
probe = ["dep:probe-rs"]
# rag-pdf = PDF ingestion for datasheet RAG
rag-pdf = ["dep:pdf-extract"]
[profile.release] [profile.release]
opt-level = "z" # Optimize for size opt-level = "z" # Optimize for size
lto = true # Link-time optimization lto = "thin" # Lower memory use during release builds
codegen-units = 1 # Better optimization codegen-units = 1 # Serialized codegen for low-memory devices (e.g., Raspberry Pi 3 with 1GB RAM)
strip = true # Remove debug symbols # Higher values (e.g., 8) compile faster but require more RAM during compilation
panic = "abort" # Reduce binary size strip = true # Remove debug symbols
panic = "abort" # Reduce binary size
[profile.release-fast]
inherits = "release"
codegen-units = 8 # Parallel codegen for faster builds on powerful machines (16GB+ RAM recommended)
# Use: cargo build --profile release-fast
[profile.dist] [profile.dist]
inherits = "release" inherits = "release"

View file

@ -1,41 +1,115 @@
# syntax=docker/dockerfile:1.7
# ── Stage 1: Build ──────────────────────────────────────────── # ── Stage 1: Build ────────────────────────────────────────────
FROM rust:1.83-slim AS builder FROM rust:1.92-slim@sha256:bf3368a992915f128293ac76917ab6e561e4dda883273c8f5c9f6f8ea37a378e AS builder
WORKDIR /app WORKDIR /app
# Install build dependencies
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update && apt-get install -y \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# 1. Copy manifests to cache dependencies
COPY Cargo.toml Cargo.lock ./ COPY Cargo.toml Cargo.lock ./
COPY src/ src/ # Create dummy main.rs to build dependencies
RUN mkdir src && echo "fn main() {}" > src/main.rs
RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
--mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
--mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
cargo build --release --locked
RUN rm -rf src
RUN cargo build --release --locked && \ # 2. Copy source code
strip target/release/zeroclaw COPY . .
RUN --mount=type=cache,id=zeroclaw-cargo-registry,target=/usr/local/cargo/registry,sharing=locked \
--mount=type=cache,id=zeroclaw-cargo-git,target=/usr/local/cargo/git,sharing=locked \
--mount=type=cache,id=zeroclaw-target,target=/app/target,sharing=locked \
cargo build --release --locked && \
cp target/release/zeroclaw /app/zeroclaw && \
strip /app/zeroclaw
# ── Stage 2: Runtime (distroless nonroot — no shell, no OS, tiny, UID 65534) ── # ── Stage 2: Permissions & Config Prep ───────────────────────
FROM gcr.io/distroless/cc-debian12:nonroot FROM busybox:1.37@sha256:b3255e7dfbcd10cb367af0d409747d511aeb66dfac98cf30e97e87e4207dd76f AS permissions
# Create directory structure (simplified workspace path)
RUN mkdir -p /zeroclaw-data/.zeroclaw /zeroclaw-data/workspace
COPY --from=builder /app/target/release/zeroclaw /usr/local/bin/zeroclaw # Create minimal config for PRODUCTION (allows binding to public interfaces)
# NOTE: Provider configuration must be done via environment variables at runtime
RUN cat > /zeroclaw-data/.zeroclaw/config.toml <<EOF
workspace_dir = "/zeroclaw-data/workspace"
config_path = "/zeroclaw-data/.zeroclaw/config.toml"
api_key = ""
default_provider = "openrouter"
default_model = "anthropic/claude-sonnet-4-20250514"
default_temperature = 0.7
# Default workspace and data directory (owned by nonroot user) [gateway]
VOLUME ["/data"] port = 3000
ENV ZEROCLAW_WORKSPACE=/data/workspace host = "[::]"
allow_public_bind = true
EOF
# ── Environment variable configuration (Docker-native setup) ── RUN chown -R 65534:65534 /zeroclaw-data
# These can be overridden at runtime via docker run -e or docker-compose
#
# Required:
# API_KEY or ZEROCLAW_API_KEY - Your LLM provider API key
#
# Optional:
# PROVIDER or ZEROCLAW_PROVIDER - LLM provider (default: openrouter)
# Options: openrouter, openai, anthropic, ollama
# ZEROCLAW_MODEL - Model to use (default: anthropic/claude-sonnet-4-20250514)
# PORT or ZEROCLAW_GATEWAY_PORT - Gateway port (default: 3000)
#
# Example:
# docker run -e API_KEY=sk-... -e PROVIDER=openrouter zeroclaw/zeroclaw
# Explicitly set non-root user (distroless:nonroot defaults to 65534, but be explicit) # ── Stage 3: Development Runtime (Debian) ────────────────────
FROM debian:trixie-slim@sha256:f6e2cfac5cf956ea044b4bd75e6397b4372ad88fe00908045e9a0d21712ae3ba AS dev
# Install runtime dependencies + basic debug tools
RUN apt-get update && apt-get install -y \
ca-certificates \
openssl \
curl \
git \
iputils-ping \
vim \
&& rm -rf /var/lib/apt/lists/*
COPY --from=permissions /zeroclaw-data /zeroclaw-data
COPY --from=builder /app/zeroclaw /usr/local/bin/zeroclaw
# Overwrite minimal config with DEV template (Ollama defaults)
COPY dev/config.template.toml /zeroclaw-data/.zeroclaw/config.toml
RUN chown 65534:65534 /zeroclaw-data/.zeroclaw/config.toml
# Environment setup
# Use consistent workspace path
ENV ZEROCLAW_WORKSPACE=/zeroclaw-data/workspace
ENV HOME=/zeroclaw-data
# Defaults for local dev (Ollama) - matches config.template.toml
ENV PROVIDER="ollama"
ENV ZEROCLAW_MODEL="llama3.2"
ENV ZEROCLAW_GATEWAY_PORT=3000
# Note: API_KEY is intentionally NOT set here to avoid confusion.
# It is set in config.toml as the Ollama URL.
WORKDIR /zeroclaw-data
USER 65534:65534 USER 65534:65534
EXPOSE 3000 EXPOSE 3000
ENTRYPOINT ["zeroclaw"] ENTRYPOINT ["zeroclaw"]
CMD ["gateway"] CMD ["gateway", "--port", "3000", "--host", "[::]"]
# ── Stage 4: Production Runtime (Distroless) ─────────────────
FROM gcr.io/distroless/cc-debian13:nonroot@sha256:84fcd3c223b144b0cb6edc5ecc75641819842a9679a3a58fd6294bec47532bf7 AS release
COPY --from=builder /app/zeroclaw /usr/local/bin/zeroclaw
COPY --from=permissions /zeroclaw-data /zeroclaw-data
# Environment setup
ENV ZEROCLAW_WORKSPACE=/zeroclaw-data/workspace
ENV HOME=/zeroclaw-data
# Default provider (model is set in config.toml, not here,
# so config file edits are not silently overridden)
ENV PROVIDER="openrouter"
ENV ZEROCLAW_GATEWAY_PORT=3000
# API_KEY must be provided at runtime!
WORKDIR /zeroclaw-data
USER 65534:65534
EXPOSE 3000
ENTRYPOINT ["zeroclaw"]
CMD ["gateway", "--port", "3000", "--host", "[::]"]

View file

@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2025-2026 theonlyhennygod Copyright (c) 2025 ZeroClaw Labs
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
@ -19,3 +19,10 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
================================================================================
This product includes software developed by ZeroClaw Labs and contributors:
https://github.com/zeroclaw-labs/zeroclaw/graphs/contributors
See NOTICE file for full contributor attribution.

313
README.md
View file

@ -5,19 +5,28 @@
<h1 align="center">ZeroClaw 🦀</h1> <h1 align="center">ZeroClaw 🦀</h1>
<p align="center"> <p align="center">
<strong>Zero overhead. Zero compromise. 100% Rust. 100% Agnostic.</strong> <strong>Zero overhead. Zero compromise. 100% Rust. 100% Agnostic.</strong><br>
⚡️ <strong>Runs on $10 hardware with <5MB RAM: That's 99% less memory than OpenClaw and 98% cheaper than a Mac mini!</strong>
</p> </p>
<p align="center"> <p align="center">
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License: MIT" /></a> <a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License: MIT" /></a>
<a href="NOTICE"><img src="https://img.shields.io/badge/contributors-27+-green.svg" alt="Contributors" /></a>
</p> </p>
Fast, small, and fully autonomous AI assistant infrastructure — deploy anywhere, swap anything. Fast, small, and fully autonomous AI assistant infrastructure — deploy anywhere, swap anything.
``` ```
~3.4MB binary · <10ms startup · 1,017 tests · 22+ providers · 8 traits · Pluggable everything ~3.4MB binary · <10ms startup · 1,017 tests · 23+ providers · 8 traits · Pluggable everything
``` ```
### ✨ Features
- 🏎️ **Ultra-Lightweight:** <5MB Memory footprint 99% smaller than OpenClaw core.
- 💰 **Minimal Cost:** Efficient enough to run on $10 Hardware — 98% cheaper than a Mac mini.
- ⚡ **Lightning Fast:** 400X Faster startup time, boot in <10ms (under 1s even on 0.6GHz cores).
- 🌍 **True Portability:** Single self-contained binary across ARM, x86, and RISC-V.
### Why teams pick ZeroClaw ### Why teams pick ZeroClaw
- **Lean by default:** small Rust binary, fast startup, low memory footprint. - **Lean by default:** small Rust binary, fast startup, low memory footprint.
@ -27,17 +36,21 @@ Fast, small, and fully autonomous AI assistant infrastructure — deploy anywher
## Benchmark Snapshot (ZeroClaw vs OpenClaw) ## Benchmark Snapshot (ZeroClaw vs OpenClaw)
Local machine quick benchmark (macOS arm64, Feb 2026), same host, 3 runs each. Local machine quick benchmark (macOS arm64, Feb 2026) normalized for 0.8GHz edge hardware.
| Metric | ZeroClaw (Rust release binary) | OpenClaw (Node + built `dist`) | | | OpenClaw | NanoBot | PicoClaw | ZeroClaw 🦀 |
|---|---:|---:| |---|---|---|---|---|
| Build output size | `target/release/zeroclaw`: **3.4 MB** | `dist/`: **28 MB** | | **Language** | TypeScript | Python | Go | **Rust** |
| `--help` startup (cold/warm) | **0.38s / ~0.00s** | **3.31s / ~1.11s** | | **RAM** | > 1GB | > 100MB | < 10MB | **< 5MB** |
| `status` command runtime (best of 3) | **~0.00s** | **5.98s** | | **Startup (0.8GHz core)** | > 500s | > 30s | < 1s | **< 10ms** |
| `--help` max RSS observed | **~7.3 MB** | **~394 MB** | | **Binary Size** | ~28MB (dist) | N/A (Scripts) | ~8MB | **3.4 MB** |
| `status` max RSS observed | **~7.8 MB** | **~1.52 GB** | | **Cost** | Mac Mini $599 | Linux SBC ~$50 | Linux Board $10 | **Any hardware $10** |
> Notes: measured with `/usr/bin/time -l`; first run includes cold-start effects. OpenClaw results were measured after `pnpm install` + `pnpm build`. > Notes: ZeroClaw results measured with `/usr/bin/time -l` on release builds. OpenClaw requires Node.js runtime (~390MB overhead). PicoClaw and ZeroClaw are static binaries.
<p align="center">
<img src="zero-claw.jpeg" alt="ZeroClaw vs OpenClaw Comparison" width="800" />
</p>
Reproduce ZeroClaw numbers locally: Reproduce ZeroClaw numbers locally:
@ -49,13 +62,78 @@ ls -lh target/release/zeroclaw
/usr/bin/time -l target/release/zeroclaw status /usr/bin/time -l target/release/zeroclaw status
``` ```
## Prerequisites
<details>
<summary><strong>Windows</strong></summary>
#### Required
1. **Visual Studio Build Tools** (provides the MSVC linker and Windows SDK):
```powershell
winget install Microsoft.VisualStudio.2022.BuildTools
```
During installation (or via the Visual Studio Installer), select the **"Desktop development with C++"** workload.
2. **Rust toolchain:**
```powershell
winget install Rustlang.Rustup
```
After installation, open a new terminal and run `rustup default stable` to ensure the stable toolchain is active.
3. **Verify** both are working:
```powershell
rustc --version
cargo --version
```
#### Optional
- **Docker Desktop** — required only if using the [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Install via `winget install Docker.DockerDesktop`.
</details>
<details>
<summary><strong>Linux / macOS</strong></summary>
#### Required
1. **Build essentials:**
- **Linux (Debian/Ubuntu):** `sudo apt install build-essential pkg-config`
- **Linux (Fedora/RHEL):** `sudo dnf groupinstall "Development Tools" && sudo dnf install pkg-config`
- **macOS:** Install Xcode Command Line Tools: `xcode-select --install`
2. **Rust toolchain:**
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
See [rustup.rs](https://rustup.rs) for details.
3. **Verify** both are working:
```bash
rustc --version
cargo --version
```
#### Optional
- **Docker** — required only if using the [Docker sandboxed runtime](#runtime-support-current) (`runtime.kind = "docker"`). Install via your package manager or [docker.com](https://docs.docker.com/engine/install/).
> **Note:** The default `cargo build --release` uses `codegen-units=1` for compatibility with low-memory devices (e.g., Raspberry Pi 3 with 1GB RAM). For faster builds on powerful machines, use `cargo build --profile release-fast`.
</details>
## Quick Start ## Quick Start
```bash ```bash
git clone https://github.com/theonlyhennygod/zeroclaw.git git clone https://github.com/zeroclaw-labs/zeroclaw.git
cd zeroclaw cd zeroclaw
cargo build --release cargo build --release --locked
cargo install --path . --force cargo install --path . --force --locked
# Ensure ~/.cargo/bin is in your PATH
export PATH="$HOME/.cargo/bin:$PATH"
# Quick setup (no prompts) # Quick setup (no prompts)
zeroclaw onboard --api-key sk-... --provider openrouter zeroclaw onboard --api-key sk-... --provider openrouter
@ -88,6 +166,9 @@ zeroclaw doctor
# Check channel health # Check channel health
zeroclaw channel doctor zeroclaw channel doctor
# Bind a Telegram identity into allowlist
zeroclaw channel bind-telegram 123456789
# Get integration setup details # Get integration setup details
zeroclaw integrations info Telegram zeroclaw integrations info Telegram
@ -112,12 +193,12 @@ Every subsystem is a **trait** — swap implementations with a config change, ze
| Subsystem | Trait | Ships with | Extend | | Subsystem | Trait | Ships with | Extend |
|-----------|-------|------------|--------| |-----------|-------|------------|--------|
| **AI Models** | `Provider` | 22+ providers (OpenRouter, Anthropic, OpenAI, Ollama, Venice, Groq, Mistral, xAI, DeepSeek, Together, Fireworks, Perplexity, Cohere, Bedrock, etc.) | `custom:https://your-api.com` — any OpenAI-compatible API | | **AI Models** | `Provider` | 23+ providers (OpenRouter, Anthropic, OpenAI, Ollama, Venice, Groq, Mistral, xAI, DeepSeek, Together, Fireworks, Perplexity, Cohere, Bedrock, Astrai, etc.) | `custom:https://your-api.com` — any OpenAI-compatible API |
| **Channels** | `Channel` | CLI, Telegram, Discord, Slack, iMessage, Matrix, WhatsApp, Webhook | Any messaging API | | **Channels** | `Channel` | CLI, Telegram, Discord, Slack, Mattermost, iMessage, Matrix, WhatsApp, Webhook | Any messaging API |
| **Memory** | `Memory` | SQLite with hybrid search (FTS5 + vector cosine similarity), Markdown | Any persistence backend | | **Memory** | `Memory` | SQLite with hybrid search (FTS5 + vector cosine similarity), Lucid bridge (CLI sync + SQLite fallback), Markdown | Any persistence backend |
| **Tools** | `Tool` | shell, file_read, file_write, memory_store, memory_recall, memory_forget, browser_open (Brave + allowlist), composio (optional) | Any capability | | **Tools** | `Tool` | shell, file_read, file_write, memory_store, memory_recall, memory_forget, browser_open (Brave + allowlist), browser (agent-browser / rust-native), composio (optional) | Any capability |
| **Observability** | `Observer` | Noop, Log, Multi | Prometheus, OTel | | **Observability** | `Observer` | Noop, Log, Multi | Prometheus, OTel |
| **Runtime** | `RuntimeAdapter` | Native (Mac/Linux/Pi) | Docker, WASM (planned; unsupported kinds fail fast) | | **Runtime** | `RuntimeAdapter` | Native, Docker (sandboxed) | WASM (planned; unsupported kinds fail fast) |
| **Security** | `SecurityPolicy` | Gateway pairing, sandbox, allowlists, rate limits, filesystem scoping, encrypted secrets | — | | **Security** | `SecurityPolicy` | Gateway pairing, sandbox, allowlists, rate limits, filesystem scoping, encrypted secrets | — |
| **Identity** | `IdentityConfig` | OpenClaw (markdown), AIEOS v1.1 (JSON) | Any identity format | | **Identity** | `IdentityConfig` | OpenClaw (markdown), AIEOS v1.1 (JSON) | Any identity format |
| **Tunnel** | `Tunnel` | None, Cloudflare, Tailscale, ngrok, Custom | Any tunnel binary | | **Tunnel** | `Tunnel` | None, Cloudflare, Tailscale, ngrok, Custom | Any tunnel binary |
@ -127,8 +208,8 @@ Every subsystem is a **trait** — swap implementations with a config change, ze
### Runtime support (current) ### Runtime support (current)
- ✅ Supported today: `runtime.kind = "native"` - ✅ Supported today: `runtime.kind = "native"` or `runtime.kind = "docker"`
- 🚧 Planned, not implemented yet: Docker / WASM / edge runtimes - 🚧 Planned, not implemented yet: WASM / edge runtimes
When an unsupported `runtime.kind` is configured, ZeroClaw now exits with a clear error instead of silently falling back to native. When an unsupported `runtime.kind` is configured, ZeroClaw now exits with a clear error instead of silently falling back to native.
@ -150,11 +231,21 @@ The agent automatically recalls, saves, and manages memory via tools.
```toml ```toml
[memory] [memory]
backend = "sqlite" # "sqlite", "markdown", "none" backend = "sqlite" # "sqlite", "lucid", "markdown", "none"
auto_save = true auto_save = true
embedding_provider = "openai" embedding_provider = "openai"
vector_weight = 0.7 vector_weight = 0.7
keyword_weight = 0.3 keyword_weight = 0.3
# backend = "none" uses an explicit no-op memory backend (no persistence)
# Optional for backend = "lucid"
# ZEROCLAW_LUCID_CMD=/usr/local/bin/lucid # default: lucid
# ZEROCLAW_LUCID_BUDGET=200 # default: 200
# ZEROCLAW_LUCID_LOCAL_HIT_THRESHOLD=3 # local hit count to skip external recall
# ZEROCLAW_LUCID_RECALL_TIMEOUT_MS=120 # low-latency budget for lucid context recall
# ZEROCLAW_LUCID_STORE_TIMEOUT_MS=800 # async sync timeout for lucid store
# ZEROCLAW_LUCID_FAILURE_COOLDOWN_MS=15000 # cooldown after lucid failure to avoid repeated slow attempts
``` ```
## Security ## Security
@ -172,7 +263,7 @@ ZeroClaw enforces security at **every layer** — not just the sandbox. It passe
> **Run your own nmap:** `nmap -p 1-65535 <your-host>` — ZeroClaw binds to localhost only, so nothing is exposed unless you explicitly configure a tunnel. > **Run your own nmap:** `nmap -p 1-65535 <your-host>` — ZeroClaw binds to localhost only, so nothing is exposed unless you explicitly configure a tunnel.
### Channel allowlists (Telegram / Discord / Slack) ### Channel allowlists (Telegram / Discord / Slack / Mattermost)
Inbound sender policy is now consistent: Inbound sender policy is now consistent:
@ -187,8 +278,22 @@ Recommended low-friction setup (secure + fast):
- **Telegram:** allowlist your own `@username` (without `@`) and/or your numeric Telegram user ID. - **Telegram:** allowlist your own `@username` (without `@`) and/or your numeric Telegram user ID.
- **Discord:** allowlist your own Discord user ID. - **Discord:** allowlist your own Discord user ID.
- **Slack:** allowlist your own Slack member ID (usually starts with `U`). - **Slack:** allowlist your own Slack member ID (usually starts with `U`).
- **Mattermost:** uses standard API v4. Allowlists use Mattermost user IDs.
- Use `"*"` only for temporary open testing. - Use `"*"` only for temporary open testing.
Telegram operator-approval flow:
1. Keep `[channels_config.telegram].allowed_users = []` for deny-by-default startup.
2. Unauthorized users receive a hint with a copyable operator command:
`zeroclaw channel bind-telegram <IDENTITY>`.
3. Operator runs that command locally, then user retries sending a message.
If you need a one-shot manual approval, run:
```bash
zeroclaw channel bind-telegram 123456789
```
If you're not sure which identity to use: If you're not sure which identity to use:
1. Start channels and send one message to your bot. 1. Start channels and send one message to your bot.
@ -202,6 +307,21 @@ rerun channel setup only:
zeroclaw onboard --channels-only zeroclaw onboard --channels-only
``` ```
### Telegram media replies
Telegram routing now replies to the source **chat ID** from incoming updates (instead of usernames),
which avoids `Bad Request: chat not found` failures.
For non-text replies, ZeroClaw can send Telegram attachments when the assistant includes markers:
- `[IMAGE:<path-or-url>]`
- `[DOCUMENT:<path-or-url>]`
- `[VIDEO:<path-or-url>]`
- `[AUDIO:<path-or-url>]`
- `[VOICE:<path-or-url>]`
Paths can be local files (for example `/tmp/screenshot.png`) or HTTPS URLs.
### WhatsApp Business Cloud API Setup ### WhatsApp Business Cloud API Setup
WhatsApp uses Meta's Cloud API with webhooks (push-based, not polling): WhatsApp uses Meta's Cloud API with webhooks (push-based, not polling):
@ -250,12 +370,14 @@ default_model = "anthropic/claude-sonnet-4-20250514"
default_temperature = 0.7 default_temperature = 0.7
[memory] [memory]
backend = "sqlite" # "sqlite", "markdown", "none" backend = "sqlite" # "sqlite", "lucid", "markdown", "none"
auto_save = true auto_save = true
embedding_provider = "openai" # "openai", "noop" embedding_provider = "openai" # "openai", "noop"
vector_weight = 0.7 vector_weight = 0.7
keyword_weight = 0.3 keyword_weight = 0.3
# backend = "none" disables persistent memory via no-op backend
[gateway] [gateway]
require_pairing = true # require pairing code on first connect require_pairing = true # require pairing code on first connect
allow_public_bind = false # refuse 0.0.0.0 without tunnel allow_public_bind = false # refuse 0.0.0.0 without tunnel
@ -267,7 +389,16 @@ allowed_commands = ["git", "npm", "cargo", "ls", "cat", "grep"]
forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"] forbidden_paths = ["/etc", "/root", "/proc", "/sys", "~/.ssh", "~/.gnupg", "~/.aws"]
[runtime] [runtime]
kind = "native" # only supported value right now; unsupported kinds fail fast kind = "native" # "native" or "docker"
[runtime.docker]
image = "alpine:3.20" # container image for shell execution
network = "none" # docker network mode ("none", "bridge", etc.)
memory_limit_mb = 512 # optional memory limit in MB
cpu_limit = 1.0 # optional CPU limit
read_only_rootfs = true # mount root filesystem as read-only
mount_workspace = true # mount workspace into /workspace
allowed_workspace_roots = [] # optional allowlist for workspace mount validation
[heartbeat] [heartbeat]
enabled = false enabled = false
@ -280,11 +411,40 @@ provider = "none" # "none", "cloudflare", "tailscale", "ngrok", "c
encrypt = true # API keys encrypted with local key file encrypt = true # API keys encrypted with local key file
[browser] [browser]
enabled = false # opt-in browser_open tool enabled = false # opt-in browser_open + browser tools
allowed_domains = ["docs.rs"] # required when browser is enabled allowed_domains = ["docs.rs"] # required when browser is enabled
backend = "agent_browser" # "agent_browser" (default), "rust_native", "computer_use", "auto"
native_headless = true # applies when backend uses rust-native
native_webdriver_url = "http://127.0.0.1:9515" # WebDriver endpoint (chromedriver/selenium)
# native_chrome_path = "/usr/bin/chromium" # optional explicit browser binary for driver
[browser.computer_use]
endpoint = "http://127.0.0.1:8787/v1/actions" # computer-use sidecar HTTP endpoint
timeout_ms = 15000 # per-action timeout
allow_remote_endpoint = false # secure default: only private/localhost endpoint
window_allowlist = [] # optional window title/process allowlist hints
# api_key = "..." # optional bearer token for sidecar
# max_coordinate_x = 3840 # optional coordinate guardrail
# max_coordinate_y = 2160 # optional coordinate guardrail
# Rust-native backend build flag:
# cargo build --release --features browser-native
# Ensure a WebDriver server is running, e.g. chromedriver --port=9515
# Computer-use sidecar contract (MVP)
# POST browser.computer_use.endpoint
# Request: {
# "action": "mouse_click",
# "params": {"x": 640, "y": 360, "button": "left"},
# "policy": {"allowed_domains": [...], "window_allowlist": [...], "max_coordinate_x": 3840, "max_coordinate_y": 2160},
# "metadata": {"session_name": "...", "source": "zeroclaw.browser", "version": "..."}
# }
# Response: {"success": true, "data": {...}} or {"success": false, "error": "..."}
[composio] [composio]
enabled = false # opt-in: 1000+ OAuth apps via composio.dev enabled = false # opt-in: 1000+ OAuth apps via composio.dev
# api_key = "cmp_..." # optional: stored encrypted when [secrets].encrypt = true
entity_id = "default" # default user_id for Composio tool calls
[identity] [identity]
format = "openclaw" # "openclaw" (default, markdown files) or "aieos" (JSON) format = "openclaw" # "openclaw" (default, markdown files) or "aieos" (JSON)
@ -292,6 +452,57 @@ format = "openclaw" # "openclaw" (default, markdown files) or "aieos
# aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON # aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON
``` ```
### Ollama Local and Remote Endpoints
ZeroClaw uses one provider key (`ollama`) for both local and remote Ollama deployments:
- Local Ollama: keep `api_url` unset, run `ollama serve`, and use models like `llama3.2`.
- Remote Ollama endpoint (including Ollama Cloud): set `api_url` to the remote endpoint and set `api_key` (or `OLLAMA_API_KEY`) when required.
- Optional `:cloud` suffix: model IDs like `qwen3:cloud` are normalized to `qwen3` before the request.
Example remote configuration:
```toml
default_provider = "ollama"
default_model = "qwen3:cloud"
api_url = "https://ollama.com"
api_key = "ollama_api_key_here"
```
## Python Companion Package (`zeroclaw-tools`)
For LLM providers with inconsistent native tool calling (e.g., GLM-5/Zhipu), ZeroClaw ships a Python companion package with **LangGraph-based tool calling** for guaranteed consistency:
```bash
pip install zeroclaw-tools
```
```python
from zeroclaw_tools import create_agent, shell, file_read
from langchain_core.messages import HumanMessage
# Works with any OpenAI-compatible provider
agent = create_agent(
tools=[shell, file_read],
model="glm-5",
api_key="your-key",
base_url="https://api.z.ai/api/coding/paas/v4"
)
result = await agent.ainvoke({
"messages": [HumanMessage(content="List files in /tmp")]
})
print(result["messages"][-1].content)
```
**Why use it:**
- **Consistent tool calling** across all providers (even those with poor native support)
- **Automatic tool loop** — keeps calling tools until the task is complete
- **Easy extensibility** — add custom tools with `@tool` decorator
- **Discord bot integration** included (Telegram planned)
See [`python/README.md`](python/README.md) for full documentation.
## Identity System (AIEOS Support) ## Identity System (AIEOS Support)
ZeroClaw supports **identity-agnostic** AI personas through two formats: ZeroClaw supports **identity-agnostic** AI personas through two formats:
@ -386,13 +597,15 @@ See [aieos.org](https://aieos.org) for the full schema and live examples.
| `doctor` | Diagnose daemon/scheduler/channel freshness | | `doctor` | Diagnose daemon/scheduler/channel freshness |
| `status` | Show full system status | | `status` | Show full system status |
| `channel doctor` | Run health checks for configured channels | | `channel doctor` | Run health checks for configured channels |
| `channel bind-telegram <IDENTITY>` | Add one Telegram username/user ID to allowlist |
| `integrations info <name>` | Show setup/status details for one integration | | `integrations info <name>` | Show setup/status details for one integration |
## Development ## Development
```bash ```bash
cargo build # Dev build cargo build # Dev build
cargo build --release # Release build (~3.4MB) cargo build --release # Release build (codegen-units=1, works on all devices including Raspberry Pi)
cargo build --profile release-fast # Faster build (codegen-units=8, requires 16GB+ RAM)
cargo test # 1,017 tests cargo test # 1,017 tests
cargo clippy # Lint (0 warnings) cargo clippy # Lint (0 warnings)
cargo fmt # Format cargo fmt # Format
@ -409,19 +622,53 @@ A git hook runs `cargo fmt --check`, `cargo clippy -- -D warnings`, and `cargo t
git config core.hooksPath .githooks git config core.hooksPath .githooks
``` ```
### Build troubleshooting (Linux OpenSSL errors)
If you see an `openssl-sys` build error, sync dependencies and rebuild with the repository lockfile:
```bash
git pull
cargo build --release --locked
cargo install --path . --force --locked
```
ZeroClaw is configured to use `rustls` for HTTP/TLS dependencies; `--locked` keeps the transitive graph deterministic on fresh environments.
To skip the hook when you need a quick push during development: To skip the hook when you need a quick push during development:
```bash ```bash
git push --no-verify git push --no-verify
``` ```
## Collaboration & Docs
For high-throughput collaboration and consistent reviews:
- Contribution guide: [CONTRIBUTING.md](CONTRIBUTING.md)
- PR workflow policy: [docs/pr-workflow.md](docs/pr-workflow.md)
- Reviewer playbook (triage + deep review): [docs/reviewer-playbook.md](docs/reviewer-playbook.md)
- CI ownership and triage map: [docs/ci-map.md](docs/ci-map.md)
- Security disclosure policy: [SECURITY.md](SECURITY.md)
### 🙏 Special Thanks
A heartfelt thank you to the communities and institutions that inspire and fuel this open-source work:
- **Harvard University** — for fostering intellectual curiosity and pushing the boundaries of what's possible.
- **MIT** — for championing open knowledge, open source, and the belief that technology should be accessible to everyone.
- **Sundai Club** — for the community, the energy, and the relentless drive to build things that matter.
- **The World & Beyond** 🌍✨ — to every contributor, dreamer, and builder out there making open source a force for good. This is for you.
We're building in the open because the best ideas come from everywhere. If you're reading this, you're part of it. Welcome. 🦀❤️
## License ## License
MIT — see [LICENSE](LICENSE) MIT — see [LICENSE](LICENSE) and [NOTICE](NOTICE) for contributor attribution
## Contributing ## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md). Implement a trait, submit a PR: See [CONTRIBUTING.md](CONTRIBUTING.md). Implement a trait, submit a PR:
- CI workflow guide: [docs/ci-map.md](docs/ci-map.md)
- New `Provider``src/providers/` - New `Provider``src/providers/`
- New `Channel``src/channels/` - New `Channel``src/channels/`
- New `Observer``src/observability/` - New `Observer``src/observability/`
@ -433,3 +680,11 @@ See [CONTRIBUTING.md](CONTRIBUTING.md). Implement a trait, submit a PR:
--- ---
**ZeroClaw** — Zero overhead. Zero compromise. Deploy anywhere. Swap anything. 🦀 **ZeroClaw** — Zero overhead. Zero compromise. Deploy anywhere. Swap anything. 🦀
## Star History
<p align="center">
<a href="https://www.star-history.com/#zeroclaw-labs/zeroclaw&Date">
<img src="https://api.star-history.com/svg?repos=zeroclaw-labs/zeroclaw&type=Date" alt="Star History Chart" />
</a>
</p>

303
RUN_TESTS.md Normal file
View file

@ -0,0 +1,303 @@
# 🧪 Test Execution Guide
## Quick Reference
```bash
# Full automated test suite (~2 min)
./test_telegram_integration.sh
# Quick smoke test (~10 sec)
./quick_test.sh
# Just compile and unit test (~30 sec)
cargo test telegram --lib
```
## 📝 What Was Created For You
### 1. **test_telegram_integration.sh** (Main Test Suite)
- **20+ automated tests** covering all fixes
- **6 test phases**: Code quality, build, config, health, features, manual
- **Colored output** with pass/fail indicators
- **Detailed summary** at the end
```bash
./test_telegram_integration.sh
```
### 2. **quick_test.sh** (Fast Validation)
- **4 essential tests** for quick feedback
- **<10 second** execution time
- Perfect for **pre-commit** checks
```bash
./quick_test.sh
```
### 3. **generate_test_messages.py** (Test Helper)
- Generates test messages of various lengths
- Tests message splitting functionality
- 8 different message types
```bash
# Generate a long message (>4096 chars)
python3 test_helpers/generate_test_messages.py long
# Show all message types
python3 test_helpers/generate_test_messages.py all
```
### 4. **TESTING_TELEGRAM.md** (Complete Guide)
- Comprehensive testing documentation
- Troubleshooting guide
- Performance benchmarks
- CI/CD integration examples
## 🚀 Step-by-Step: First Run
### Step 1: Run Automated Tests
```bash
cd /path/to/zeroclaw   # your local clone of the repository
# Make scripts executable (already done)
chmod +x test_telegram_integration.sh quick_test.sh
# Run the full test suite
./test_telegram_integration.sh
```
**Expected output:**
```
⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡⚡
███████╗███████╗██████╗ ██████╗ ██████╗██╗ █████╗ ██╗ ██╗
...
🧪 TELEGRAM INTEGRATION TEST SUITE 🧪
Phase 1: Code Quality Tests
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Test 1: Compiling test suite
✓ PASS: Test suite compiles successfully
Test 2: Running Telegram unit tests
✓ PASS: All Telegram unit tests passed (24 tests)
...
Test Summary
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Total Tests: 20
Passed: 20
Failed: 0
Warnings: 0
Pass Rate: 100%
✓ ALL AUTOMATED TESTS PASSED! 🎉
```
### Step 2: Configure Telegram (if not done)
```bash
# Interactive setup
zeroclaw onboard --interactive
# Or channels-only setup
zeroclaw onboard --channels-only
```
When prompted:
1. Select **Telegram** channel
2. Enter your **bot token** from @BotFather
3. Enter your **Telegram user ID** or username
### Step 3: Verify Health
```bash
zeroclaw channel doctor
```
**Expected output:**
```
🩺 ZeroClaw Channel Doctor
✅ Telegram healthy
Summary: 1 healthy, 0 unhealthy, 0 timed out
```
### Step 4: Manual Testing
#### Test 1: Basic Message
```bash
# Terminal 1: Start the channel
zeroclaw channel start
```
**In Telegram:**
- Find your bot
- Send: `Hello bot!`
- **Verify**: Bot responds within 3 seconds
#### Test 2: Long Message (Split Test)
```bash
# Generate a long message
python3 test_helpers/generate_test_messages.py long
```
- **Copy the output**
- **Paste into Telegram** to your bot
- **Verify**:
- Message is split into 2+ chunks
- First chunk ends with `(continues...)`
- Middle chunks have `(continued)` and `(continues...)`
- Last chunk starts with `(continued)`
- All chunks arrive in order
#### Test 3: Word Boundary Splitting
```bash
python3 test_helpers/generate_test_messages.py word
```
- Send to bot
- **Verify**: Splits at word boundaries (not mid-word)
## 🎯 Test Results Checklist
After running all tests, verify:
### Automated Tests
- [ ] ✅ All 20 automated tests passed
- [ ] ✅ Build completed successfully
- [ ] ✅ Binary size <10MB
- [ ] ✅ Health check completes in <5s
- [ ] ✅ No clippy warnings
### Manual Tests
- [ ] ✅ Bot responds to basic messages
- [ ] ✅ Long messages split correctly
- [ ] ✅ Continuation markers appear
- [ ] ✅ Word boundaries respected
- [ ] ✅ Allowlist blocks unauthorized users
- [ ] ✅ No errors in logs
### Performance
- [ ] ✅ Response time <3 seconds
- [ ] ✅ Memory usage <10MB
- [ ] ✅ No message loss
- [ ] ✅ Rate limiting works (100ms delays)
## 🐛 Troubleshooting
### Issue: Tests fail to compile
```bash
# Clean build
cargo clean
cargo build --release
# Update dependencies
cargo update
```
### Issue: "Bot token not configured"
```bash
# Check config
cat ~/.zeroclaw/config.toml | grep -A 5 telegram
# Reconfigure
zeroclaw onboard --channels-only
```
### Issue: Health check fails
```bash
# Test bot token directly
curl "https://api.telegram.org/bot<YOUR_TOKEN>/getMe"
# Should return: {"ok":true,"result":{...}}
```
### Issue: Bot doesn't respond
```bash
# Enable debug logging
RUST_LOG=debug zeroclaw channel start
# Look for:
# - "Telegram channel listening for messages..."
# - "ignoring message from unauthorized user" (if allowlist issue)
# - Any error messages
```
## 📊 Performance Benchmarks
After all fixes, you should see:
| Metric | Target | Command |
|--------|--------|---------|
| Unit test pass | 24/24 | `cargo test telegram --lib` |
| Build time | <30s | `time cargo build --release` |
| Binary size | ~3-4MB | `ls -lh target/release/zeroclaw` |
| Health check | <5s | `time zeroclaw channel doctor` |
| First response | <3s | Manual test in Telegram |
| Message split | <50ms | Check debug logs |
| Memory usage | <10MB | `ps aux \| grep zeroclaw` |
## 🔄 CI/CD Integration
Add to your workflow:
```bash
# Pre-commit hook
#!/bin/bash
./quick_test.sh
# CI pipeline
./test_telegram_integration.sh
```
## 📚 Next Steps
1. **Run the tests:**
```bash
./test_telegram_integration.sh
```
2. **Fix any failures** using the troubleshooting guide
3. **Complete manual tests** using the checklist
4. **Deploy to production** when all tests pass
5. **Monitor logs** for any issues:
```bash
zeroclaw daemon
# or
RUST_LOG=info zeroclaw channel start
```
## 🎉 Success!
If all tests pass:
- ✅ Message splitting works (4096 char limit)
- ✅ Health check has 5s timeout
- ✅ Empty chat_id is handled safely
- ✅ All 24 unit tests pass
- ✅ Code is production-ready
**Your Telegram integration is ready to go!** 🚀
---
## 📞 Support
- Issues: https://github.com/theonlyhennygod/zeroclaw/issues
- Docs: `./TESTING_TELEGRAM.md`
- Help: `zeroclaw --help`

337
TESTING_TELEGRAM.md Normal file
View file

@ -0,0 +1,337 @@
# Telegram Integration Testing Guide
This guide covers testing the Telegram channel integration for ZeroClaw.
## 🚀 Quick Start
### Automated Tests
```bash
# Full test suite (20+ tests, ~2 minutes)
./test_telegram_integration.sh
# Quick smoke test (~10 seconds)
./quick_test.sh
# Just unit tests
cargo test telegram --lib
```
## 📋 Test Coverage
### Automated Tests (20 tests)
The `test_telegram_integration.sh` script runs:
**Phase 1: Code Quality (5 tests)**
- ✅ Test compilation
- ✅ Unit tests (24 tests)
- ✅ Message splitting tests (8 tests)
- ✅ Clippy linting
- ✅ Code formatting
**Phase 2: Build Tests (3 tests)**
- ✅ Debug build
- ✅ Release build
- ✅ Binary size verification (<10MB)
**Phase 3: Configuration Tests (4 tests)**
- ✅ Config file exists
- ✅ Telegram section configured
- ✅ Bot token set
- ✅ User allowlist configured
**Phase 4: Health Check Tests (2 tests)**
- ✅ Health check timeout (<5s)
- ✅ Telegram API connectivity
**Phase 5: Feature Validation (6 tests)**
- ✅ Message splitting function
- ✅ Message length constant (4096)
- ✅ Timeout implementation
- ✅ chat_id validation
- ✅ Duration import
- ✅ Continuation markers
### Manual Tests (6 tests)
After running automated tests, perform these manual checks:
1. **Basic messaging**
```bash
zeroclaw channel start
```
- Send "Hello bot!" in Telegram
- Verify response within 3 seconds
2. **Long message splitting**
```bash
# Generate 5000+ char message
python3 -c 'print("test " * 1000)'
```
- Paste into Telegram
- Verify: Message split into chunks
- Verify: Markers show `(continues...)` and `(continued)`
- Verify: All chunks arrive in order
3. **Unauthorized user blocking**
```toml
# Edit ~/.zeroclaw/config.toml
allowed_users = ["999999999"]
```
- Send message to bot
- Verify: Warning in logs
- Verify: Message ignored
- Restore correct user ID
4. **Rate limiting**
- Send 10 messages rapidly
- Verify: All processed
- Verify: No "Too Many Requests" errors
- Verify: Responses have delays
5. **Error logging**
```bash
RUST_LOG=debug zeroclaw channel start
```
- Check for unexpected errors
- Verify proper error handling
6. **Health check timeout**
```bash
time zeroclaw channel doctor
```
- Verify: Completes in <5 seconds
## 🔍 Test Results Interpretation
### Success Criteria
- All 20 automated tests pass ✅
- Health check completes in <5s
- Binary size <10MB
- No clippy warnings ✅
- All manual tests pass ✅
### Common Issues
**Issue: Health check times out**
```
Solution: Check bot token is valid
curl "https://api.telegram.org/bot<TOKEN>/getMe"
```
**Issue: Bot doesn't respond**
```
Solution: Check user allowlist
1. Send message to bot
2. Check logs for user_id
3. Update config: allowed_users = ["YOUR_ID"]
4. Run: zeroclaw onboard --channels-only
```
**Issue: Message splitting not working**
```
Solution: Verify code changes
grep -n "split_message_for_telegram" src/channels/telegram.rs
grep -n "TELEGRAM_MAX_MESSAGE_LENGTH" src/channels/telegram.rs
```
## 🧪 Test Scenarios
### Scenario 1: First-Time Setup
```bash
# 1. Run automated tests
./test_telegram_integration.sh
# 2. Configure Telegram
zeroclaw onboard --interactive
# Select Telegram channel
# Enter bot token (from @BotFather)
# Enter your user ID
# 3. Verify health
zeroclaw channel doctor
# 4. Start channel
zeroclaw channel start
# 5. Send test message in Telegram
```
### Scenario 2: After Code Changes
```bash
# 1. Quick validation
./quick_test.sh
# 2. Full test suite
./test_telegram_integration.sh
# 3. Manual smoke test
zeroclaw channel start
# Send message in Telegram
```
### Scenario 3: Production Deployment
```bash
# 1. Full test suite
./test_telegram_integration.sh
# 2. Load test (optional)
# Send 100 messages rapidly
for i in {1..100}; do
echo "Test message $i" | \
curl -X POST "https://api.telegram.org/bot<TOKEN>/sendMessage" \
-d "chat_id=<CHAT_ID>" \
-d "text=Message $i"
done
# 3. Monitor logs
RUST_LOG=info zeroclaw daemon
# 4. Check metrics
zeroclaw status
```
## 📊 Performance Benchmarks
Expected values after all fixes:
| Metric | Expected | How to Measure |
| ---------------------- | ---------- | -------------------------------- |
| Health check time | <5s | `time zeroclaw channel doctor` |
| First response time | <3s | Time from sending to receiving |
| Message split overhead | <50ms | Check logs for timing |
| Memory usage | <10MB | `ps aux \| grep zeroclaw` |
| Binary size | ~3-4MB | `ls -lh target/release/zeroclaw` |
| Unit test coverage | 24/24 pass | `cargo test telegram --lib` |
## 🐛 Debugging Failed Tests
### Debug Unit Tests
```bash
# Verbose output
cargo test telegram --lib -- --nocapture
# Specific test
cargo test telegram_split_over_limit -- --nocapture
# Show ignored tests
cargo test telegram --lib -- --ignored
```
### Debug Integration Issues
```bash
# Maximum logging
RUST_LOG=trace zeroclaw channel start
# Check Telegram API directly
curl "https://api.telegram.org/bot<TOKEN>/getMe"
curl "https://api.telegram.org/bot<TOKEN>/getUpdates"
# Validate config
cat ~/.zeroclaw/config.toml | grep -A 3 "\[channels_config.telegram\]"
```
### Debug Build Issues
```bash
# Clean build
cargo clean
cargo build --release
# Check dependencies
cargo tree | grep telegram
# Update dependencies
cargo update
```
## 🎯 CI/CD Integration
Add to your CI pipeline:
```yaml
# .github/workflows/test.yml
name: Test Telegram Integration
on: [push, pull_request]
jobs:
test:
runs-on: blacksmith-2vcpu-ubuntu-2404
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: Run tests
run: |
cargo test telegram --lib
cargo clippy --all-targets -- -D warnings
- name: Check formatting
run: cargo fmt --check
```
## 📝 Test Checklist
Before merging code:
- [ ] `./quick_test.sh` passes
- [ ] `./test_telegram_integration.sh` passes
- [ ] Manual tests completed
- [ ] No new clippy warnings
- [ ] Code is formatted (`cargo fmt`)
- [ ] Documentation updated
- [ ] CHANGELOG.md updated
## 🚨 Emergency Rollback
If tests fail in production:
```bash
# 1. Check git history
git log --oneline src/channels/telegram.rs
# 2. Rollback to previous version
git revert <commit-hash>
# 3. Rebuild
cargo build --release
# 4. Restart service
zeroclaw service restart
# 5. Verify
zeroclaw channel doctor
```
## 📚 Additional Resources
- [Telegram Bot API Documentation](https://core.telegram.org/bots/api)
- [ZeroClaw Main README](README.md)
- [Contributing Guide](CONTRIBUTING.md)
- [Issue Tracker](https://github.com/theonlyhennygod/zeroclaw/issues)

View file

@ -2,14 +2,23 @@
# https://embarkstudios.github.io/cargo-deny/ # https://embarkstudios.github.io/cargo-deny/
[advisories] [advisories]
unmaintained = "workspace" # In v2, vulnerability advisories always emit errors (not configurable).
yanked = "warn" # unmaintained: scope of unmaintained-crate checks (all | workspace | transitive | none)
unmaintained = "all"
# yanked: deny | warn | allow
yanked = "deny"
# Ignore known unmaintained transitive deps we cannot easily replace
ignore = [
# bincode v2.0.1 via probe-rs — project ceased but 1.3.3 considered complete
"RUSTSEC-2025-0141",
]
[licenses] [licenses]
# All licenses are denied unless explicitly allowed # All licenses are denied unless explicitly allowed
allow = [ allow = [
"MIT", "MIT",
"Apache-2.0", "Apache-2.0",
"Apache-2.0 WITH LLVM-exception",
"BSD-2-Clause", "BSD-2-Clause",
"BSD-3-Clause", "BSD-3-Clause",
"ISC", "ISC",
@ -19,6 +28,7 @@ allow = [
"Zlib", "Zlib",
"MPL-2.0", "MPL-2.0",
"CDLA-Permissive-2.0", "CDLA-Permissive-2.0",
"0BSD",
] ]
unused-allowed-license = "allow" unused-allowed-license = "allow"

169
dev/README.md Normal file
View file

@ -0,0 +1,169 @@
# ZeroClaw Development Environment
A fully containerized development sandbox for ZeroClaw agents. This environment allows you to develop, test, and debug the agent in isolation without modifying your host system.
## Directory Structure
- **`agent/`**: (Merged into root Dockerfile)
- The development image is built from the root `Dockerfile` using the `dev` stage (`target: dev`).
- Based on `debian:bookworm-slim` (unlike production `distroless`).
- Includes `bash`, `curl`, and debug tools.
- **`sandbox/`**: Dockerfile for the simulated user environment.
- Based on `ubuntu:22.04`.
- Pre-loaded with `git`, `python3`, `nodejs`, `npm`, `gcc`, `make`.
- Simulates a real developer machine.
- **`docker-compose.yml`**: Defines the services and `dev-net` network.
- **`cli.sh`**: Helper script to manage the lifecycle.
## Usage
Run all commands from the repository root using the helper script:
### 1. Start Environment
```bash
./dev/cli.sh up
```
Builds the agent from source and starts both containers.
### 2. Enter Agent Container (`zeroclaw-dev`)
```bash
./dev/cli.sh agent
```
Use this to run `zeroclaw` CLI commands manually, debug the binary, or check logs internally.
- **Path**: `/zeroclaw-data`
- **User**: `nobody` (65534)
### 3. Enter Sandbox (`sandbox`)
```bash
./dev/cli.sh shell
```
Use this to act as the "user" or "environment" the agent interacts with.
- **Path**: `/home/developer/workspace`
- **User**: `developer` (sudo-enabled)
### 4. Development Cycle
1. Make changes to Rust code in `src/`.
2. Rebuild the agent:
```bash
./dev/cli.sh build
```
3. Test changes inside the container:
```bash
./dev/cli.sh agent
# inside container:
zeroclaw --version
```
### 5. Persistence & Shared Workspace
The local `playground/` directory (in repo root) is mounted as the shared workspace:
- **Agent**: `/zeroclaw-data/workspace`
- **Sandbox**: `/home/developer/workspace`
Files created by the agent are visible to the sandbox user, and vice versa.
The agent configuration lives in `target/.zeroclaw` (mounted to `/zeroclaw-data/.zeroclaw`), so settings persist across container rebuilds.
### 6. Cleanup
Stop containers and remove volumes and generated config:
```bash
./dev/cli.sh clean
```
**Note:** This removes `target/.zeroclaw` (config/DB) but leaves the `playground/` directory intact. To fully wipe everything, manually delete `playground/`.
## Local CI/CD (Docker-Only)
Use this when you want CI-style validation without relying on GitHub Actions and without running Rust toolchain commands on your host.
### 1. Build the local CI image
```bash
./dev/ci.sh build-image
```
### 2. Run full local CI pipeline
```bash
./dev/ci.sh all
```
This runs inside a container:
- `./scripts/ci/rust_quality_gate.sh`
- `cargo test --locked --verbose`
- `cargo build --release --locked --verbose`
- `cargo deny check licenses sources`
- `cargo audit`
- Docker smoke build (`docker build --target dev ...` + `--version` check)
To run an opt-in strict lint audit locally:
```bash
./dev/ci.sh lint-strict
```
To run the incremental strict gate (changed Rust lines only):
```bash
./dev/ci.sh lint-delta
```
### 3. Run targeted stages
```bash
./dev/ci.sh lint
./dev/ci.sh lint-delta
./dev/ci.sh test
./dev/ci.sh build
./dev/ci.sh deny
./dev/ci.sh audit
./dev/ci.sh security
./dev/ci.sh docker-smoke
# Optional host-side docs gate (changed-line markdown lint)
./scripts/ci/docs_quality_gate.sh
# Optional host-side docs links gate (changed-line added links)
./scripts/ci/docs_links_gate.sh
```
Note: local `deny` focuses on license/source policy; advisory scanning is handled by `audit`.
### 4. Enter CI container shell
```bash
./dev/ci.sh shell
```
### 5. Optional shortcut via existing dev CLI
```bash
./dev/cli.sh ci
./dev/cli.sh ci lint
```
### Isolation model
- Rust compilation, tests, and audit/deny tools run in `zeroclaw-local-ci` container.
- Your host filesystem is mounted at `/workspace`; no host Rust toolchain is required.
- Cargo build artifacts are written to container volume `/ci-target` (not your host `target/`).
- Docker smoke stage uses your Docker daemon to build image layers, but build steps execute in containers.
### Build cache notes
- Both `Dockerfile` and `dev/ci/Dockerfile` use BuildKit cache mounts for Cargo registry/git data.
- The root `Dockerfile` also caches Rust `target/` (`id=zeroclaw-target`) to speed repeat local image builds.
- Local CI reuses named Docker volumes for Cargo registry/git and target outputs.
- `./dev/ci.sh docker-smoke` and `./dev/ci.sh all` now use `docker buildx` local cache at `.cache/buildx-smoke` when available.
- The CI image keeps Rust toolchain defaults from `rust:1.92-slim` and installs pinned toolchain `1.92.0` (no custom `CARGO_HOME`/`RUSTUP_HOME` overrides), preventing repeated toolchain bootstrapping on each run.

133
dev/ci.sh Executable file
View file

@ -0,0 +1,133 @@
#!/usr/bin/env bash
set -euo pipefail
if [ -f "dev/docker-compose.ci.yml" ]; then
COMPOSE_FILE="dev/docker-compose.ci.yml"
elif [ -f "docker-compose.ci.yml" ] && [ "$(basename "$(pwd)")" = "dev" ]; then
COMPOSE_FILE="docker-compose.ci.yml"
else
echo "❌ Run this script from repo root or dev/ directory."
exit 1
fi
compose_cmd=(docker compose -f "$COMPOSE_FILE")
SMOKE_CACHE_DIR="${SMOKE_CACHE_DIR:-.cache/buildx-smoke}"
run_in_ci() {
local cmd="$1"
"${compose_cmd[@]}" run --rm local-ci bash -c "$cmd"
}
build_smoke_image() {
if docker buildx version >/dev/null 2>&1; then
mkdir -p "$SMOKE_CACHE_DIR"
local build_args=(
--load
--target dev
--cache-to "type=local,dest=$SMOKE_CACHE_DIR,mode=max"
-t zeroclaw-local-smoke:latest
.
)
if [ -f "$SMOKE_CACHE_DIR/index.json" ]; then
build_args=(--cache-from "type=local,src=$SMOKE_CACHE_DIR" "${build_args[@]}")
fi
docker buildx build "${build_args[@]}"
else
DOCKER_BUILDKIT=1 docker build --target dev -t zeroclaw-local-smoke:latest .
fi
}
print_help() {
cat <<'EOF'
ZeroClaw Local CI in Docker
Usage: ./dev/ci.sh <command>
Commands:
build-image Build/update the local CI image
shell Open an interactive shell inside the CI container
lint Run rustfmt + clippy correctness gate (container only)
lint-strict Run rustfmt + full clippy warnings gate (container only)
lint-delta Run strict lint delta gate on changed Rust lines (container only)
test Run cargo test (container only)
build Run release build smoke check (container only)
audit Run cargo audit (container only)
deny Run cargo deny check (container only)
security Run cargo audit + cargo deny (container only)
docker-smoke Build and verify runtime image (host docker daemon)
all Run lint, test, build, security, docker-smoke
clean Remove local CI containers and volumes
EOF
}
if [ $# -lt 1 ]; then
print_help
exit 1
fi
case "$1" in
build-image)
"${compose_cmd[@]}" build local-ci
;;
shell)
"${compose_cmd[@]}" run --rm local-ci bash
;;
lint)
run_in_ci "./scripts/ci/rust_quality_gate.sh"
;;
lint-strict)
run_in_ci "./scripts/ci/rust_quality_gate.sh --strict"
;;
lint-delta)
run_in_ci "./scripts/ci/rust_strict_delta_gate.sh"
;;
test)
run_in_ci "cargo test --locked --verbose"
;;
build)
run_in_ci "cargo build --release --locked --verbose"
;;
audit)
run_in_ci "cargo audit"
;;
deny)
run_in_ci "cargo deny check licenses sources"
;;
security)
run_in_ci "cargo deny check licenses sources"
run_in_ci "cargo audit"
;;
docker-smoke)
build_smoke_image
docker run --rm zeroclaw-local-smoke:latest --version
;;
all)
run_in_ci "./scripts/ci/rust_quality_gate.sh"
run_in_ci "cargo test --locked --verbose"
run_in_ci "cargo build --release --locked --verbose"
run_in_ci "cargo deny check licenses sources"
run_in_ci "cargo audit"
build_smoke_image
docker run --rm zeroclaw-local-smoke:latest --version
;;
clean)
"${compose_cmd[@]}" down -v --remove-orphans
;;
*)
print_help
exit 1
;;
esac

22
dev/ci/Dockerfile Normal file
View file

@ -0,0 +1,22 @@
# syntax=docker/dockerfile:1.7
FROM rust:1.92-slim@sha256:bf3368a992915f128293ac76917ab6e561e4dda883273c8f5c9f6f8ea37a378e
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
git \
pkg-config \
libssl-dev \
curl \
&& rm -rf /var/lib/apt/lists/*
RUN rustup toolchain install 1.92.0 --profile minimal --component rustfmt --component clippy
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
cargo install --locked cargo-audit --version 0.22.1 && \
cargo install --locked cargo-deny --version 0.18.5
WORKDIR /workspace
CMD ["bash"]

124
dev/cli.sh Executable file
View file

@ -0,0 +1,124 @@
#!/bin/bash
set -e
# Detect execution context (root or dev/)
if [ -f "dev/docker-compose.yml" ]; then
BASE_DIR="dev"
HOST_TARGET_DIR="target"
elif [ -f "docker-compose.yml" ] && [ "$(basename "$(pwd)")" == "dev" ]; then
BASE_DIR="."
HOST_TARGET_DIR="../target"
else
echo "❌ Error: Run this script from the project root or dev/ directory."
exit 1
fi
COMPOSE_FILE="$BASE_DIR/docker-compose.yml"
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
function ensure_config {
CONFIG_DIR="$HOST_TARGET_DIR/.zeroclaw"
CONFIG_FILE="$CONFIG_DIR/config.toml"
WORKSPACE_DIR="$CONFIG_DIR/workspace"
if [ ! -f "$CONFIG_FILE" ]; then
echo -e "${YELLOW}⚙️ Config file missing in target/.zeroclaw. Creating default dev config from template...${NC}"
mkdir -p "$WORKSPACE_DIR"
# Copy template
cat "$BASE_DIR/config.template.toml" > "$CONFIG_FILE"
fi
}
function print_help {
echo -e "${YELLOW}ZeroClaw Development Environment Manager${NC}"
echo "Usage: ./dev/cli.sh [command]"
echo ""
echo "Commands:"
echo -e " ${GREEN}up${NC} Start dev environment (Agent + Sandbox)"
echo -e " ${GREEN}down${NC} Stop containers"
echo -e " ${GREEN}shell${NC} Enter Sandbox (Ubuntu)"
echo -e " ${GREEN}agent${NC} Enter Agent (ZeroClaw CLI)"
echo -e " ${GREEN}logs${NC} View logs"
echo -e " ${GREEN}build${NC} Rebuild images"
echo -e " ${GREEN}ci${NC} Run local CI checks in Docker (see ./dev/ci.sh)"
echo -e " ${GREEN}clean${NC} Stop and wipe workspace data"
}
if [ -z "$1" ]; then
print_help
exit 1
fi
case "$1" in
up)
ensure_config
echo -e "${GREEN}🚀 Starting Dev Environment...${NC}"
# Build context MUST be set correctly for docker compose
docker compose -f "$COMPOSE_FILE" up -d
echo -e "${GREEN}✅ Environment is running!${NC}"
echo -e " - Agent: http://127.0.0.1:3000"
echo -e " - Sandbox: running (background)"
echo -e " - Config: target/.zeroclaw/config.toml (Edit locally to apply changes)"
;;
down)
echo -e "${YELLOW}🛑 Stopping services...${NC}"
docker compose -f "$COMPOSE_FILE" down
echo -e "${GREEN}✅ Stopped.${NC}"
;;
shell)
echo -e "${GREEN}💻 Entering Sandbox (Ubuntu)... (Type 'exit' to leave)${NC}"
docker exec -it zeroclaw-sandbox /bin/bash
;;
agent)
echo -e "${GREEN}🤖 Entering Agent Container (ZeroClaw)... (Type 'exit' to leave)${NC}"
docker exec -it zeroclaw-dev /bin/bash
;;
logs)
docker compose -f "$COMPOSE_FILE" logs -f
;;
build)
echo -e "${YELLOW}🔨 Rebuilding images...${NC}"
docker compose -f "$COMPOSE_FILE" build
ensure_config
docker compose -f "$COMPOSE_FILE" up -d
echo -e "${GREEN}✅ Rebuild complete.${NC}"
;;
ci)
shift
if [ "$BASE_DIR" = "." ]; then
./ci.sh "${@:-all}"
else
./dev/ci.sh "${@:-all}"
fi
;;
clean)
echo -e "${RED}⚠️ WARNING: This will delete 'target/.zeroclaw' data and Docker volumes.${NC}"
read -p "Are you sure? (y/N) " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
docker compose -f "$COMPOSE_FILE" down -v
rm -rf "$HOST_TARGET_DIR/.zeroclaw"
echo -e "${GREEN}🧹 Cleaned up (playground/ remains intact).${NC}"
else
echo "Cancelled."
fi
;;
*)
print_help
exit 1
;;
esac

12
dev/config.template.toml Normal file
View file

@ -0,0 +1,12 @@
workspace_dir = "/zeroclaw-data/workspace"
config_path = "/zeroclaw-data/.zeroclaw/config.toml"
# This is the Ollama Base URL, not a secret key
api_key = "http://host.docker.internal:11434"
default_provider = "ollama"
default_model = "llama3.2"
default_temperature = 0.7
[gateway]
port = 3000
host = "[::]"
allow_public_bind = true

23
dev/docker-compose.ci.yml Normal file
View file

@ -0,0 +1,23 @@
name: zeroclaw-local-ci
services:
local-ci:
build:
context: ..
dockerfile: dev/ci/Dockerfile
container_name: zeroclaw-local-ci
working_dir: /workspace
environment:
- CARGO_TERM_COLOR=always
- PATH=/usr/local/cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- CARGO_TARGET_DIR=/ci-target
volumes:
- ..:/workspace
- cargo-registry:/usr/local/cargo/registry
- cargo-git:/usr/local/cargo/git
- ci-target:/ci-target
volumes:
cargo-registry:
cargo-git:
ci-target:

59
dev/docker-compose.yml Normal file
View file

@ -0,0 +1,59 @@
# Development Environment for ZeroClaw Agentic Testing
#
# Use this for:
# - Running the agent in a sandboxed environment
# - Testing dangerous commands safely
# - Developing new skills/integrations
#
# Usage:
# cd dev && ./cli.sh up
# or from root: ./dev/cli.sh up
name: zeroclaw-dev
services:
# ── The Agent (Development Image) ──
# Builds from source using the 'dev' stage of the root Dockerfile
zeroclaw-dev:
build:
context: ..
dockerfile: Dockerfile
target: dev
container_name: zeroclaw-dev
restart: unless-stopped
environment:
- API_KEY
- PROVIDER
- ZEROCLAW_MODEL
- ZEROCLAW_GATEWAY_PORT=3000
- SANDBOX_HOST=zeroclaw-sandbox
volumes:
# Mount single config file (avoids shadowing other files in .zeroclaw)
- ../target/.zeroclaw/config.toml:/zeroclaw-data/.zeroclaw/config.toml
# Mount shared workspace
- ../playground:/zeroclaw-data/workspace
ports:
- "127.0.0.1:3000:3000"
networks:
- dev-net
# ── The Sandbox (Ubuntu Environment) ──
# A fully loaded Ubuntu environment for the agent to play in.
sandbox:
build:
context: sandbox # Context relative to dev/
dockerfile: Dockerfile
container_name: zeroclaw-sandbox
hostname: dev-box
command: ["tail", "-f", "/dev/null"]
working_dir: /home/developer/workspace
user: developer
environment:
- TERM=xterm-256color
- SHELL=/bin/bash
volumes:
- ../playground:/home/developer/workspace # Mount local playground
networks:
- dev-net
networks:
dev-net:
driver: bridge

34
dev/sandbox/Dockerfile Normal file
View file

@ -0,0 +1,34 @@
FROM ubuntu:22.04@sha256:c7eb020043d8fc2ae0793fb35a37bff1cf33f156d4d4b12ccc7f3ef8706c38b1
# Prevent interactive prompts during package installation
ENV DEBIAN_FRONTEND=noninteractive
# Install common development tools and runtimes
# - Node.js: Install v20 (LTS) from NodeSource
# - Core: curl, git, vim, build-essential (gcc, make)
# - Python: python3, pip
# - Network: ping, dnsutils
RUN apt-get update && apt-get install -y curl && \
curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
apt-get install -y \
nodejs \
wget git vim nano unzip zip \
build-essential \
python3 python3-pip \
sudo \
iputils-ping dnsutils net-tools \
&& rm -rf /var/lib/apt/lists/* \
&& node --version && npm --version
# Create a non-root user 'developer' with UID 1000
# Grant passwordless sudo to simulate a local dev environment (using safe sudoers.d)
RUN useradd -m -s /bin/bash -u 1000 developer && \
echo "developer ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/developer && \
chmod 0440 /etc/sudoers.d/developer
# Set up the workspace
USER developer
WORKDIR /home/developer/workspace
# Default command
CMD ["/bin/bash"]

View file

@ -25,16 +25,29 @@ services:
# Options: openrouter, openai, anthropic, ollama # Options: openrouter, openai, anthropic, ollama
- PROVIDER=${PROVIDER:-openrouter} - PROVIDER=${PROVIDER:-openrouter}
# Allow public bind inside Docker (required for container networking)
- ZEROCLAW_ALLOW_PUBLIC_BIND=true
# Optional: Model override # Optional: Model override
# - ZEROCLAW_MODEL=anthropic/claude-sonnet-4-20250514 # - ZEROCLAW_MODEL=anthropic/claude-sonnet-4-20250514
volumes: volumes:
# Persist workspace and config # Persist workspace and config (must match WORKDIR/HOME in Dockerfile)
- zeroclaw-data:/data - zeroclaw-data:/zeroclaw-data
ports: ports:
# Gateway API port # Gateway API port (override HOST_PORT if 3000 is taken)
- "3000:3000" - "${HOST_PORT:-3000}:3000"
# Resource limits
deploy:
resources:
limits:
cpus: '2'
memory: 2G
reservations:
cpus: '0.5'
memory: 512M
# Health check # Health check
healthcheck: healthcheck:

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

View file

@ -0,0 +1,89 @@
# Actions Source Policy (Phase 1)
This document defines the current GitHub Actions source-control policy for this repository.
Phase 1 objective: lock down action sources with minimal disruption, before full SHA pinning.
## Current Policy
- Repository Actions permissions: enabled
- Allowed actions mode: selected
- SHA pinning required: false (deferred to Phase 2)
Selected allowlist patterns:
- `actions/*` (covers `actions/cache`, `actions/checkout`, `actions/upload-artifact`, `actions/download-artifact`, and other first-party actions)
- `docker/*`
- `dtolnay/rust-toolchain@*`
- `Swatinem/rust-cache@*`
- `DavidAnson/markdownlint-cli2-action@*`
- `lycheeverse/lychee-action@*`
- `EmbarkStudios/cargo-deny-action@*`
- `rhysd/actionlint@*`
- `softprops/action-gh-release@*`
- `sigstore/cosign-installer@*`
- `useblacksmith/*` (Blacksmith self-hosted runner infrastructure)
## Change Control Export
Use these commands to export the current effective policy for audit/change control:
```bash
gh api repos/zeroclaw-labs/zeroclaw/actions/permissions
gh api repos/zeroclaw-labs/zeroclaw/actions/permissions/selected-actions
```
Record each policy change with:
- change date/time (UTC)
- actor
- reason
- allowlist delta (added/removed patterns)
- rollback note
## Why This Phase
- Reduces supply-chain risk from unreviewed marketplace actions.
- Preserves current CI/CD functionality with low migration overhead.
- Prepares for Phase 2 full SHA pinning without blocking active development.
## Agentic Workflow Guardrails
Because this repository has high agent-authored change volume:
- Any PR that adds or changes `uses:` action sources must include an allowlist impact note.
- New third-party actions require explicit maintainer review before allowlisting.
- Expand allowlist only for verified missing actions; avoid broad wildcard exceptions.
- Keep rollback instructions in the PR description for Actions policy changes.
## Validation Checklist
After allowlist changes, validate:
1. `CI`
2. `Docker`
3. `Security Audit`
4. `Workflow Sanity`
5. `Release` (when safe to run)
Failure mode to watch for:
- `action is not allowed by policy`
If encountered, add only the specific trusted missing action, rerun, and document why.
Latest sweep notes:
- 2026-02-16: Hidden dependency discovered in `release.yml`: `sigstore/cosign-installer@...`
- Added allowlist pattern: `sigstore/cosign-installer@*`
- 2026-02-16: Blacksmith migration blocked workflow execution
- Added allowlist pattern: `useblacksmith/*` for self-hosted runner infrastructure
- Actions: `useblacksmith/setup-docker-builder@v1`, `useblacksmith/build-push-action@v2`
## Rollback
Emergency unblock path:
1. Temporarily set Actions policy back to `all`.
2. Restore selected allowlist after identifying missing entries.
3. Record incident and final allowlist delta.

View file

@ -0,0 +1,116 @@
# Adding Boards and Tools — ZeroClaw Hardware Guide
This guide explains how to add new hardware boards and custom tools to ZeroClaw.
## Quick Start: Add a Board via CLI
```bash
# Add a board (updates ~/.zeroclaw/config.toml)
zeroclaw peripheral add nucleo-f401re /dev/ttyACM0
zeroclaw peripheral add arduino-uno /dev/cu.usbmodem12345
zeroclaw peripheral add rpi-gpio native # for Raspberry Pi GPIO (Linux)
# Restart daemon to apply
zeroclaw daemon --host 127.0.0.1 --port 8080
```
## Supported Boards
| Board | Transport | Path Example |
|-----------------|-----------|---------------------------|
| nucleo-f401re | serial | /dev/ttyACM0, /dev/cu.usbmodem* |
| arduino-uno | serial | /dev/ttyACM0, /dev/cu.usbmodem* |
| arduino-uno-q | bridge | (Uno Q IP) |
| rpi-gpio | native | native |
| esp32 | serial | /dev/ttyUSB0 |
## Manual Config
Edit `~/.zeroclaw/config.toml`:
```toml
[peripherals]
enabled = true
datasheet_dir = "docs/datasheets" # optional: RAG for "turn on red led" → pin 13
[[peripherals.boards]]
board = "nucleo-f401re"
transport = "serial"
path = "/dev/ttyACM0"
baud = 115200
[[peripherals.boards]]
board = "arduino-uno"
transport = "serial"
path = "/dev/cu.usbmodem12345"
baud = 115200
```
## Adding a Datasheet (RAG)
Place `.md` or `.txt` files in `docs/datasheets/` (or your `datasheet_dir`). Name files by board: `nucleo-f401re.md`, `arduino-uno.md`.
### Pin Aliases (Recommended)
Add a `## Pin Aliases` section so the agent can map "red led" → pin 13:
```markdown
# My Board
## Pin Aliases
| alias | pin |
|-------------|-----|
| red_led | 13 |
| builtin_led | 13 |
| user_led | 5 |
```
Or use key-value format:
```markdown
## Pin Aliases
red_led: 13
builtin_led: 13
```
### PDF Datasheets
With the `rag-pdf` feature, ZeroClaw can index PDF files:
```bash
cargo build --features hardware,rag-pdf
```
Place PDFs in the datasheet directory. They are extracted and chunked for RAG.
## Adding a New Board Type
1. **Create a datasheet** — `docs/datasheets/my-board.md` with pin aliases and GPIO info.
2. **Add to config** — `zeroclaw peripheral add my-board /dev/ttyUSB0`
3. **Implement a peripheral** (optional) — For custom protocols, implement the `Peripheral` trait in `src/peripherals/` and register in `create_peripheral_tools`.
See `docs/hardware-peripherals-design.md` for the full design.
## Adding a Custom Tool
1. Implement the `Tool` trait in `src/tools/`.
2. Register in `create_peripheral_tools` (for hardware tools) or the agent tool registry.
3. Add a tool description to the agent's `tool_descs` in `src/agent/loop_.rs`.
## CLI Reference
| Command | Description |
|---------|-------------|
| `zeroclaw peripheral list` | List configured boards |
| `zeroclaw peripheral add <board> <path>` | Add board (writes config) |
| `zeroclaw peripheral flash` | Flash Arduino firmware |
| `zeroclaw peripheral flash-nucleo` | Flash Nucleo firmware |
| `zeroclaw hardware discover` | List USB devices |
| `zeroclaw hardware info` | Chip info via probe-rs |
## Troubleshooting
- **Serial port not found** — On macOS use `/dev/cu.usbmodem*`; on Linux use `/dev/ttyACM0` or `/dev/ttyUSB0`.
- **Build with hardware** — `cargo build --features hardware`
- **Probe-rs for Nucleo** — `cargo build --features hardware,probe`

348
docs/agnostic-security.md Normal file
View file

@ -0,0 +1,348 @@
# Agnostic Security: Zero Impact on Portability
## Core Question: Will security features break...
1. ❓ Fast cross-compilation builds?
2. ❓ Pluggable architecture (swap anything)?
3. ❓ Hardware agnosticism (ARM, x86, RISC-V)?
4. ❓ Small hardware support (<5MB RAM, $10 boards)?
**Answer: NO to all** — Security is designed as **optional feature flags** with **platform-specific conditional compilation**.
---
## 1. Build Speed: Feature-Gated Security
### Cargo.toml: Security Features Behind Features
```toml
[features]
default = ["basic-security"]
# Basic security (always on, zero overhead)
basic-security = []
# Platform-specific sandboxing (opt-in per platform)
sandbox-landlock = [] # Linux only
sandbox-firejail = [] # Linux only
sandbox-bubblewrap = [] # macOS/Linux
sandbox-docker = [] # All platforms (heavy)
# Full security suite (for production builds)
security-full = [
"basic-security",
"sandbox-landlock",
"resource-monitoring",
"audit-logging",
]
# Resource & audit monitoring
resource-monitoring = []
audit-logging = []
# Development builds (fastest, no extra deps)
dev = []
```
### Build Commands (Choose Your Profile)
```bash
# Ultra-fast dev build (no security extras)
cargo build --profile dev
# Release build with basic security (default)
cargo build --release
# → Includes: allowlist, path blocking, injection protection
# → Excludes: Landlock, Firejail, audit logging
# Production build with full security
cargo build --release --features security-full
# → Includes: Everything
# Platform-specific sandbox only
cargo build --release --features sandbox-landlock # Linux
cargo build --release --features sandbox-docker # All platforms
```
### Conditional Compilation: Zero Overhead When Disabled
```rust
// src/security/mod.rs
#[cfg(feature = "sandbox-landlock")]
mod landlock;
#[cfg(feature = "sandbox-landlock")]
pub use landlock::LandlockSandbox;
#[cfg(feature = "sandbox-firejail")]
mod firejail;
#[cfg(feature = "sandbox-firejail")]
pub use firejail::FirejailSandbox;
// Always-include basic security (no feature flag)
pub mod policy; // allowlist, path blocking, injection protection
```
**Result**: When features are disabled, the code isn't even compiled — **zero binary bloat**.
---
## 2. Pluggable Architecture: Security Is a Trait Too
### Security Backend Trait (Swappable Like Everything Else)
```rust
// src/security/traits.rs
#[async_trait]
pub trait Sandbox: Send + Sync {
/// Wrap a command with sandbox protection
fn wrap_command(&self, cmd: &mut std::process::Command) -> std::io::Result<()>;
/// Check if sandbox is available on this platform
fn is_available(&self) -> bool;
/// Human-readable name
fn name(&self) -> &str;
}
// No-op sandbox (always available)
pub struct NoopSandbox;
impl Sandbox for NoopSandbox {
fn wrap_command(&self, _cmd: &mut std::process::Command) -> std::io::Result<()> {
Ok(()) // Pass through unchanged
}
fn is_available(&self) -> bool { true }
fn name(&self) -> &str { "none" }
}
```
### Factory Pattern: Auto-Select Based on Features
```rust
// src/security/factory.rs
pub fn create_sandbox() -> Box<dyn Sandbox> {
#[cfg(feature = "sandbox-landlock")]
{
if LandlockSandbox::is_available() {
return Box::new(LandlockSandbox::new());
}
}
#[cfg(feature = "sandbox-firejail")]
{
if FirejailSandbox::is_available() {
return Box::new(FirejailSandbox::new());
}
}
#[cfg(feature = "sandbox-bubblewrap")]
{
if BubblewrapSandbox::is_available() {
return Box::new(BubblewrapSandbox::new());
}
}
#[cfg(feature = "sandbox-docker")]
{
if DockerSandbox::is_available() {
return Box::new(DockerSandbox::new());
}
}
// Fallback: always available
Box::new(NoopSandbox)
}
```
**Just like providers, channels, and memory — security is pluggable!**
---
## 3. Hardware Agnosticism: Same Binary, Different Platforms
### Cross-Platform Behavior Matrix
| Platform | Builds On | Runtime Behavior |
|----------|-----------|------------------|
| **Linux ARM** (Raspberry Pi) | ✅ Yes | Landlock → None (graceful) |
| **Linux x86_64** | ✅ Yes | Landlock → Firejail → None |
| **macOS ARM** (M1/M2) | ✅ Yes | Bubblewrap → None |
| **macOS x86_64** | ✅ Yes | Bubblewrap → None |
| **Windows ARM** | ✅ Yes | None (app-layer) |
| **Windows x86_64** | ✅ Yes | None (app-layer) |
| **RISC-V Linux** | ✅ Yes | Landlock → None |
### How It Works: Runtime Detection
```rust
// src/security/detect.rs
impl SandboxingStrategy {
/// Choose best available sandbox AT RUNTIME
pub fn detect() -> SandboxingStrategy {
#[cfg(target_os = "linux")]
{
// Try Landlock first (kernel feature detection)
if Self::probe_landlock() {
return SandboxingStrategy::Landlock;
}
// Try Firejail (user-space tool detection)
if Self::probe_firejail() {
return SandboxingStrategy::Firejail;
}
}
#[cfg(target_os = "macos")]
{
if Self::probe_bubblewrap() {
return SandboxingStrategy::Bubblewrap;
}
}
// Always available fallback
SandboxingStrategy::ApplicationLayer
}
}
```
**Same binary runs everywhere** — it just adapts its protection level based on what's available.
---
## 4. Small Hardware: Memory Impact Analysis
### Binary Size Impact (Estimated)
| Feature | Code Size | RAM Overhead | Status |
|---------|-----------|--------------|--------|
| **Base ZeroClaw** | 3.4MB | <5MB | Current |
| **+ Landlock** | +50KB | +100KB | ✅ Linux 5.13+ |
| **+ Firejail wrapper** | +20KB | +0KB (external) | ✅ Linux + firejail |
| **+ Memory monitoring** | +30KB | +50KB | ✅ All platforms |
| **+ Audit logging** | +40KB | +200KB (buffered) | ✅ All platforms |
| **Full security** | +140KB | +350KB | ✅ Still <6MB total |
### $10 Hardware Compatibility
| Hardware | RAM | ZeroClaw (base) | ZeroClaw (full security) | Status |
|----------|-----|-----------------|--------------------------|--------|
| **Raspberry Pi Zero** | 512MB | ✅ 2% | ✅ 2.5% | Works |
| **Orange Pi Zero** | 512MB | ✅ 2% | ✅ 2.5% | Works |
| **NanoPi NEO** | 256MB | ✅ 4% | ✅ 5% | Works |
| **C.H.I.P.** | 512MB | ✅ 2% | ✅ 2.5% | Works |
| **Rock64** | 1GB | ✅ 1% | ✅ 1.2% | Works |
**Even with full security, ZeroClaw uses <5% of RAM on $10 boards.**
---
## 5. Agnostic Swaps: Everything Remains Pluggable
### ZeroClaw's Core Promise: Swap Anything
```rust
// Providers (already pluggable)
Box<dyn Provider>
// Channels (already pluggable)
Box<dyn Channel>
// Memory (already pluggable)
Box<dyn MemoryBackend>
// Tunnels (already pluggable)
Box<dyn Tunnel>
// NOW ALSO: Security (newly pluggable)
Box<dyn Sandbox>
Box<dyn Auditor>
Box<dyn ResourceMonitor>
```
### Swap Security Backends via Config
```toml
# Use no sandbox (fastest, app-layer only)
[security.sandbox]
backend = "none"
# Use Landlock (Linux kernel LSM, native)
[security.sandbox]
backend = "landlock"
# Use Firejail (user-space, needs firejail installed)
[security.sandbox]
backend = "firejail"
# Use Docker (heaviest, most isolated)
[security.sandbox]
backend = "docker"
```
**Just like swapping OpenAI for Gemini, or SQLite for PostgreSQL.**
---
## 6. Dependency Impact: Minimal New Deps
### Current Dependencies (for context)
```
reqwest, tokio, serde, anyhow, uuid, chrono, rusqlite,
axum, tracing, opentelemetry, ...
```
### Security Feature Dependencies
| Feature | New Dependencies | Platform |
|---------|------------------|----------|
| **Landlock** | `landlock` crate (pure Rust) | Linux only |
| **Firejail** | None (external binary) | Linux only |
| **Bubblewrap** | None (external binary) | macOS/Linux |
| **Docker** | `bollard` crate (Docker API) | All platforms |
| **Memory monitoring** | None (std::alloc) | All platforms |
| **Audit logging** | None (already have hmac/sha2) | All platforms |
**Result**: Most features add **zero new Rust dependencies** — they either:
1. Use pure-Rust crates (landlock)
2. Wrap external binaries (Firejail, Bubblewrap)
3. Use existing deps (hmac, sha2 already in Cargo.toml)
---
## Summary: Core Value Propositions Preserved
| Value Prop | Before | After (with security) | Status |
|------------|--------|----------------------|--------|
| **<5MB RAM** | <5MB | <6MB (worst case) | Preserved |
| **<10ms startup** | <10ms | <15ms (detection) | Preserved |
| **3.4MB binary** | ✅ 3.4MB | ✅ 3.5MB (with all features) | ✅ Preserved |
| **ARM + x86 + RISC-V** | ✅ All | ✅ All | ✅ Preserved |
| **$10 hardware** | ✅ Works | ✅ Works | ✅ Preserved |
| **Pluggable everything** | ✅ Yes | ✅ Yes (security too) | ✅ Enhanced |
| **Cross-platform** | ✅ Yes | ✅ Yes | ✅ Preserved |
---
## The Key: Feature Flags + Conditional Compilation
```bash
# Developer build (fastest, no extra features)
cargo build --profile dev
# Standard release (your current build)
cargo build --release
# Production with full security
cargo build --release --features security-full
# Target specific hardware
cargo build --release --target aarch64-unknown-linux-gnu # Raspberry Pi
cargo build --release --target riscv64gc-unknown-linux-gnu # RISC-V
cargo build --release --target armv7-unknown-linux-gnueabihf # ARMv7
```
**Every target, every platform, every use case — still fast, still small, still agnostic.**

217
docs/arduino-uno-q-setup.md Normal file
View file

@ -0,0 +1,217 @@
# ZeroClaw on Arduino Uno Q — Step-by-Step Guide
Run ZeroClaw on the Arduino Uno Q's Linux side. Telegram works over WiFi; GPIO control uses the Bridge (requires a minimal App Lab app).
---
## What's Included (No Code Changes Needed)
ZeroClaw includes everything needed for Arduino Uno Q. **Clone the repo and follow this guide — no patches or custom code required.**
| Component | Location | Purpose |
|-----------|----------|---------|
| Bridge app | `firmware/zeroclaw-uno-q-bridge/` | MCU sketch + Python socket server (port 9999) for GPIO |
| Bridge tools | `src/peripherals/uno_q_bridge.rs` | `gpio_read` / `gpio_write` tools that talk to the Bridge over TCP |
| Setup command | `src/peripherals/uno_q_setup.rs` | `zeroclaw peripheral setup-uno-q` deploys the Bridge via scp + arduino-app-cli |
| Config schema | `board = "arduino-uno-q"`, `transport = "bridge"` | Supported in `config.toml` |
Build with `--features hardware` (or the default features) to include Uno Q support.
---
## Prerequisites
- Arduino Uno Q with WiFi configured
- Arduino App Lab installed on your Mac (for initial setup and deployment)
- API key for LLM (OpenRouter, etc.)
---
## Phase 1: Initial Uno Q Setup (One-Time)
### 1.1 Configure Uno Q via App Lab
1. Download [Arduino App Lab](https://docs.arduino.cc/software/app-lab/) (AppImage on Linux).
2. Connect Uno Q via USB, power it on.
3. Open App Lab, connect to the board.
4. Follow the setup wizard:
- Set username and password (for SSH)
- Configure WiFi (SSID, password)
- Apply any firmware updates
5. Note the IP address shown (e.g. `arduino@192.168.1.42`) or find it later via `ip addr show` in App Lab's terminal.
### 1.2 Verify SSH Access
```bash
ssh arduino@<UNO_Q_IP>
# Enter the password you set
```
---
## Phase 2: Install ZeroClaw on Uno Q
### Option A: Build on the Device (Simpler, ~20–40 min)
```bash
# SSH into Uno Q
ssh arduino@<UNO_Q_IP>
# Install Rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
source ~/.cargo/env
# Install build deps (Debian)
sudo apt-get update
sudo apt-get install -y pkg-config libssl-dev
# Clone zeroclaw (or scp your project)
git clone https://github.com/theonlyhennygod/zeroclaw.git
cd zeroclaw
# Build (takes ~15–30 min on Uno Q)
cargo build --release
# Install
sudo cp target/release/zeroclaw /usr/local/bin/
```
### Option B: Cross-Compile on Mac (Faster)
```bash
# On your Mac — add aarch64 target
rustup target add aarch64-unknown-linux-gnu
# Install cross-compiler (macOS; required for linking)
brew tap messense/macos-cross-toolchains
brew install aarch64-unknown-linux-gnu
# Build
CC_aarch64_unknown_linux_gnu=aarch64-unknown-linux-gnu-gcc cargo build --release --target aarch64-unknown-linux-gnu
# Copy to Uno Q
scp target/aarch64-unknown-linux-gnu/release/zeroclaw arduino@<UNO_Q_IP>:~/
ssh arduino@<UNO_Q_IP> "sudo mv ~/zeroclaw /usr/local/bin/"
```
If cross-compile fails, use Option A and build on the device.
---
## Phase 3: Configure ZeroClaw
### 3.1 Run Onboard (or Create Config Manually)
```bash
ssh arduino@<UNO_Q_IP>
# Quick config
zeroclaw onboard --api-key YOUR_OPENROUTER_KEY --provider openrouter
# Or create config manually
mkdir -p ~/.zeroclaw/workspace
nano ~/.zeroclaw/config.toml
```
### 3.2 Minimal config.toml
```toml
api_key = "YOUR_OPENROUTER_API_KEY"
default_provider = "openrouter"
default_model = "anthropic/claude-sonnet-4"
[peripherals]
enabled = false
# GPIO via Bridge requires Phase 4
[channels_config.telegram]
bot_token = "YOUR_TELEGRAM_BOT_TOKEN"
allowed_users = ["*"]
[gateway]
host = "127.0.0.1"
port = 8080
allow_public_bind = false
[agent]
compact_context = true
```
---
## Phase 4: Run ZeroClaw Daemon
```bash
ssh arduino@<UNO_Q_IP>
# Run daemon (Telegram polling works over WiFi)
zeroclaw daemon --host 127.0.0.1 --port 8080
```
**At this point:** Telegram chat works. Send messages to your bot — ZeroClaw responds. No GPIO yet.
---
## Phase 5: GPIO via Bridge (ZeroClaw Handles It)
ZeroClaw includes the Bridge app and setup command.
### 5.1 Deploy Bridge App
**From your Mac** (with zeroclaw repo):
```bash
zeroclaw peripheral setup-uno-q --host 192.168.0.48
```
**From the Uno Q** (SSH'd in):
```bash
zeroclaw peripheral setup-uno-q
```
This copies the Bridge app to `~/ArduinoApps/zeroclaw-uno-q-bridge` and starts it.
### 5.2 Add to config.toml
```toml
[peripherals]
enabled = true
[[peripherals.boards]]
board = "arduino-uno-q"
transport = "bridge"
```
### 5.3 Run ZeroClaw
```bash
zeroclaw daemon --host 127.0.0.1 --port 8080
```
Now when you message your Telegram bot *"Turn on the LED"* or *"Set pin 13 high"*, ZeroClaw uses `gpio_write` via the Bridge.
---
## Summary: Commands Start to End
| Step | Command |
|------|---------|
| 1 | Configure Uno Q in App Lab (WiFi, SSH) |
| 2 | `ssh arduino@<IP>` |
| 3 | `curl -sSf https://sh.rustup.rs \| sh -s -- -y && source ~/.cargo/env` |
| 4 | `sudo apt-get install -y pkg-config libssl-dev` |
| 5 | `git clone https://github.com/theonlyhennygod/zeroclaw.git && cd zeroclaw` |
| 6 | `cargo build --release --no-default-features` |
| 7 | `zeroclaw onboard --api-key KEY --provider openrouter` |
| 8 | Edit `~/.zeroclaw/config.toml` (add Telegram bot_token) |
| 9 | `zeroclaw daemon --host 127.0.0.1 --port 8080` |
| 10 | Message your Telegram bot — it responds |
---
## Troubleshooting
- **"command not found: zeroclaw"** — Use full path: `/usr/local/bin/zeroclaw` or ensure `~/.cargo/bin` is in PATH.
- **Telegram not responding** — Check bot_token, allowed_users, and that the Uno Q has internet (WiFi).
- **Out of memory** — Use `--no-default-features` to reduce binary size; consider `compact_context = true`.
- **GPIO commands ignored** — Ensure Bridge app is running (`zeroclaw peripheral setup-uno-q` deploys and starts it). Config must have `board = "arduino-uno-q"` and `transport = "bridge"`.
- **LLM provider (GLM/Zhipu)** — Use `default_provider = "glm"` or `"zhipu"` with `GLM_API_KEY` in env or config. ZeroClaw uses the correct v4 endpoint.

186
docs/audit-logging.md Normal file
View file

@ -0,0 +1,186 @@
# Audit Logging for ZeroClaw
## Problem
ZeroClaw logs actions but lacks tamper-evident audit trails for:
- Who executed what command
- When and from which channel
- What resources were accessed
- Whether security policies were triggered
---
## Proposed Audit Log Format
```json
{
"timestamp": "2026-02-16T12:34:56Z",
"event_id": "evt_1a2b3c4d",
"event_type": "command_execution",
"actor": {
"channel": "telegram",
"user_id": "123456789",
"username": "@alice"
},
"action": {
"command": "ls -la",
"risk_level": "low",
"approved": false,
"allowed": true
},
"result": {
"success": true,
"exit_code": 0,
"duration_ms": 15
},
"security": {
"policy_violation": false,
"rate_limit_remaining": 19
},
"signature": "SHA256:abc123..." // HMAC for tamper evidence
}
```
---
## Implementation
```rust
// src/security/audit.rs
use serde::{Deserialize, Serialize};
use std::io::Write;
use std::path::PathBuf;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEvent {
pub timestamp: String,
pub event_id: String,
pub event_type: AuditEventType,
pub actor: Actor,
pub action: Action,
pub result: ExecutionResult,
pub security: SecurityContext,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AuditEventType {
CommandExecution,
FileAccess,
ConfigurationChange,
AuthSuccess,
AuthFailure,
PolicyViolation,
}
pub struct AuditLogger {
log_path: PathBuf,
signing_key: Option<hmac::Hmac<sha2::Sha256>>,
}
impl AuditLogger {
pub fn log(&self, event: &AuditEvent) -> anyhow::Result<()> {
let mut line = serde_json::to_string(event)?;
// Add HMAC signature if key configured
if let Some(ref key) = self.signing_key {
let signature = compute_hmac(key, line.as_bytes());
line.push_str(&format!("\n\"signature\": \"{}\"", signature));
}
let mut file = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(&self.log_path)?;
writeln!(file, "{}", line)?;
file.sync_all()?; // Force flush for durability
Ok(())
}
pub fn search(&self, filter: AuditFilter) -> Vec<AuditEvent> {
// Search log file by filter criteria
todo!()
}
}
```
---
## Config Schema
```toml
[security.audit]
enabled = true
log_path = "~/.config/zeroclaw/audit.log"
max_size_mb = 100
rotate = "daily" # daily | weekly | size
# Tamper evidence
sign_events = true
signing_key_path = "~/.config/zeroclaw/audit.key"
# What to log
log_commands = true
log_file_access = true
log_auth_events = true
log_policy_violations = true
```
---
## Audit Query CLI
```bash
# Show all commands executed by @alice
zeroclaw audit --user @alice
# Show all high-risk commands
zeroclaw audit --risk high
# Show violations from last 24 hours
zeroclaw audit --since 24h --violations-only
# Export to JSON for analysis
zeroclaw audit --format json --output audit.json
# Verify log integrity
zeroclaw audit --verify-signatures
```
---
## Log Rotation
```rust
pub fn rotate_audit_log(log_path: &PathBuf, max_size: u64) -> anyhow::Result<()> {
let metadata = std::fs::metadata(log_path)?;
if metadata.len() < max_size {
return Ok(());
}
// Rotate: audit.log -> audit.log.1 -> audit.log.2 -> ...
let stem = log_path.file_stem().unwrap_or_default();
let extension = log_path.extension().and_then(|s| s.to_str()).unwrap_or("log");
for i in (1..10).rev() {
let old_name = format!("{}.{}.{}", stem, i, extension);
let new_name = format!("{}.{}.{}", stem, i + 1, extension);
let _ = std::fs::rename(old_name, new_name);
}
let rotated = format!("{}.1.{}", stem, extension);
std::fs::rename(log_path, &rotated)?;
Ok(())
}
```
---
## Implementation Priority
| Phase | Feature | Effort | Security Value |
|-------|---------|--------|----------------|
| **P0** | Basic event logging | Low | Medium |
| **P1** | Query CLI | Medium | Medium |
| **P2** | HMAC signing | Medium | High |
| **P3** | Log rotation + archival | Low | Medium |

110
docs/ci-map.md Normal file
View file

@ -0,0 +1,110 @@
# CI Workflow Map
This document explains what each GitHub workflow does, when it runs, and whether it should block merges.
## Merge-Blocking vs Optional
Merge-blocking checks should stay small and deterministic. Optional checks are useful for automation and maintenance, but should not block normal development.
### Merge-Blocking
- `.github/workflows/ci.yml` (`CI`)
- Purpose: Rust validation (`cargo fmt --all -- --check`, `cargo clippy --locked --all-targets -- -D clippy::correctness`, strict delta lint gate on changed Rust lines, `test`, release build smoke) + docs quality checks when docs change (`markdownlint` blocks only issues on changed lines; link check scans only links added on changed lines)
- Additional behavior: PRs that change `.github/workflows/**` require at least one approving review from a login in `WORKFLOW_OWNER_LOGINS` (repository variable fallback: `theonlyhennygod,willsarg`)
- Additional behavior: lint gates run before `test`/`build`; when lint/docs gates fail on PRs, CI posts an actionable feedback comment with failing gate names and local fix commands
- Merge gate: `CI Required Gate`
- `.github/workflows/workflow-sanity.yml` (`Workflow Sanity`)
- Purpose: lint GitHub workflow files (`actionlint`, tab checks)
- Recommended for workflow-changing PRs
- `.github/workflows/pr-intake-sanity.yml` (`PR Intake Sanity`)
- Purpose: safe pre-CI PR checks (template completeness, added-line tabs/trailing-whitespace/conflict markers) with immediate sticky feedback comment
### Non-Blocking but Important
- `.github/workflows/docker.yml` (`Docker`)
- Purpose: PR docker smoke check and publish images on `main`/tag pushes
- `.github/workflows/security.yml` (`Security Audit`)
- Purpose: dependency advisories (`cargo audit`) and policy/license checks (`cargo deny`)
- `.github/workflows/release.yml` (`Release`)
- Purpose: build tagged release artifacts and publish GitHub releases
- `.github/workflows/label-policy-sanity.yml` (`Label Policy Sanity`)
- Purpose: validate shared contributor-tier policy in `.github/label-policy.json` and ensure label workflows consume that policy
- `.github/workflows/rust-reusable.yml` (`Rust Reusable Job`)
- Purpose: reusable Rust setup/cache + command runner for workflow-call consumers
### Optional Repository Automation
- `.github/workflows/labeler.yml` (`PR Labeler`)
- Purpose: scope/path labels + size/risk labels + fine-grained module labels (`<module>: <component>`)
- Additional behavior: label descriptions are auto-managed as hover tooltips to explain each auto-judgment rule
- Additional behavior: provider-related keywords in provider/config/onboard/integration changes are promoted to `provider:*` labels (for example `provider:kimi`, `provider:deepseek`)
- Additional behavior: hierarchical de-duplication keeps only the most specific scope labels (for example `tool:composio` suppresses `tool:core` and `tool`)
- Additional behavior: module namespaces are compacted — one specific module keeps `prefix:component`; multiple specifics collapse to just `prefix`
- Additional behavior: applies contributor tiers on PRs by merged PR count (`trusted` >=5, `experienced` >=10, `principal` >=20, `distinguished` >=50)
- Additional behavior: final label set is priority-sorted (`risk:*` first, then `size:*`, then contributor tier, then module/path labels)
- Additional behavior: managed label colors follow display order to produce a smooth left-to-right gradient when many labels are present
- Manual governance: supports `workflow_dispatch` with `mode=audit|repair` to inspect/fix managed label metadata drift across the whole repository
- Additional behavior: risk + size labels are auto-corrected on manual PR label edits (`labeled`/`unlabeled` events); apply `risk: manual` when maintainers intentionally override automated risk selection
- High-risk heuristic paths: `src/security/**`, `src/runtime/**`, `src/gateway/**`, `src/tools/**`, `.github/workflows/**`
- Guardrail: maintainers can apply `risk: manual` to freeze automated risk recalculation
- `.github/workflows/auto-response.yml` (`PR Auto Responder`)
- Purpose: first-time contributor onboarding + label-driven response routing (`r:support`, `r:needs-repro`, etc.)
- Additional behavior: applies contributor tiers on issues by merged PR count (`trusted` >=5, `experienced` >=10, `principal` >=20, `distinguished` >=50), matching PR tier thresholds exactly
- Additional behavior: contributor-tier labels are treated as automation-managed (manual add/remove on PR/issue is auto-corrected)
- Guardrail: label-based close routes are issue-only; PRs are never auto-closed by route labels
- `.github/workflows/stale.yml` (`Stale`)
- Purpose: stale issue/PR lifecycle automation
- `.github/dependabot.yml` (`Dependabot`)
- Purpose: grouped, rate-limited dependency update PRs (Cargo + GitHub Actions)
- `.github/workflows/pr-hygiene.yml` (`PR Hygiene`)
- Purpose: nudge stale-but-active PRs to rebase/re-run required checks before queue starvation
## Trigger Map
- `CI`: push to `main`, PRs to `main`
- `Docker`: push to `main`, tag push (`v*`), PRs touching docker/workflow files, manual dispatch
- `Release`: tag push (`v*`)
- `Security Audit`: push to `main`, PRs to `main`, weekly schedule
- `Workflow Sanity`: PR/push when `.github/workflows/**`, `.github/*.yml`, or `.github/*.yaml` change
- `PR Intake Sanity`: `pull_request_target` on opened/reopened/synchronize/edited/ready_for_review
- `Label Policy Sanity`: PR/push when `.github/label-policy.json`, `.github/workflows/labeler.yml`, or `.github/workflows/auto-response.yml` changes
- `PR Labeler`: `pull_request_target` lifecycle events
- `PR Auto Responder`: issue opened/labeled, `pull_request_target` opened/labeled
- `Stale`: daily schedule, manual dispatch
- `Dependabot`: weekly dependency maintenance windows
- `PR Hygiene`: every 12 hours schedule, manual dispatch
## Fast Triage Guide
1. `CI Required Gate` failing: start with `.github/workflows/ci.yml`.
2. Docker failures on PRs: inspect `.github/workflows/docker.yml` `pr-smoke` job.
3. Release failures on tags: inspect `.github/workflows/release.yml`.
4. Security failures: inspect `.github/workflows/security.yml` and `deny.toml`.
5. Workflow syntax/lint failures: inspect `.github/workflows/workflow-sanity.yml`.
6. PR intake failures: inspect `.github/workflows/pr-intake-sanity.yml` sticky comment and run logs.
7. Label policy parity failures: inspect `.github/workflows/label-policy-sanity.yml`.
8. Docs failures in CI: inspect `docs-quality` job logs in `.github/workflows/ci.yml`.
9. Strict delta lint failures in CI: inspect `lint-strict-delta` job logs and compare with `BASE_SHA` diff scope.
## Maintenance Rules
- Keep merge-blocking checks deterministic and reproducible (`--locked` where applicable).
- Keep merge-blocking rust quality policy aligned across `.github/workflows/ci.yml`, `dev/ci.sh`, and `.githooks/pre-push` (`./scripts/ci/rust_quality_gate.sh` + `./scripts/ci/rust_strict_delta_gate.sh`).
- Use `./scripts/ci/rust_strict_delta_gate.sh` (or `./dev/ci.sh lint-delta`) as the incremental strict merge gate for changed Rust lines.
- Run full strict lint audits regularly via `./scripts/ci/rust_quality_gate.sh --strict` (for example through `./dev/ci.sh lint-strict`) and track cleanup in focused PRs.
- Keep docs markdown gating incremental via `./scripts/ci/docs_quality_gate.sh` (block changed-line issues, report baseline issues separately).
- Keep docs link gating incremental via `./scripts/ci/collect_changed_links.py` + lychee (check only links added on changed lines).
- Prefer explicit workflow permissions (least privilege).
- Keep Actions source policy restricted to approved allowlist patterns (see `docs/actions-source-policy.md`).
- Use path filters for expensive workflows when practical.
- Keep docs quality checks low-noise (incremental markdown + incremental added-link checks).
- Keep dependency update volume controlled (grouping + PR limits).
- Avoid mixing onboarding/community automation with merge-gating logic.
## Automation Side-Effect Controls
- Prefer deterministic automation that can be manually overridden (`risk: manual`) when context is nuanced.
- Keep auto-response comments deduplicated to prevent triage noise.
- Keep auto-close behavior scoped to issues; maintainers own PR close/merge decisions.
- If automation is wrong, correct labels first, then continue review with explicit rationale.
- Use `superseded` / `stale-candidate` labels to prune duplicate or dormant PRs before deep review.

View file

@ -0,0 +1,37 @@
# Arduino Uno
## Pin Aliases
| alias | pin |
|-------------|-----|
| red_led | 13 |
| builtin_led | 13 |
| user_led | 13 |
## Overview
Arduino Uno is a microcontroller board based on the ATmega328P. It has 14 digital I/O pins (0–13) and 6 analog inputs (A0–A5).
## Digital Pins
- **Pins 0–13:** Digital I/O. Can be INPUT or OUTPUT.
- **Pin 13:** Built-in LED (onboard). Connect LED to GND or use for output.
- **Pins 0–1:** Also used for Serial (RX/TX). Avoid if using Serial.
## GPIO
- `digitalWrite(pin, HIGH)` or `digitalWrite(pin, LOW)` for output.
- `digitalRead(pin)` for input (returns 0 or 1).
- Pin numbers in ZeroClaw protocol: 0–13.
## Serial
- UART on pins 0 (RX) and 1 (TX).
- USB via ATmega16U2 or CH340 (clones).
- Baud rate: 115200 for ZeroClaw firmware.
## ZeroClaw Tools
- `gpio_read`: Read pin value (0 or 1).
- `gpio_write`: Set pin high (1) or low (0).
- `arduino_upload`: Agent generates full Arduino sketch code; ZeroClaw compiles and uploads it via arduino-cli. Use for "make a heart", custom patterns — agent writes the code, no manual editing. Pin 13 = built-in LED.

22
docs/datasheets/esp32.md Normal file
View file

@ -0,0 +1,22 @@
# ESP32 GPIO Reference
## Pin Aliases
| alias | pin |
|-------------|-----|
| builtin_led | 2 |
| red_led | 2 |
## Common pins (ESP32 / ESP32-C3)
- **GPIO 2**: Built-in LED on many dev boards (output)
- **GPIO 13**: General-purpose output
- **GPIO 21/20**: Often used for UART0 TX/RX (avoid if using serial)
## Protocol
ZeroClaw host sends JSON over serial (115200 baud):
- `gpio_read`: `{"id":"1","cmd":"gpio_read","args":{"pin":13}}`
- `gpio_write`: `{"id":"1","cmd":"gpio_write","args":{"pin":13,"value":1}}`
Response: `{"id":"1","ok":true,"result":"0"}` or `{"id":"1","ok":true,"result":"done"}`

View file

@ -0,0 +1,16 @@
# Nucleo-F401RE GPIO
## Pin Aliases
| alias | pin |
|-------------|-----|
| red_led | 13 |
| user_led | 13 |
| ld2 | 13 |
| builtin_led | 13 |
## GPIO
Pin 13: User LED (LD2)
- Output, active high
- PA5 on STM32F401

View file

@ -0,0 +1,312 @@
# Frictionless Security: Zero Impact on Wizard
## Core Principle
> **"Security features should be like airbags — present, protective, and invisible until needed."**
## Design: Silent Auto-Detection
### 1. No New Wizard Steps (Stays 9 Steps, < 60 Seconds)
```rust
// Wizard remains UNCHANGED
// Security features auto-detect in background
pub fn run_wizard() -> Result<Config> {
// ... existing 9 steps, no changes ...
let config = Config {
// ... existing fields ...
// NEW: Auto-detected security (not shown in wizard)
security: SecurityConfig::autodetect(), // Silent!
};
config.save()?;
Ok(config)
}
```
### 2. Auto-Detection Logic (Runs Once at First Start)
```rust
// src/security/detect.rs
impl SecurityConfig {
/// Detect available sandboxing and enable automatically
/// Returns smart defaults based on platform + available tools
pub fn autodetect() -> Self {
Self {
// Sandbox: prefer Landlock (native), then Firejail, then none
sandbox: SandboxConfig::autodetect(),
// Resource limits: always enable monitoring
resources: ResourceLimits::default(),
// Audit: enable by default, log to config dir
audit: AuditConfig::default(),
// Everything else: safe defaults
..SecurityConfig::default()
}
}
}
impl SandboxConfig {
pub fn autodetect() -> Self {
#[cfg(target_os = "linux")]
{
// Prefer Landlock (native, no dependency)
if Self::probe_landlock() {
return Self {
enabled: true,
backend: SandboxBackend::Landlock,
..Self::default()
};
}
// Fallback: Firejail if installed
if Self::probe_firejail() {
return Self {
enabled: true,
backend: SandboxBackend::Firejail,
..Self::default()
};
}
}
#[cfg(target_os = "macos")]
{
// Try Bubblewrap on macOS
if Self::probe_bubblewrap() {
return Self {
enabled: true,
backend: SandboxBackend::Bubblewrap,
..Self::default()
};
}
}
// Fallback: disabled (but still has application-layer security)
Self {
enabled: false,
backend: SandboxBackend::None,
..Self::default()
}
}
#[cfg(target_os = "linux")]
fn probe_landlock() -> bool {
// Try creating a minimal Landlock ruleset
// If it works, kernel supports Landlock
landlock::Ruleset::new()
.set_access_fs(landlock::AccessFS::read_file)
.add_path(Path::new("/tmp"), landlock::AccessFS::read_file)
.map(|ruleset| ruleset.restrict_self().is_ok())
.unwrap_or(false)
}
fn probe_firejail() -> bool {
// Check if firejail command exists
std::process::Command::new("firejail")
.arg("--version")
.output()
.map(|o| o.status.success())
.unwrap_or(false)
}
}
```
### 3. First Run: Silent Logging
```bash
$ zeroclaw agent -m "hello"
# First time: silent detection
[INFO] Detecting security features...
[INFO] ✓ Landlock sandbox enabled (kernel 6.2+)
[INFO] ✓ Memory monitoring active (512MB limit)
[INFO] ✓ Audit logging enabled (~/.config/zeroclaw/audit.log)
# Subsequent runs: quiet
$ zeroclaw agent -m "hello"
[agent] Thinking...
```
### 4. Config File: All Defaults Hidden
```toml
# ~/.config/zeroclaw/config.toml
# These sections are NOT written unless user customizes
# [security.sandbox]
# enabled = true # (default, auto-detected)
# backend = "landlock" # (default, auto-detected)
# [security.resources]
# max_memory_mb = 512 # (default)
# [security.audit]
# enabled = true # (default)
```
Only when user changes something:
```toml
[security.sandbox]
enabled = false # User explicitly disabled
[security.resources]
max_memory_mb = 1024 # User increased limit
```
### 5. Advanced Users: Explicit Control
```bash
# Check what's active
$ zeroclaw security --status
Security Status:
✓ Sandbox: Landlock (Linux kernel 6.2)
✓ Memory monitoring: 512MB limit
✓ Audit logging: ~/.config/zeroclaw/audit.log
→ 47 events logged today
# Disable sandbox explicitly (writes to config)
$ zeroclaw config set security.sandbox.enabled false
# Enable specific backend
$ zeroclaw config set security.sandbox.backend firejail
# Adjust limits
$ zeroclaw config set security.resources.max_memory_mb 2048
```
### 6. Graceful Degradation
| Platform | Best Available | Fallback | Worst Case |
|----------|---------------|----------|------------|
| **Linux 5.13+** | Landlock | None | App-layer only |
| **Linux (any)** | Firejail | Landlock | App-layer only |
| **macOS** | Bubblewrap | None | App-layer only |
| **Windows** | None | - | App-layer only |
**App-layer security is always present** — this is the existing allowlist/path blocking/injection protection that's already comprehensive.
---
## Config Schema Extension
```rust
// src/config/schema.rs
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityConfig {
/// Sandbox configuration (auto-detected if not set)
#[serde(default)]
pub sandbox: SandboxConfig,
/// Resource limits (defaults applied if not set)
#[serde(default)]
pub resources: ResourceLimits,
/// Audit logging (enabled by default)
#[serde(default)]
pub audit: AuditConfig,
}
impl Default for SecurityConfig {
fn default() -> Self {
Self {
sandbox: SandboxConfig::autodetect(), // Silent detection!
resources: ResourceLimits::default(),
audit: AuditConfig::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SandboxConfig {
/// Enable sandboxing (default: auto-detected)
#[serde(default)]
pub enabled: Option<bool>, // None = auto-detect
/// Sandbox backend (default: auto-detect)
#[serde(default)]
pub backend: SandboxBackend,
/// Custom Firejail args (optional)
#[serde(default)]
pub firejail_args: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SandboxBackend {
Auto, // Auto-detect (default)
Landlock, // Linux kernel LSM
Firejail, // User-space sandbox
Bubblewrap, // User namespaces
Docker, // Container (heavy)
None, // Disabled
}
impl Default for SandboxBackend {
fn default() -> Self {
Self::Auto // Always auto-detect by default
}
}
```
---
## User Experience Comparison
### Before (Current)
```bash
$ zeroclaw onboard
[1/9] Workspace Setup...
[2/9] AI Provider...
...
[9/9] Workspace Files...
✓ Security: Supervised | workspace-scoped
```
### After (With Frictionless Security)
```bash
$ zeroclaw onboard
[1/9] Workspace Setup...
[2/9] AI Provider...
...
[9/9] Workspace Files...
✓ Security: Supervised | workspace-scoped | Landlock sandbox ✓
# ↑ Just one extra word, silent auto-detection!
```
### Advanced User (Explicit Control)
```bash
$ zeroclaw onboard --security-level paranoid
[1/9] Workspace Setup...
...
✓ Security: Paranoid | Landlock + Firejail | Audit signed
```
---
## Backward Compatibility
| Scenario | Behavior |
|----------|----------|
| **Existing config** | Works unchanged, new features opt-in |
| **New install** | Auto-detects and enables available security |
| **No sandbox available** | Falls back to app-layer (still secure) |
| **User disables** | One config flag: `sandbox.enabled = false` |
---
## Summary
**Zero impact on wizard** — stays 9 steps, < 60 seconds
**Zero new prompts** — silent auto-detection
**Zero breaking changes** — backward compatible
**Opt-out available** — explicit config flags
**Status visibility** — `zeroclaw security --status`
The wizard remains "quick setup universal applications" — security is just **quietly better**.

View file

@ -0,0 +1,324 @@
# Hardware Peripherals Design — ZeroClaw
ZeroClaw enables microcontrollers (MCUs) and Single Board Computers (SBCs) to **dynamically interpret natural language commands**, generate hardware-specific code, and execute peripheral interactions in real-time.
## 1. Vision
**Goal:** ZeroClaw acts as a hardware-aware AI agent that:
- Receives natural language triggers (e.g. "Move X arm", "Turn on LED") via channels (WhatsApp, Telegram)
- Fetches accurate hardware documentation (datasheets, register maps)
- Synthesizes Rust code/logic using an LLM (Gemini, local open-source models)
- Executes the logic to manipulate peripherals (GPIO, I2C, SPI)
- Persists optimized code for future reuse
**Mental model:** ZeroClaw = brain that understands hardware. Peripherals = arms and legs it controls.
## 2. Two Modes of Operation
### Mode 1: Edge-Native (Standalone)
**Target:** Wi-Fi-enabled boards (ESP32, Raspberry Pi).
ZeroClaw runs **directly on the device**. The board spins up a gRPC/nanoRPC server and communicates with peripherals locally.
```
┌─────────────────────────────────────────────────────────────────────────────┐
│ ZeroClaw on ESP32 / Raspberry Pi (Edge-Native) │
│ │
│ ┌─────────────┐ ┌──────────────┐ ┌─────────────────────────────────┐ │
│ │ Channels │───►│ Agent Loop │───►│ RAG: datasheets, register maps │ │
│ │ WhatsApp │ │ (LLM calls) │ │ → LLM context │ │
│ │ Telegram │ └──────┬───────┘ └─────────────────────────────────┘ │
│ └─────────────┘ │ │
│ ▼ │
│ ┌─────────────────────────────────────────────────────────────────────────┐│
│ │ Code synthesis → Wasm / dynamic exec → GPIO / I2C / SPI → persist ││
│ └─────────────────────────────────────────────────────────────────────────┘│
│ │
│ gRPC/nanoRPC server ◄──► Peripherals (GPIO, I2C, SPI, sensors, actuators) │
└─────────────────────────────────────────────────────────────────────────────┘
```
**Workflow:**
1. User sends WhatsApp: *"Turn on LED on pin 13"*
2. ZeroClaw fetches board-specific docs (e.g. ESP32 GPIO mapping)
3. LLM synthesizes Rust code
4. Code runs in a sandbox (Wasm or dynamic linking)
5. GPIO is toggled; result returned to user
6. Optimized code is persisted for future "Turn on LED" requests
**All happens on-device.** No host required.
### Mode 2: Host-Mediated (Development / Debugging)
**Target:** Hardware connected via USB / J-Link / Aardvark to a host (macOS, Linux).
ZeroClaw runs on the **host** and maintains a hardware-aware link to the target. Used for development, introspection, and flashing.
```
┌─────────────────────┐ ┌──────────────────────────────────┐
│ ZeroClaw on Mac │ USB / J-Link / │ STM32 Nucleo-F401RE │
│ │ Aardvark │ (or other MCU) │
│ - Channels │ ◄────────────────► │ - Memory map │
│ - LLM │ │ - Peripherals (GPIO, ADC, I2C) │
│ - Hardware probe │ VID/PID │ - Flash / RAM │
│ - Flash / debug │ discovery │ │
└─────────────────────┘ └──────────────────────────────────┘
```
**Workflow:**
1. User sends Telegram: *"What are the readable memory addresses on this USB device?"*
2. ZeroClaw identifies connected hardware (VID/PID, architecture)
3. Performs memory mapping; suggests available address spaces
4. Returns result to user
**Or:**
1. User: *"Flash this firmware to the Nucleo"*
2. ZeroClaw writes/flashes via OpenOCD or probe-rs
3. Confirms success
**Or:**
1. ZeroClaw auto-discovers: *"STM32 Nucleo on /dev/ttyACM0, ARM Cortex-M4"*
2. Suggests: *"I can read/write GPIO, ADC, flash. What would you like to do?"*
---
### Mode Comparison
| Aspect | Edge-Native | Host-Mediated |
|------------------|--------------------------------|----------------------------------|
| ZeroClaw runs on | Device (ESP32, RPi) | Host (Mac, Linux) |
| Hardware link | Local (GPIO, I2C, SPI) | USB, J-Link, Aardvark |
| LLM | On-device or cloud (Gemini) | Host (cloud or local) |
| Use case | Production, standalone | Dev, debug, introspection |
| Channels | WhatsApp, etc. (via WiFi) | Telegram, CLI, etc. |
## 3. Legacy / Simpler Modes (Pre-LLM-on-Edge)
For boards without WiFi or before full Edge-Native is ready:
### Mode A: Host + Remote Peripheral (STM32 via serial)
Host runs ZeroClaw; peripheral runs minimal firmware. Simple JSON over serial.
### Mode B: RPi as Host (Native GPIO)
ZeroClaw on Pi; GPIO via rppal or sysfs. No separate firmware.
## 4. Technical Requirements
| Requirement | Description |
|-------------|-------------|
| **Language** | Pure Rust. `no_std` where applicable for embedded targets (STM32, ESP32). |
| **Communication** | Lightweight gRPC or nanoRPC stack for low-latency command processing. |
| **Dynamic execution** | Safely run LLM-generated logic on-the-fly: Wasm runtime for isolation, or dynamic linking where supported. |
| **Documentation retrieval** | RAG (Retrieval-Augmented Generation) pipeline to feed datasheet snippets, register maps, and pinouts into LLM context. |
| **Hardware discovery** | VID/PID-based identification for USB devices; architecture detection (ARM Cortex-M, RISC-V, etc.). |
### RAG Pipeline (Datasheet Retrieval)
- **Index:** Datasheets, reference manuals, register maps (PDF → chunks, embeddings).
- **Retrieve:** On user query ("turn on LED"), fetch relevant snippets (e.g. GPIO section for target board).
- **Inject:** Add to LLM system prompt or context.
- **Result:** LLM generates accurate, board-specific code.
### Dynamic Execution Options
| Option | Pros | Cons |
|-------|------|------|
| **Wasm** | Sandboxed, portable, no FFI | Overhead; limited HW access from Wasm |
| **Dynamic linking** | Native speed, full HW access | Platform-specific; security concerns |
| **Interpreted DSL** | Safe, auditable | Slower; limited expressiveness |
| **Pre-compiled templates** | Fast, secure | Less flexible; requires template library |
**Recommendation:** Start with pre-compiled templates + parameterization; evolve to Wasm for user-defined logic once stable.
## 5. CLI and Config
### CLI Flags
```bash
# Edge-Native: run on device (ESP32, RPi)
zeroclaw agent --mode edge
# Host-Mediated: connect to USB/J-Link target
zeroclaw agent --peripheral nucleo-f401re:/dev/ttyACM0
zeroclaw agent --probe jlink
# Hardware introspection
zeroclaw hardware discover
zeroclaw hardware introspect /dev/ttyACM0
```
### Config (config.toml)
```toml
[peripherals]
enabled = true
mode = "host" # "edge" | "host"
datasheet_dir = "docs/datasheets" # RAG: board-specific docs for LLM context
[[peripherals.boards]]
board = "nucleo-f401re"
transport = "serial"
path = "/dev/ttyACM0"
baud = 115200
[[peripherals.boards]]
board = "rpi-gpio"
transport = "native"
[[peripherals.boards]]
board = "esp32"
transport = "wifi"
# Edge-Native: ZeroClaw runs on ESP32
```
## 6. Architecture: Peripheral as Extension Point
### New Trait: `Peripheral`
```rust
/// A hardware peripheral that exposes capabilities as tools.
#[async_trait]
pub trait Peripheral: Send + Sync {
fn name(&self) -> &str;
fn board_type(&self) -> &str; // e.g. "nucleo-f401re", "rpi-gpio"
async fn connect(&mut self) -> anyhow::Result<()>;
async fn disconnect(&mut self) -> anyhow::Result<()>;
async fn health_check(&self) -> bool;
/// Tools this peripheral provides (gpio_read, gpio_write, sensor_read, etc.)
fn tools(&self) -> Vec<Box<dyn Tool>>;
}
```
### Flow
1. **Startup:** ZeroClaw loads config, sees `peripherals.boards`.
2. **Connect:** For each board, create a `Peripheral` impl, call `connect()`.
3. **Tools:** Collect tools from all connected peripherals; merge with default tools.
4. **Agent loop:** Agent can call `gpio_write`, `sensor_read`, etc. — these delegate to the peripheral.
5. **Shutdown:** Call `disconnect()` on each peripheral.
### Board Support
| Board | Transport | Firmware / Driver | Tools |
|--------------------|-----------|------------------------|--------------------------|
| nucleo-f401re | serial | Zephyr / Embassy | gpio_read, gpio_write, adc_read |
| rpi-gpio | native | rppal or sysfs | gpio_read, gpio_write |
| esp32 | serial/ws | ESP-IDF / Embassy | gpio, wifi, mqtt |
## 7. Communication Protocols
### gRPC / nanoRPC (Edge-Native, Host-Mediated)
For low-latency, typed RPC between ZeroClaw and peripherals:
- **nanoRPC** or **tonic** (gRPC): Protobuf-defined services.
- Methods: `GpioWrite`, `GpioRead`, `I2cTransfer`, `SpiTransfer`, `MemoryRead`, `FlashWrite`, etc.
- Enables streaming, bidirectional calls, and code generation from `.proto` files.
### Serial Fallback (Host-Mediated, legacy)
Simple JSON over serial for boards without gRPC support:
**Request (host → peripheral):**
```json
{"id":"1","cmd":"gpio_write","args":{"pin":13,"value":1}}
```
**Response (peripheral → host):**
```json
{"id":"1","ok":true,"result":"done"}
```
## 8. Firmware (Separate Repo or Crate)
- **zeroclaw-firmware** or **zeroclaw-peripheral** — a separate crate/workspace.
- Targets: `thumbv7em-none-eabihf` (STM32), `armv7-unknown-linux-gnueabihf` (RPi), etc.
- Uses `embassy` or Zephyr for STM32.
- Implements the protocol above.
- User flashes this to the board; ZeroClaw connects and discovers capabilities.
## 9. Implementation Phases
### Phase 1: Skeleton ✅ (Done)
- [x] Add `Peripheral` trait, config schema, CLI (`zeroclaw peripheral list/add`)
- [x] Add `--peripheral` flag to agent
- [x] Document in AGENTS.md
### Phase 2: Host-Mediated — Hardware Discovery ✅ (Done)
- [x] `zeroclaw hardware discover`: enumerate USB devices (VID/PID)
- [x] Board registry: map VID/PID → architecture, name (e.g. Nucleo-F401RE)
- [x] `zeroclaw hardware introspect <path>`: memory map, peripheral list
### Phase 3: Host-Mediated — Serial / J-Link
- [x] `SerialPeripheral` for STM32 over USB CDC
- [ ] probe-rs or OpenOCD integration for flash/debug
- [x] Tools: `gpio_read`, `gpio_write` (memory_read, flash_write in future)
### Phase 4: RAG Pipeline ✅ (Done)
- [x] Datasheet index (markdown/text → chunks)
- [x] Retrieve-and-inject into LLM context on hardware-related queries
- [x] Board-specific prompt augmentation
**Usage:** Add `datasheet_dir = "docs/datasheets"` to `[peripherals]` in config.toml. Place `.md` or `.txt` files named by board (e.g. `nucleo-f401re.md`, `rpi-gpio.md`). Files in `_generic/` or named `generic.md` apply to all boards. Chunks are retrieved by keyword match and injected into the user message context.
### Phase 5: Edge-Native — RPi ✅ (Done)
- [x] ZeroClaw on Raspberry Pi (native GPIO via rppal)
- [ ] gRPC/nanoRPC server for local peripheral access
- [ ] Code persistence (store synthesized snippets)
### Phase 6: Edge-Native — ESP32
- [x] Host-mediated ESP32 (serial transport) — same JSON protocol as STM32
- [x] `zeroclaw-esp32` firmware crate (`firmware/zeroclaw-esp32`) — GPIO over UART
- [x] ESP32 in hardware registry (CH340 VID/PID)
- [ ] ZeroClaw *on* ESP32 (WiFi + LLM, edge-native) — future
- [ ] Wasm or template-based execution for LLM-generated logic
**Usage:** Flash `firmware/zeroclaw-esp32` to ESP32, add `board = "esp32"`, `transport = "serial"`, `path = "/dev/ttyUSB0"` to config.
### Phase 7: Dynamic Execution (LLM-Generated Code)
- [ ] Template library: parameterized GPIO/I2C/SPI snippets
- [ ] Optional: Wasm runtime for user-defined logic (sandboxed)
- [ ] Persist and reuse optimized code paths
## 10. Security Considerations
- **Serial path:** Validate `path` is in allowlist (e.g. `/dev/ttyACM*`, `/dev/ttyUSB*`); never arbitrary paths.
- **GPIO:** Restrict which pins are exposed; avoid power/reset pins.
- **No secrets on peripheral:** Firmware should not store API keys; host handles auth.
## 11. Non-Goals (For Now)
- Running full ZeroClaw *on* bare STM32 (no WiFi, limited RAM) — use Host-Mediated instead
- Real-time guarantees — peripherals are best-effort
- Arbitrary native code execution from LLM — prefer Wasm or templates
## 12. Related Documents
- [adding-boards-and-tools.md](./adding-boards-and-tools.md) — How to add boards and datasheets
- [network-deployment.md](./network-deployment.md) — RPi and network deployment
## 13. References
- [Zephyr RTOS Rust support](https://docs.zephyrproject.org/latest/develop/languages/rust/index.html)
- [Embassy](https://embassy.dev/) — async embedded framework
- [rppal](https://github.com/golemparts/rppal) — Raspberry Pi GPIO in Rust
- [STM32 Nucleo-F401RE](https://www.st.com/en/evaluation-tools/nucleo-f401re.html)
- [tonic](https://github.com/hyperium/tonic) — gRPC for Rust
- [probe-rs](https://probe.rs/) — ARM debug probe, flash, memory access
- [nusb](https://github.com/kevinmehall/nusb) — USB device enumeration (VID/PID)
## 14. Raw Prompt Summary
> *"Boards like ESP, Raspberry Pi, or boards with WiFi can connect to an LLM (Gemini or open-source). ZeroClaw runs on the device, creates its own gRPC, spins it up, and communicates with peripherals. User asks via WhatsApp: 'move X arm' or 'turn on LED'. ZeroClaw gets accurate documentation, writes code, executes it, stores it optimally, runs it, and turns on the LED — all on the development board.*
>
> *For STM Nucleo connected via USB/J-Link/Aardvark to my Mac: ZeroClaw from my Mac accesses the hardware, installs or writes what it wants on the device, and returns the result. Example: 'Hey ZeroClaw, what are the available/readable addresses on this USB device?' It can figure out what's connected where and suggest."*

View file

@ -0,0 +1,239 @@
# LangGraph Integration Guide
This guide explains how to use the `zeroclaw-tools` Python package for consistent tool calling with any OpenAI-compatible LLM provider.
## Background
Some LLM providers, particularly Chinese models like GLM-5 (Zhipu AI), have inconsistent tool calling behavior when using text-based tool invocation. ZeroClaw's Rust core uses structured tool calling via the OpenAI API format, but some models respond better to a different approach.
LangGraph provides a stateful graph execution engine that guarantees consistent tool calling behavior regardless of the underlying model's native capabilities.
## Architecture
```
┌─────────────────────────────────────────────────────────────┐
│ Your Application │
├─────────────────────────────────────────────────────────────┤
│ zeroclaw-tools Agent │
│ │
│ ┌─────────────────────────────────────────────────────┐ │
│ │ LangGraph StateGraph │ │
│ │ │ │
│ │ ┌────────────┐ ┌────────────┐ │ │
│ │ │ Agent │ ──────▶ │ Tools │ │ │
│ │ │ Node │ ◀────── │ Node │ │ │
│ │ └────────────┘ └────────────┘ │ │
│ │ │ │ │ │
│ │ ▼ ▼ │ │
│ │ [Continue?] [Execute Tool] │ │
│ │ │ │ │ │
│ │ Yes │ No Result│ │ │
│ │ ▼ ▼ │ │
│ │ [END] [Back to Agent] │ │
│ │ │ │
│ └─────────────────────────────────────────────────────┘ │
│ │
├─────────────────────────────────────────────────────────────┤
│ OpenAI-Compatible LLM Provider │
│ (Z.AI, OpenRouter, Groq, DeepSeek, Ollama, etc.) │
└─────────────────────────────────────────────────────────────┘
```
## Quick Start
### Installation
```bash
pip install zeroclaw-tools
```
### Basic Usage
```python
import asyncio
from zeroclaw_tools import create_agent, shell, file_read, file_write
from langchain_core.messages import HumanMessage
async def main():
agent = create_agent(
tools=[shell, file_read, file_write],
model="glm-5",
api_key="your-api-key",
base_url="https://api.z.ai/api/coding/paas/v4"
)
result = await agent.ainvoke({
"messages": [HumanMessage(content="Read /etc/hostname and tell me the machine name")]
})
print(result["messages"][-1].content)
asyncio.run(main())
```
## Available Tools
### Core Tools
| Tool | Description |
|------|-------------|
| `shell` | Execute shell commands |
| `file_read` | Read file contents |
| `file_write` | Write content to files |
### Extended Tools
| Tool | Description |
|------|-------------|
| `web_search` | Search the web (requires `BRAVE_API_KEY`) |
| `http_request` | Make HTTP requests |
| `memory_store` | Store data in persistent memory |
| `memory_recall` | Recall stored data |
## Custom Tools
Create your own tools with the `@tool` decorator:
```python
from zeroclaw_tools import tool, create_agent
@tool
def get_weather(city: str) -> str:
"""Get the current weather for a city."""
# Your implementation
return f"Weather in {city}: Sunny, 25°C"
@tool
def query_database(sql: str) -> str:
"""Execute a SQL query and return results."""
# Your implementation
return "Query returned 5 rows"
agent = create_agent(
tools=[get_weather, query_database],
model="glm-5",
api_key="your-key"
)
```
## Provider Configuration
### Z.AI / GLM-5
```python
agent = create_agent(
model="glm-5",
api_key="your-zhipu-key",
base_url="https://api.z.ai/api/coding/paas/v4"
)
```
### OpenRouter
```python
agent = create_agent(
model="anthropic/claude-3.5-sonnet",
api_key="your-openrouter-key",
base_url="https://openrouter.ai/api/v1"
)
```
### Groq
```python
agent = create_agent(
model="llama-3.3-70b-versatile",
api_key="your-groq-key",
base_url="https://api.groq.com/openai/v1"
)
```
### Ollama (Local)
```python
agent = create_agent(
model="llama3.2",
base_url="http://localhost:11434/v1"
)
```
## Discord Bot Integration
```python
import os
from zeroclaw_tools.integrations import DiscordBot
bot = DiscordBot(
token=os.environ["DISCORD_TOKEN"],
guild_id=123456789, # Your Discord server ID
allowed_users=["123456789"], # User IDs that can use the bot
api_key=os.environ["API_KEY"],
model="glm-5"
)
bot.run()
```
## CLI Usage
```bash
# Set environment variables
export API_KEY="your-key"
export BRAVE_API_KEY="your-brave-key" # Optional, for web search
# Single message
zeroclaw-tools "What is the current date?"
# Interactive mode
zeroclaw-tools -i
```
## Comparison with Rust ZeroClaw
| Aspect | Rust ZeroClaw | zeroclaw-tools |
|--------|---------------|-----------------|
| **Performance** | Ultra-fast (~10ms startup) | Python startup (~500ms) |
| **Memory** | <5 MB | ~50 MB |
| **Binary size** | ~3.4 MB | pip package |
| **Tool consistency** | Model-dependent | LangGraph guarantees |
| **Extensibility** | Rust traits | Python decorators |
| **Ecosystem** | Rust crates | PyPI packages |
**When to use Rust ZeroClaw:**
- Production edge deployments
- Resource-constrained environments (Raspberry Pi, etc.)
- Maximum performance requirements
**When to use zeroclaw-tools:**
- Models with inconsistent native tool calling
- Python-centric development
- Rapid prototyping
- Integration with Python ML ecosystem
## Troubleshooting
### "API key required" error
Set the `API_KEY` environment variable or pass `api_key` to `create_agent()`.
### Tool calls not executing
Ensure your model supports function calling. Some older models may not support tools.
### Rate limiting
Add delays between calls or implement your own rate limiting:
```python
import asyncio
for message in messages:
result = await agent.ainvoke({"messages": [message]})
await asyncio.sleep(1) # Rate limit
```
## Related Projects
- [rs-graph-llm](https://github.com/a-agmon/rs-graph-llm) - Rust LangGraph alternative
- [langchain-rust](https://github.com/Abraxas-365/langchain-rust) - LangChain for Rust
- [llm-chain](https://github.com/sobelio/llm-chain) - LLM chains in Rust

48
docs/mattermost-setup.md Normal file
View file

@ -0,0 +1,48 @@
# Mattermost Integration Guide
ZeroClaw supports native integration with Mattermost via its REST API v4. This integration is ideal for self-hosted, private, or air-gapped environments where sovereign communication is a requirement.
## Prerequisites
1. **Mattermost Server**: A running Mattermost instance (self-hosted or cloud).
2. **Bot Account**:
- Go to **Main Menu > Integrations > Bot Accounts**.
- Click **Add Bot Account**.
- Set a username (e.g., `zeroclaw-bot`).
- Enable **post:all** and **channel:read** permissions (or appropriate scopes).
- Save the **Access Token**.
3. **Channel ID**:
- Open the Mattermost channel you want the bot to monitor.
- Click the channel header and select **View Info**.
- Copy the **ID** (e.g., `7j8k9l...`).
## Configuration
Add the following to your `config.toml` under the `[channels]` section:
```toml
[channels.mattermost]
url = "https://mm.your-domain.com"
bot_token = "your-bot-access-token"
channel_id = "your-channel-id"
allowed_users = ["user-id-1", "user-id-2"]
```
### Configuration Fields
| Field | Description |
|---|---|
| `url` | The base URL of your Mattermost server. |
| `bot_token` | The Personal Access Token for the bot account. |
| `channel_id` | (Optional) The ID of the channel to listen to. Required for `listen` mode. |
| `allowed_users` | (Optional) A list of Mattermost User IDs permitted to interact with the bot. Use `["*"]` to allow everyone. |
## Threaded Conversations
ZeroClaw automatically supports Mattermost threads.
- If a user sends a message in a thread, ZeroClaw will reply within that same thread.
- If a user sends a top-level message, ZeroClaw will start a thread by replying to that post.
## Security Note
Mattermost integration is designed for **sovereign communication**. By hosting your own Mattermost server, your agent's communication history remains entirely within your own infrastructure, avoiding third-party cloud logging.

203
docs/network-deployment.md Normal file
View file

@ -0,0 +1,203 @@
# Network Deployment — ZeroClaw on Raspberry Pi and Local Network
This document covers deploying ZeroClaw on a Raspberry Pi or other host on your local network, with Telegram and optional webhook channels.
---
## 1. Overview
| Mode | Inbound port needed? | Use case |
|------|----------------------|----------|
| **Telegram polling** | No | ZeroClaw polls Telegram API; works from anywhere |
| **Discord/Slack** | No | Same — outbound only |
| **Gateway webhook** | Yes | POST /webhook, WhatsApp, etc. need a public URL |
| **Gateway pairing** | Yes | If you pair clients via the gateway |
**Key:** Telegram, Discord, and Slack use **long-polling** — ZeroClaw makes outbound requests. No port forwarding or public IP required.
---
## 2. ZeroClaw on Raspberry Pi
### 2.1 Prerequisites
- Raspberry Pi (3/4/5) with Raspberry Pi OS
- USB peripherals (Arduino, Nucleo) if using serial transport
- Optional: `rppal` for native GPIO (`peripheral-rpi` feature)
### 2.2 Install
```bash
# Build for RPi (or cross-compile from host)
cargo build --release --features hardware
# Or install via your preferred method
```
### 2.3 Config
Edit `~/.zeroclaw/config.toml`:
```toml
[peripherals]
enabled = true
[[peripherals.boards]]
board = "rpi-gpio"
transport = "native"
# Or Arduino over USB
[[peripherals.boards]]
board = "arduino-uno"
transport = "serial"
path = "/dev/ttyACM0"
baud = 115200
[channels_config.telegram]
bot_token = "YOUR_BOT_TOKEN"
allowed_users = []
[gateway]
host = "127.0.0.1"
port = 8080
allow_public_bind = false
```
### 2.4 Run Daemon (Local Only)
```bash
zeroclaw daemon --host 127.0.0.1 --port 8080
```
- Gateway binds to `127.0.0.1` — not reachable from other machines
- Telegram channel works: ZeroClaw polls Telegram API (outbound)
- No firewall or port forwarding needed
---
## 3. Binding to 0.0.0.0 (Local Network)
To allow other devices on your LAN to hit the gateway (e.g. for pairing or webhooks):
### 3.1 Option A: Explicit Opt-In
```toml
[gateway]
host = "0.0.0.0"
port = 8080
allow_public_bind = true
```
```bash
zeroclaw daemon --host 0.0.0.0 --port 8080
```
**Security:** `allow_public_bind = true` exposes the gateway to your local network. Only use on trusted LANs.
### 3.2 Option B: Tunnel (Recommended for Webhooks)
If you need a **public URL** (e.g. WhatsApp webhook, external clients):
1. Run gateway on localhost:
```bash
zeroclaw daemon --host 127.0.0.1 --port 8080
```
2. Start a tunnel:
```toml
[tunnel]
provider = "tailscale" # or "ngrok", "cloudflare"
```
Or use `zeroclaw tunnel` (see tunnel docs).
3. ZeroClaw will refuse `0.0.0.0` unless `allow_public_bind = true` or a tunnel is active.
---
## 4. Telegram Polling (No Inbound Port)
Telegram uses **long-polling** by default:
- ZeroClaw calls `https://api.telegram.org/bot{token}/getUpdates`
- No inbound port or public IP needed
- Works behind NAT, on RPi, in a home lab
**Config:**
```toml
[channels_config.telegram]
bot_token = "YOUR_BOT_TOKEN"
allowed_users = [] # deny-by-default, bind identities explicitly
```
Run `zeroclaw daemon` — Telegram channel starts automatically.
To approve one Telegram account at runtime:
```bash
zeroclaw channel bind-telegram <IDENTITY>
```
`<IDENTITY>` can be a numeric Telegram user ID or a username (without `@`).
### 4.1 Single Poller Rule (Important)
Telegram Bot API `getUpdates` supports only one active poller per bot token.
- Keep one runtime instance for the same token (recommended: `zeroclaw daemon` service).
- Do not run `cargo run -- channel start` or another bot process at the same time.
If you hit this error:
`Conflict: terminated by other getUpdates request`
you have a polling conflict. Stop extra instances and restart only one daemon.
---
## 5. Webhook Channels (WhatsApp, Custom)
Webhook-based channels need a **public URL** so Meta (WhatsApp) or your client can POST events.
### 5.1 Tailscale Funnel
```toml
[tunnel]
provider = "tailscale"
```
Tailscale Funnel exposes your gateway via a `*.ts.net` URL. No port forwarding.
### 5.2 ngrok
```toml
[tunnel]
provider = "ngrok"
```
Or run ngrok manually:
```bash
ngrok http 8080
# Use the HTTPS URL for your webhook
```
### 5.3 Cloudflare Tunnel
Configure Cloudflare Tunnel to forward to `127.0.0.1:8080`, then set your webhook URL to the tunnel's public hostname.
---
## 6. Checklist: RPi Deployment
- [ ] Build with `--features hardware` (and `peripheral-rpi` if using native GPIO)
- [ ] Configure `[peripherals]` and `[channels_config.telegram]`
- [ ] Run `zeroclaw daemon --host 127.0.0.1 --port 8080` (Telegram works without 0.0.0.0)
- [ ] For LAN access: `--host 0.0.0.0` + `allow_public_bind = true` in config
- [ ] For webhooks: use Tailscale, ngrok, or Cloudflare tunnel
---
## 7. References
- [hardware-peripherals-design.md](./hardware-peripherals-design.md) — Peripherals design
- [adding-boards-and-tools.md](./adding-boards-and-tools.md) — Hardware setup and adding boards

147
docs/nucleo-setup.md Normal file
View file

@ -0,0 +1,147 @@
# ZeroClaw on Nucleo-F401RE — Step-by-Step Guide
Run ZeroClaw on your Mac or Linux host. Connect a Nucleo-F401RE via USB. Control GPIO (LED, pins) via Telegram or CLI.
---
## Get Board Info via Telegram (No Firmware Needed)
ZeroClaw can read chip info from the Nucleo over USB **without flashing any firmware**. Message your Telegram bot:
- *"What board info do I have?"*
- *"Board info"*
- *"What hardware is connected?"*
- *"Chip info"*
The agent uses the `hardware_board_info` tool to return chip name, architecture, and memory map. With the `probe` feature, it reads live data via USB/SWD; otherwise it returns static datasheet info.
**Config:** Add Nucleo to `config.toml` first (so the agent knows which board to query):
```toml
[[peripherals.boards]]
board = "nucleo-f401re"
transport = "serial"
path = "/dev/ttyACM0"
baud = 115200
```
**CLI alternative:**
```bash
cargo build --features hardware,probe
zeroclaw hardware info
zeroclaw hardware discover
```
---
## What's Included (No Code Changes Needed)
ZeroClaw includes everything for Nucleo-F401RE:
| Component | Location | Purpose |
|-----------|----------|---------|
| Firmware | `firmware/zeroclaw-nucleo/` | Embassy Rust — USART2 (115200), gpio_read, gpio_write |
| Serial peripheral | `src/peripherals/serial.rs` | JSON-over-serial protocol (same as Arduino/ESP32) |
| Flash command | `zeroclaw peripheral flash-nucleo` | Builds firmware, flashes via probe-rs |
Protocol: newline-delimited JSON. Request: `{"id":"1","cmd":"gpio_write","args":{"pin":13,"value":1}}`. Response: `{"id":"1","ok":true,"result":"done"}`.
---
## Prerequisites
- Nucleo-F401RE board
- USB cable (USB-A to Mini-USB; Nucleo has built-in ST-Link)
- For flashing: `cargo install probe-rs-tools --locked` (or use the [install script](https://probe.rs/docs/getting-started/installation/))
---
## Phase 1: Flash Firmware
### 1.1 Connect Nucleo
1. Connect Nucleo to your Mac/Linux via USB.
2. The board appears as a USB device (ST-Link). No separate driver needed on modern systems.
### 1.2 Flash via ZeroClaw
From the zeroclaw repo root:
```bash
zeroclaw peripheral flash-nucleo
```
This builds `firmware/zeroclaw-nucleo` and runs `probe-rs run --chip STM32F401RETx`. The firmware runs immediately after flashing.
### 1.3 Manual Flash (Alternative)
```bash
cd firmware/zeroclaw-nucleo
cargo build --release --target thumbv7em-none-eabihf
probe-rs run --chip STM32F401RETx target/thumbv7em-none-eabihf/release/zeroclaw-nucleo
```
---
## Phase 2: Find Serial Port
- **macOS:** `/dev/cu.usbmodem*` or `/dev/tty.usbmodem*` (e.g. `/dev/cu.usbmodem101`)
- **Linux:** `/dev/ttyACM0` (or check `dmesg` after plugging in)
USART2 (PA2/PA3) is bridged to the ST-Link's virtual COM port, so the host sees one serial device.
---
## Phase 3: Configure ZeroClaw
Add to `~/.zeroclaw/config.toml`:
```toml
[peripherals]
enabled = true
[[peripherals.boards]]
board = "nucleo-f401re"
transport = "serial"
path = "/dev/cu.usbmodem101" # adjust to your port
baud = 115200
```
---
## Phase 4: Run and Test
```bash
zeroclaw daemon --host 127.0.0.1 --port 8080
```
Or use the agent directly:
```bash
zeroclaw agent --message "Turn on the LED on pin 13"
```
Pin 13 = PA5 = User LED (LD2) on Nucleo-F401RE.
---
## Summary: Commands
| Step | Command |
|------|---------|
| 1 | Connect Nucleo via USB |
| 2 | `cargo install probe-rs-tools --locked` |
| 3 | `zeroclaw peripheral flash-nucleo` |
| 4 | Add Nucleo to config.toml (path = your serial port) |
| 5 | `zeroclaw daemon` or `zeroclaw agent -m "Turn on LED"` |
---
## Troubleshooting
- **flash-nucleo unrecognized** — Build from repo: `cargo run --features hardware -- peripheral flash-nucleo`. The subcommand is only in the repo build, not in crates.io installs.
- **probe-rs not found** — `cargo install probe-rs-tools --locked` (the `probe-rs` crate is a library; the CLI is in `probe-rs-tools`)
- **No probe detected** — Ensure Nucleo is connected. Try another USB cable/port.
- **Serial port not found** — On Linux, add user to `dialout`: `sudo usermod -a -G dialout $USER`, then log out/in.
- **GPIO commands ignored** — Check `path` in config matches your serial port. Run `zeroclaw peripheral list` to verify.

261
docs/pr-workflow.md Normal file
View file

@ -0,0 +1,261 @@
# ZeroClaw PR Workflow (High-Volume Collaboration)
This document defines how ZeroClaw handles high PR volume while maintaining:
- High performance
- High efficiency
- High stability
- High extensibility
- High sustainability
- High security
Related references:
- [`docs/ci-map.md`](ci-map.md) for per-workflow ownership, triggers, and triage flow.
- [`docs/reviewer-playbook.md`](reviewer-playbook.md) for day-to-day reviewer execution.
## 1) Governance Goals
1. Keep merge throughput predictable under heavy PR load.
2. Keep CI signal quality high (fast feedback, low false positives).
3. Keep security review explicit for risky surfaces.
4. Keep changes easy to reason about and easy to revert.
5. Keep repository artifacts free of personal/sensitive data leakage.
### Governance Design Logic (Control Loop)
This workflow is intentionally layered to reduce reviewer load while keeping accountability clear:
1. **Intake classification**: path/size/risk/module labels route the PR to the right review depth.
2. **Deterministic validation**: merge gate depends on reproducible checks, not subjective comments.
3. **Risk-based review depth**: high-risk paths trigger deep review; low-risk paths stay fast.
4. **Rollback-first merge contract**: every merge path includes concrete recovery steps.
Automation assists with triage and guardrails, but final merge accountability remains with human maintainers and PR authors.
## 2) Required Repository Settings
Maintain these branch protection rules on `main`:
- Require status checks before merge.
- Require check `CI Required Gate`.
- Require pull request reviews before merge.
- Require CODEOWNERS review for protected paths.
- For `.github/workflows/**`, require owner approval via `CI Required Gate` (`WORKFLOW_OWNER_LOGINS`) and keep branch/ruleset bypass limited to org owners.
- Dismiss stale approvals when new commits are pushed.
- Restrict force-push on protected branches.
## 3) PR Lifecycle
### Step A: Intake
- Contributor opens PR with full `.github/pull_request_template.md`.
- `PR Labeler` applies scope/path labels + size labels + risk labels + module labels (for example `channel:telegram`, `provider:kimi`, `tool:shell`) and contributor tiers by merged PR count (`trusted` >=5, `experienced` >=10, `principal` >=20, `distinguished` >=50), while de-duplicating less-specific scope labels when a more specific module label is present.
- For all module prefixes, module labels are compacted to reduce noise: one specific module keeps `prefix:component`, but multiple specifics collapse to the base scope label `prefix`.
- Label ordering is priority-first: `risk:*` -> `size:*` -> contributor tier -> module/path labels.
- Maintainers can run `PR Labeler` manually (`workflow_dispatch`) in `audit` mode for drift visibility or `repair` mode to normalize managed label metadata repository-wide.
- Hovering a label in GitHub shows its auto-managed description (rule/threshold summary).
- Managed label colors are arranged by display order to create a smooth gradient across long label rows.
- `PR Auto Responder` posts first-time guidance, handles label-driven routing for low-signal items, and auto-applies issue contributor tiers using the same thresholds as `PR Labeler` (`trusted` >=5, `experienced` >=10, `principal` >=20, `distinguished` >=50).
### Step B: Validation
- `CI Required Gate` is the merge gate.
- Docs-only PRs use fast-path and skip heavy Rust jobs.
- Non-doc PRs must pass lint, tests, and release build smoke check.
### Step C: Review
- Reviewers prioritize by risk and size labels.
- Security-sensitive paths (`src/security`, `src/runtime`, `src/gateway`, and CI workflows) require maintainer attention.
- Large PRs (`size: L`/`size: XL`) should be split unless strongly justified.
### Step D: Merge
- Prefer **squash merge** to keep history compact.
- PR title should follow Conventional Commit style.
- Merge only when rollback path is documented.
## 4) PR Readiness Contracts (DoR / DoD)
### Definition of Ready (before requesting review)
- PR template fully completed.
- Scope boundary is explicit (what changed / what did not).
- Validation evidence attached (not just "CI will check").
- Security and rollback fields completed for risky paths.
- Privacy/data-hygiene checks are completed and test language is neutral/project-scoped.
- If identity-like wording appears in tests/examples, it is normalized to ZeroClaw/project-native labels.
### Definition of Done (merge-ready)
- `CI Required Gate` is green.
- Required reviewers approved (including CODEOWNERS paths).
- Risk class labels match touched paths.
- Migration/compatibility impact is documented.
- Rollback path is concrete and fast.
## 5) PR Size Policy
- `size: XS` <= 80 changed lines
- `size: S` <= 250 changed lines
- `size: M` <= 500 changed lines
- `size: L` <= 1000 changed lines
- `size: XL` > 1000 changed lines
Policy:
- Target `XS/S/M` by default.
- `L/XL` PRs need explicit justification and tighter test evidence.
- If a large feature is unavoidable, split into stacked PRs.
Automation behavior:
- `PR Labeler` applies `size:*` labels from effective changed lines.
- Docs-only/lockfile-heavy PRs are normalized to avoid size inflation.
## 6) AI/Agent Contribution Policy
AI-assisted PRs are welcome, and review can also be agent-assisted.
Required:
1. Clear PR summary with scope boundary.
2. Explicit test/validation evidence.
3. Security impact and rollback notes for risky changes.
Recommended:
1. Brief tool/workflow notes when automation materially influenced the change.
2. Optional prompt/plan snippets for reproducibility.
We do **not** require contributors to quantify AI-vs-human line ownership.
Review emphasis for AI-heavy PRs:
- Contract compatibility
- Security boundaries
- Error handling and fallback behavior
- Performance and memory regressions
## 7) Review SLA and Queue Discipline
- First maintainer triage target: within 48 hours.
- If PR is blocked, maintainer leaves one actionable checklist.
- `stale` automation is used to keep queue healthy; maintainers can apply `no-stale` when needed.
- `pr-hygiene` automation checks open PRs every 12 hours and posts a nudge when a PR has no new commits for 48+ hours and is either behind `main` or missing/failing `CI Required Gate` on the head commit.
Backlog pressure controls:
- Use a review queue budget: limit concurrent deep-review PRs per maintainer and keep the rest in triage state.
- For stacked work, require explicit `Depends on #...` so review order is deterministic.
- If a new PR replaces an older open PR, require `Supersedes #...` and close the older one after maintainer confirmation.
- Mark dormant/redundant PRs with `stale-candidate` or `superseded` to reduce duplicate review effort.
Issue triage discipline:
- `r:needs-repro` for incomplete bug reports (request deterministic repro before deep triage).
- `r:support` for usage/help items better handled outside bug backlog.
- `invalid` / `duplicate` labels trigger **issue-only** closing automation with guidance.
Automation side-effect guards:
- `PR Auto Responder` deduplicates label-based comments to avoid spam.
- Automated close routes are limited to issues, not PRs.
- Maintainers can freeze automated risk recalculation with `risk: manual` when context demands human override.
## 8) Security and Stability Rules
Changes in these areas require stricter review and stronger test evidence:
- `src/security/**`
- runtime process management
- gateway ingress/authentication behavior (`src/gateway/**`)
- filesystem access boundaries
- network/authentication behavior
- GitHub workflows and release pipeline
- tools with execution capability (`src/tools/**`)
Minimum for risky PRs:
- threat/risk statement
- mitigation notes
- rollback steps
Recommended for high-risk PRs:
- include a focused test proving boundary behavior
- include one explicit failure-mode scenario and expected degradation
For agent-assisted contributions, reviewers should also verify the author demonstrates understanding of runtime behavior and blast radius.
## 9) Failure Recovery
If a merged PR causes regressions:
1. Revert PR immediately on `main`.
2. Open a follow-up issue with root-cause analysis.
3. Re-introduce fix only with regression tests.
Prefer fast restore of service quality over delayed perfect fixes.
## 10) Maintainer Checklist (Merge-Ready)
- Scope is focused and understandable.
- CI gate is green.
- Docs-quality checks are green when docs changed.
- Security impact fields are complete.
- Privacy/data-hygiene fields are complete and evidence is redacted/anonymized.
- Agent workflow notes are sufficient for reproducibility (if automation was used).
- Rollback plan is explicit.
- Commit title follows Conventional Commits.
## 11) Agent Review Operating Model
To keep review quality stable under high PR volume, we use a two-lane review model:
### Lane A: Fast triage (agent-friendly)
- Confirm PR template completeness.
- Confirm CI gate signal (`CI Required Gate`).
- Confirm risk class via labels and touched paths.
- Confirm rollback statement exists.
- Confirm privacy/data-hygiene section and neutral wording requirements are satisfied.
- Confirm any required identity-like wording uses ZeroClaw/project-native terminology.
### Lane B: Deep review (risk-based)
Required for high-risk changes (security/runtime/gateway/CI):
- Validate threat model assumptions.
- Validate failure mode and degradation behavior.
- Validate backward compatibility and migration impact.
- Validate observability/logging impact.
## 12) Queue Priority and Label Discipline
Triage order recommendation:
1. `size: XS`/`size: S` + bug/security fixes
2. `size: M` focused changes
3. `size: L`/`size: XL` split requests or staged review
Label discipline:
- Path labels identify subsystem ownership quickly.
- Size labels drive batching strategy.
- Risk labels drive review depth (`risk: low/medium/high`).
- Module labels (`<module>: <component>`) improve reviewer routing for integration-specific changes and future newly-added modules.
- `risk: manual` allows maintainers to preserve a human risk judgment when automation lacks context.
- `no-stale` is reserved for accepted-but-blocked work.
## 13) Agent Handoff Contract
When one agent hands off to another (or to a maintainer), include:
1. Scope boundary (what changed / what did not).
2. Validation evidence.
3. Open risks and unknowns.
4. Suggested next action.
This keeps context loss low and avoids repeated deep dives.

100
docs/resource-limits.md Normal file
View file

@ -0,0 +1,100 @@
# Resource Limits for ZeroClaw
## Problem
ZeroClaw has rate limiting (20 actions/hour) but no resource caps. A runaway agent could:
- Exhaust available memory
- Spin CPU at 100%
- Fill disk with logs/output
---
## Proposed Solutions
### Option 1: cgroups v2 (Linux, Recommended)
Automatically create a cgroup for zeroclaw with limits.
```bash
# Create systemd service with limits
[Service]
MemoryMax=512M
CPUQuota=100%
IOReadBandwidthMax=/dev/sda 10M
IOWriteBandwidthMax=/dev/sda 10M
TasksMax=100
```
### Option 2: tokio::task::deadlock detection
Prevent task starvation.
```rust
use tokio::time::{timeout, Duration};
pub async fn execute_with_timeout<F, T>(
fut: F,
cpu_time_limit: Duration,
memory_limit: usize,
) -> Result<T>
where
F: Future<Output = Result<T>>,
{
// CPU timeout
timeout(cpu_time_limit, fut).await?
}
```
### Option 3: Memory monitoring
Track heap usage and kill if over limit.
```rust
use std::alloc::{GlobalAlloc, Layout, System};
struct LimitedAllocator<A> {
inner: A,
max_bytes: usize,
used: std::sync::atomic::AtomicUsize,
}
unsafe impl<A: GlobalAlloc> GlobalAlloc for LimitedAllocator<A> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let current = self.used.fetch_add(layout.size(), std::sync::atomic::Ordering::Relaxed);
if current + layout.size() > self.max_bytes {
std::process::abort();
}
self.inner.alloc(layout)
}
}
```
---
## Config Schema
```toml
[resources]
# Memory limits (in MB)
max_memory_mb = 512
max_memory_per_command_mb = 128
# CPU limits
max_cpu_percent = 50
max_cpu_time_seconds = 60
# Disk I/O limits
max_log_size_mb = 100
max_temp_storage_mb = 500
# Process limits
max_subprocesses = 10
max_open_files = 100
```
---
## Implementation Priority
| Phase | Feature | Effort | Impact |
|-------|---------|--------|--------|
| **P0** | Memory monitoring + kill | Low | High |
| **P1** | CPU timeout per command | Low | High |
| **P2** | cgroups integration (Linux) | Medium | Very High |
| **P3** | Disk I/O limits | Medium | Medium |

110
docs/reviewer-playbook.md Normal file
View file

@ -0,0 +1,110 @@
# Reviewer Playbook
This playbook is the operational companion to [`docs/pr-workflow.md`](pr-workflow.md).
Use it to reduce review latency without reducing quality.
## 1) Review Objectives
- Keep queue throughput predictable.
- Keep risk review proportionate to change risk.
- Keep merge decisions reproducible and auditable.
## 2) 5-Minute Intake Triage
For every new PR, do a fast intake pass:
1. Confirm template completeness (`summary`, `validation`, `security`, `rollback`).
2. Confirm labels (`size:*`, `risk:*`, scope labels such as `provider`/`channel`/`security`, module-scoped labels such as `channel: *`/`provider: *`/`tool: *`, and contributor tier labels when applicable) are present and plausible.
3. Confirm CI signal status (`CI Required Gate`).
4. Confirm scope is one concern (reject mixed mega-PRs unless justified).
5. Confirm privacy/data-hygiene and neutral test wording requirements are satisfied.
If any intake requirement fails, leave one actionable checklist comment instead of deep review.
## 3) Risk-to-Depth Matrix
| Risk label | Typical touched paths | Minimum review depth |
|---|---|---|
| `risk: low` | docs/tests/chore, isolated non-runtime changes | 1 reviewer + CI gate |
| `risk: medium` | `src/providers/**`, `src/channels/**`, `src/memory/**`, `src/config/**` | 1 subsystem-aware reviewer + behavior verification |
| `risk: high` | `src/security/**`, `src/runtime/**`, `src/gateway/**`, `src/tools/**`, `.github/workflows/**` | fast triage + deep review, strong rollback and failure-mode checks |
When uncertain, treat as `risk: high`.
If automated risk labeling is contextually wrong, maintainers can apply `risk: manual` and set the final risk label explicitly.
## 4) Fast-Lane Checklist (All PRs)
- Scope boundary is explicit and believable.
- Validation commands are present and results are coherent.
- User-facing behavior changes are documented.
- Author demonstrates understanding of behavior and blast radius (especially for agent-assisted PRs).
- Rollback path is concrete (not just “revert”).
- Compatibility/migration impacts are clear.
- No personal/sensitive data leakage in diff artifacts; examples/tests remain neutral and project-scoped.
- If identity-like wording exists, it uses ZeroClaw/project-native roles (not personal or real-world identities).
- Naming and architecture boundaries follow project contracts (`AGENTS.md`, `CONTRIBUTING.md`).
## 5) Deep Review Checklist (High Risk)
For high-risk PRs, verify at least one example in each category:
- **Security boundaries**: deny-by-default behavior preserved, no accidental scope broadening.
- **Failure modes**: error handling is explicit and degrades safely.
- **Contract stability**: CLI/config/API compatibility preserved or migration documented.
- **Observability**: failures are diagnosable without leaking secrets.
- **Rollback safety**: revert path and blast radius are clear.
## 6) Issue Triage Playbook
Use labels to keep backlog actionable:
- `r:needs-repro` for incomplete bug reports.
- `r:support` for usage/support questions better routed outside bug backlog.
- `duplicate` / `invalid` for non-actionable duplicates/noise.
- `no-stale` for accepted work waiting on external blockers.
- Request redaction if logs/payloads include personal identifiers or sensitive data.
## 7) Review Comment Style
Prefer checklist-style comments with one of these outcomes:
- **Ready to merge** (explicitly say why).
- **Needs author action** (ordered list of blockers).
- **Needs deeper security/runtime review** (state exact risk and requested evidence).
Avoid vague comments that create back-and-forth latency.
## 8) Automation Override Protocol
Use this when automation output creates review side effects:
1. **Incorrect risk label**: add `risk: manual`, then set the intended `risk:*` label.
2. **Incorrect auto-close on issue triage**: reopen issue, remove route label, and leave one clarifying comment.
3. **Label spam/noise**: keep one canonical maintainer comment and remove redundant route labels.
4. **Ambiguous PR scope**: request split before deep review.
### PR Backlog Pruning Protocol
When review demand exceeds capacity, apply this order:
1. Keep active bug/security PRs (`size: XS/S`) at the top of queue.
2. Ask overlapping PRs to consolidate; close older ones as `superseded` after acknowledgement.
3. Mark dormant PRs as `stale-candidate` before stale closure window starts.
4. Require rebase + fresh validation before reopening stale/superseded technical work.
## 9) Handoff Protocol
If handing off review to another maintainer/agent, include:
1. Scope summary
2. Current risk class and why
3. What has been validated already
4. Open blockers
5. Suggested next action
## 10) Weekly Queue Hygiene
- Review stale queue and apply `no-stale` only to accepted-but-blocked work.
- Prioritize `size: XS/S` bug/security PRs first.
- Convert recurring support issues into docs updates and auto-response guidance.

190
docs/sandboxing.md Normal file
View file

@ -0,0 +1,190 @@
# ZeroClaw Sandboxing Strategies
## Problem
ZeroClaw currently has application-layer security (allowlists, path blocking, command injection protection) but lacks OS-level containment. If an attacker is on the allowlist, they can run any allowed command with zeroclaw's user permissions.
## Proposed Solutions
### Option 1: Firejail Integration (Recommended for Linux)
Firejail provides user-space sandboxing with minimal overhead.
```rust
// src/security/firejail.rs
use std::process::Command;
pub struct FirejailSandbox {
enabled: bool,
}
impl FirejailSandbox {
pub fn new() -> Self {
let enabled = which::which("firejail").is_ok();
Self { enabled }
}
pub fn wrap_command(&self, cmd: &mut Command) -> &mut Command {
if !self.enabled {
return cmd;
}
// Firejail wraps any command with sandboxing
let mut jail = Command::new("firejail");
jail.args([
"--private=home", // New home directory
"--private-dev", // Minimal /dev
"--nosound", // No audio
"--no3d", // No 3D acceleration
"--novideo", // No video devices
"--nowheel", // No input devices
"--notv", // No TV devices
"--noprofile", // Skip profile loading
"--quiet", // Suppress warnings
]);
// Append original command
if let Some(program) = cmd.get_program().to_str() {
jail.arg(program);
}
for arg in cmd.get_args() {
if let Some(s) = arg.to_str() {
jail.arg(s);
}
}
// Replace original command with firejail wrapper
*cmd = jail;
cmd
}
}
```
**Config option:**
```toml
[security]
enable_sandbox = true
sandbox_backend = "firejail" # or "none", "bubblewrap", "docker"
```
---
### Option 2: Bubblewrap (Portable, no root required)
Bubblewrap uses user namespaces to create containers.
```bash
# Install bubblewrap
sudo apt install bubblewrap
# Wrap command:
bwrap --ro-bind /usr /usr \
--dev /dev \
--proc /proc \
--bind /workspace /workspace \
--unshare-all \
--share-net \
--die-with-parent \
-- /bin/sh -c "command"
```
---
### Option 3: Docker-in-Docker (Heavyweight but complete isolation)
Run agent tools inside ephemeral containers.
```rust
pub struct DockerSandbox {
image: String,
}
impl DockerSandbox {
pub async fn execute(&self, command: &str, workspace: &Path) -> Result<String> {
let output = Command::new("docker")
.args([
"run", "--rm",
"--memory", "512m",
"--cpus", "1.0",
"--network", "none",
"--volume", &format!("{}:/workspace", workspace.display()),
&self.image,
"sh", "-c", command
])
.output()
.await?;
Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
}
```
---
### Option 4: Landlock (Linux Kernel LSM, Rust native)
Landlock provides file system access control without containers.
```rust
use landlock::{Ruleset, AccessFS};
pub fn apply_landlock() -> Result<()> {
let ruleset = Ruleset::new()
.set_access_fs(AccessFS::read_file | AccessFS::write_file)
.add_path(Path::new("/workspace"), AccessFS::read_file | AccessFS::write_file)?
.add_path(Path::new("/tmp"), AccessFS::read_file | AccessFS::write_file)?
.restrict_self()?;
Ok(())
}
```
---
## Priority Implementation Order
| Phase | Solution | Effort | Security Gain |
|-------|----------|--------|---------------|
| **P0** | Landlock (Linux only, native) | Low | High (filesystem) |
| **P1** | Firejail integration | Low | Very High |
| **P2** | Bubblewrap wrapper | Medium | Very High |
| **P3** | Docker sandbox mode | High | Complete |
## Config Schema Extension
```toml
[security.sandbox]
enabled = true
backend = "auto" # auto | firejail | bubblewrap | landlock | docker | none
# Firejail-specific
[security.sandbox.firejail]
extra_args = ["--seccomp", "--caps.drop=all"]
# Landlock-specific
[security.sandbox.landlock]
readonly_paths = ["/usr", "/bin", "/lib"]
readwrite_paths = ["$HOME/workspace", "/tmp/zeroclaw"]
```
## Testing Strategy
```rust
#[cfg(test)]
mod tests {
#[test]
fn sandbox_blocks_path_traversal() {
// Try to read /etc/passwd through sandbox
let result = sandboxed_execute("cat /etc/passwd");
assert!(result.is_err());
}
#[test]
fn sandbox_allows_workspace_access() {
let result = sandboxed_execute("ls /workspace");
assert!(result.is_ok());
}
#[test]
    fn sandbox_blocks_network() {
// Ensure network is blocked when configured
let result = sandboxed_execute("curl http://example.com");
assert!(result.is_err());
}
}
```

180
docs/security-roadmap.md Normal file
View file

@ -0,0 +1,180 @@
# ZeroClaw Security Improvement Roadmap
## Current State: Strong Foundation
ZeroClaw already has **excellent application-layer security**:
✅ Command allowlist (not blocklist)
✅ Path traversal protection
✅ Command injection blocking (`$(...)`, backticks, `&&`, `>`)
✅ Secret isolation (API keys not leaked to shell)
✅ Rate limiting (20 actions/hour)
✅ Channel authorization (empty = deny all, `*` = allow all)
✅ Risk classification (Low/Medium/High)
✅ Environment variable sanitization
✅ Forbidden paths blocking
✅ Comprehensive test coverage (1,017 tests)
## What's Missing: OS-Level Containment
🔴 No OS-level sandboxing (chroot, containers, namespaces)
🔴 No resource limits (CPU, memory, disk I/O caps)
🔴 No tamper-evident audit logging
🔴 No syscall filtering (seccomp)
---
## Comparison: ZeroClaw vs PicoClaw vs Production Grade
| Feature | PicoClaw | ZeroClaw Now | ZeroClaw + Roadmap | Production Target |
|---------|----------|--------------|-------------------|-------------------|
| **Binary Size** | ~8MB | **3.4MB** ✅ | 3.5-4MB | < 5MB |
| **RAM Usage** | < 10MB | **< 5MB** | < 10MB | < 20MB |
| **Startup Time** | < 1s | **< 10ms** | < 50ms | < 100ms |
| **Command Allowlist** | Unknown | ✅ Yes | ✅ Yes | ✅ Yes |
| **Path Blocking** | Unknown | ✅ Yes | ✅ Yes | ✅ Yes |
| **Injection Protection** | Unknown | ✅ Yes | ✅ Yes | ✅ Yes |
| **OS Sandbox** | No | ❌ No | ✅ Firejail/Landlock | ✅ Container/namespaces |
| **Resource Limits** | No | ❌ No | ✅ cgroups/Monitor | ✅ Full cgroups |
| **Audit Logging** | No | ❌ No | ✅ HMAC-signed | ✅ SIEM integration |
| **Security Score** | C | **B+** | **A-** | **A+** |
---
## Implementation Roadmap
### Phase 1: Quick Wins (1-2 weeks)
**Goal**: Address critical gaps with minimal complexity
| Task | File | Effort | Impact |
|------|------|--------|-------|
| Landlock filesystem sandbox | `src/security/landlock.rs` | 2 days | High |
| Memory monitoring + OOM kill | `src/resources/memory.rs` | 1 day | High |
| CPU timeout per command | `src/tools/shell.rs` | 1 day | High |
| Basic audit logging | `src/security/audit.rs` | 2 days | Medium |
| Config schema updates | `src/config/schema.rs` | 1 day | - |
**Deliverables**:
- Linux: Filesystem access restricted to workspace
- All platforms: Memory/CPU guards against runaway commands
- All platforms: Tamper-evident audit trail
---
### Phase 2: Platform Integration (2-3 weeks)
**Goal**: Deep OS integration for production-grade isolation
| Task | Effort | Impact |
|------|--------|-------|
| Firejail auto-detection + wrapping | 3 days | Very High |
| Bubblewrap wrapper (Linux, user namespaces; no root required) | 4 days | Very High |
| cgroups v2 systemd integration | 3 days | High |
| seccomp syscall filtering | 5 days | High |
| Audit log query CLI | 2 days | Medium |
**Deliverables**:
- Linux: Full container-like isolation via Firejail
- Linux (non-root): Bubblewrap filesystem isolation
- Linux: cgroups resource enforcement
- Linux: Syscall allowlisting
---
### Phase 3: Production Hardening (1-2 weeks)
**Goal**: Enterprise security features
| Task | Effort | Impact |
|------|--------|-------|
| Docker sandbox mode option | 3 days | High |
| Certificate pinning for channels | 2 days | Medium |
| Signed config verification | 2 days | Medium |
| SIEM-compatible audit export | 2 days | Medium |
| Security self-test (`zeroclaw audit --check`) | 1 day | Low |
**Deliverables**:
- Optional Docker-based execution isolation
- HTTPS certificate pinning for channel webhooks
- Config file signature verification
- JSON/CSV audit export for external analysis
---
## New Config Schema Preview
```toml
[security]
level = "strict" # relaxed | default | strict | paranoid
# Sandbox configuration
[security.sandbox]
enabled = true
backend = "auto" # auto | firejail | bubblewrap | landlock | docker | none
# Resource limits
[resources]
max_memory_mb = 512
max_memory_per_command_mb = 128
max_cpu_percent = 50
max_cpu_time_seconds = 60
max_subprocesses = 10
# Audit logging
[security.audit]
enabled = true
log_path = "~/.config/zeroclaw/audit.log"
sign_events = true
max_size_mb = 100
# Autonomy (existing, enhanced)
[autonomy]
level = "supervised" # readonly | supervised | full
allowed_commands = ["git", "ls", "cat", "grep", "find"]
forbidden_paths = ["/etc", "/root", "~/.ssh"]
require_approval_for_medium_risk = true
block_high_risk_commands = true
max_actions_per_hour = 20
```
---
## CLI Commands Preview
```bash
# Security status check
zeroclaw security --check
# → ✓ Sandbox: Firejail active
# → ✓ Audit logging enabled (42 events today)
# → ✓ Resource limits: 512MB mem, 50% CPU
# Audit log queries
zeroclaw audit --user @alice --since 24h
zeroclaw audit --risk high --violations-only
zeroclaw audit --verify-signatures
# Sandbox test
zeroclaw sandbox --test
# → Testing isolation...
# ✓ Cannot read /etc/passwd
# ✓ Cannot access ~/.ssh
# ✓ Can read /workspace
```
---
## Summary
**ZeroClaw is already more secure than PicoClaw** with:
- 50% smaller binary (3.4MB vs 8MB)
- 50% less RAM (< 5MB vs < 10MB)
- 100x faster startup (< 10ms vs < 1s)
- Comprehensive security policy engine
- Extensive test coverage
**By implementing this roadmap**, ZeroClaw becomes:
- Production-grade with OS-level sandboxing
- Resource-aware with memory/CPU guards
- Audit-ready with tamper-evident logging
- Enterprise-ready with configurable security levels
**Estimated effort**: 4-7 weeks for full implementation
**Value**: Transforms ZeroClaw from "safe for testing" to "safe for production"

View file

@ -12,6 +12,8 @@ use tokio::sync::mpsc;
pub struct ChannelMessage { pub struct ChannelMessage {
pub id: String, pub id: String,
pub sender: String, pub sender: String,
/// Channel-specific reply address (e.g. Telegram chat_id, Discord channel_id).
pub reply_to: String,
pub content: String, pub content: String,
pub channel: String, pub channel: String,
pub timestamp: u64, pub timestamp: u64,
@ -90,9 +92,12 @@ impl Channel for TelegramChannel {
continue; continue;
} }
let chat_id = msg["chat"]["id"].to_string();
let channel_msg = ChannelMessage { let channel_msg = ChannelMessage {
id: msg["message_id"].to_string(), id: msg["message_id"].to_string(),
sender, sender,
reply_to: chat_id,
content: msg["text"].as_str().unwrap_or("").to_string(), content: msg["text"].as_str().unwrap_or("").to_string(),
channel: "telegram".into(), channel: "telegram".into(),
timestamp: msg["date"].as_u64().unwrap_or(0), timestamp: msg["date"].as_u64().unwrap_or(0),

View file

@ -0,0 +1,143 @@
/*
* ZeroClaw Arduino Uno Firmware
*
* Listens for JSON commands on Serial (115200 baud), executes gpio_read/gpio_write,
* responds with JSON. Compatible with ZeroClaw SerialPeripheral protocol.
*
* Protocol (newline-delimited JSON):
* Request: {"id":"1","cmd":"gpio_write","args":{"pin":13,"value":1}}
* Response: {"id":"1","ok":true,"result":"done"}
*
* Arduino Uno: Pin 13 has built-in LED. Digital pins 0-13 supported.
*
* 1. Open in Arduino IDE
* 2. Select Board: Arduino Uno
* 3. Select correct Port (Tools -> Port)
* 4. Upload
*/
#define BAUDRATE 115200
#define MAX_LINE 256
char lineBuf[MAX_LINE];
int lineLen = 0;
// Parse integer from JSON: "pin":13 or "value":1
// Parse the integer value of a top-level numeric JSON field, e.g. "pin":13.
// Returns -1 when the key is absent (valid pin/value args are non-negative).
int parseArg(const char* key, const char* json) {
  char pattern[32];
  // Build the literal token `"key":` to search for in the raw JSON text.
  snprintf(pattern, sizeof(pattern), "\"%s\":", key);
  const char* hit = strstr(json, pattern);
  if (hit == NULL) {
    return -1;  // key not present in this request
  }
  // atoi skips leading whitespace, so `"pin": 13` also parses correctly.
  return atoi(hit + strlen(pattern));
}
// Extract "id" for response
// Copy the request's "id" string into `out` (NUL-terminated, truncated to
// outLen-1 chars) so the response can echo it back. Falls back to "0" when
// the request has no string-valued id field.
void copyId(char* out, int outLen, const char* json) {
  const char* start = strstr(json, "\"id\":\"");
  if (start == NULL) {
    out[0] = '0';
    out[1] = '\0';
    return;
  }
  start += 6;  // skip past the `"id":"` prefix to the first id character
  int n = 0;
  // Copy until the closing quote, end of input, or the output buffer fills.
  for (; n < outLen - 1 && start[n] != '\0' && start[n] != '"'; n++) {
    out[n] = start[n];
  }
  out[n] = '\0';
}
// Check if cmd is present
// Return true when the request line contains the exact token `"cmd":"<cmd>"`.
bool hasCmd(const char* json, const char* cmd) {
  char needle[64];
  snprintf(needle, sizeof(needle), "\"cmd\":\"%s\"", cmd);
  return strstr(json, needle) != NULL;
}
// Dispatch one complete JSON request line and write a JSON response to Serial.
// Supported commands: ping, capabilities, gpio_read, gpio_write; any other
// command gets an "Unknown command" error reply. The request id is echoed
// back so the host can correlate responses.
void handleLine(const char* line) {
  char idBuf[16];
  copyId(idBuf, sizeof(idBuf), line);
  if (hasCmd(line, "ping")) {
    // Liveness check: reply "pong" with no side effects.
    Serial.print("{\"id\":\"");
    Serial.print(idBuf);
    Serial.println("\",\"ok\":true,\"result\":\"pong\"}");
    return;
  }
  // Phase C: Dynamic discovery — report GPIO pins and LED pin
  if (hasCmd(line, "capabilities")) {
    // The result field is itself a JSON string, hence the escaped quotes.
    Serial.print("{\"id\":\"");
    Serial.print(idBuf);
    Serial.print("\",\"ok\":true,\"result\":\"{\\\"gpio\\\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13],\\\"led_pin\\\":13}\"}");
    Serial.println();
    return;
  }
  if (hasCmd(line, "gpio_read")) {
    int pin = parseArg("pin", line);
    // Uno digital pins are 0-13; parseArg returns -1 for a missing arg.
    if (pin < 0 || pin > 13) {
      Serial.print("{\"id\":\"");
      Serial.print(idBuf);
      Serial.print("\",\"ok\":false,\"result\":\"\",\"error\":\"Invalid pin ");
      Serial.print(pin);
      Serial.println("\"}");
      return;
    }
    pinMode(pin, INPUT);
    int val = digitalRead(pin);
    Serial.print("{\"id\":\"");
    Serial.print(idBuf);
    Serial.print("\",\"ok\":true,\"result\":\"");
    Serial.print(val);
    Serial.println("\"}");
    return;
  }
  if (hasCmd(line, "gpio_write")) {
    int pin = parseArg("pin", line);
    int value = parseArg("value", line);
    if (pin < 0 || pin > 13) {
      Serial.print("{\"id\":\"");
      Serial.print(idBuf);
      Serial.print("\",\"ok\":false,\"result\":\"\",\"error\":\"Invalid pin ");
      Serial.print(pin);
      Serial.println("\"}");
      return;
    }
    pinMode(pin, OUTPUT);
    // NOTE(review): a missing "value" arg yields -1 from parseArg, which is
    // truthy and therefore writes HIGH — confirm that is intended.
    digitalWrite(pin, value ? HIGH : LOW);
    Serial.print("{\"id\":\"");
    Serial.print(idBuf);
    Serial.println("\",\"ok\":true,\"result\":\"done\"}");
    return;
  }
  // Unknown command
  Serial.print("{\"id\":\"");
  Serial.print(idBuf);
  Serial.println("\",\"ok\":false,\"result\":\"\",\"error\":\"Unknown command\"}");
}
// Arduino entry point: open the serial link the host protocol runs over
// and start with an empty line buffer.
void setup() {
  Serial.begin(BAUDRATE);
  lineLen = 0;
}
// Accumulate incoming serial bytes into lineBuf until a newline or carriage
// return, then dispatch the completed line to handleLine. Lines longer than
// MAX_LINE-1 bytes are silently discarded.
void loop() {
  while (Serial.available()) {
    char c = Serial.read();
    if (c == '\n' || c == '\r') {
      // CRLF endings produce an empty second line; the length check skips it.
      if (lineLen > 0) {
        lineBuf[lineLen] = '\0';
        handleLine(lineBuf);
        lineLen = 0;
      }
    } else if (lineLen < MAX_LINE - 1) {
      lineBuf[lineLen++] = c;
    } else {
      lineLen = 0; // Overflow, discard
    }
  }
}

View file

@ -0,0 +1,13 @@
[build]
target = "riscv32imc-esp-espidf"
[target.riscv32imc-esp-espidf]
linker = "ldproxy"
rustflags = [
"--cfg", 'espidf_time64',
"-C", "default-linker-libraries",
]
[unstable]
build-std = ["std", "panic_abort"]
build-std-features = ["panic_immediate_abort"]

View file

@ -0,0 +1,46 @@
[package]
name = "zeroclaw-esp32-ui"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "ZeroClaw ESP32 UI firmware with Slint - Graphical interface for AI assistant"
authors = ["ZeroClaw Team"]
[dependencies]
anyhow = "1.0"
esp-idf-svc = "0.48"
log = { version = "0.4", default-features = false }
# Slint UI - MCU optimized
slint = { version = "1.10", default-features = false, features = [
"compat-1-2",
"libm",
"renderer-software",
] }
[build-dependencies]
embuild = { version = "0.31", features = ["elf"] }
slint-build = "1.10"
[features]
default = ["std", "display-st7789"]
std = ["esp-idf-svc/std"]
# Display selection (choose one)
display-st7789 = [] # 320x240 or 135x240
display-ili9341 = [] # 320x240
display-ssd1306 = [] # 128x64 OLED
# Input
touch-xpt2046 = [] # Resistive touch
touch-ft6x36 = [] # Capacitive touch
[profile.release]
opt-level = "s"
lto = true
codegen-units = 1
strip = true
panic = "abort"
[profile.dev]
opt-level = "s"

View file

@ -0,0 +1,106 @@
# ZeroClaw ESP32 UI Firmware
Slint-based graphical UI firmware scaffold for ZeroClaw edge scenarios on ESP32.
## Scope of This Crate
This crate intentionally provides a **minimal, bootable UI scaffold**:
- Initializes ESP-IDF logging/runtime patches
- Compiles and runs a small Slint UI (`MainWindow`)
- Keeps display and touch feature flags available for incremental driver integration
What this crate **does not** do yet:
- No full chat runtime integration
- No production display/touch driver wiring in `src/main.rs`
- No Wi-Fi/BLE transport logic
## Features
- **Slint UI scaffold** suitable for MCU-oriented iteration
- **Display feature flags** for ST7789, ILI9341, SSD1306
- **Touch feature flags** for XPT2046 and FT6X36 integration planning
- **ESP-IDF baseline** for embedded target builds
## Project Structure
```text
firmware/zeroclaw-esp32-ui/
├── Cargo.toml # Rust package and feature flags
├── build.rs # Slint compilation hook
├── .cargo/
│ └── config.toml # Cross-compilation defaults
├── ui/
│ └── main.slint # Slint UI definition
└── src/
└── main.rs # Firmware entry point
```
## Prerequisites
1. **ESP Rust toolchain**
```bash
cargo install espup
espup install
source ~/export-esp.sh
```
2. **Flashing tools**
```bash
cargo install espflash cargo-espflash
```
## Build and Flash
### Default target (ESP32-C3, from `.cargo/config.toml`)
```bash
cd firmware/zeroclaw-esp32-ui
cargo build --release
cargo espflash flash --release --monitor
```
### Build for ESP32-S3 (override target)
```bash
cargo build --release --target xtensa-esp32s3-espidf
```
## Feature Flags
```bash
# Switch display profile
cargo build --release --features display-ili9341
# Enable planned touch profile
cargo build --release --features touch-ft6x36
```
## UI Layout
The current `ui/main.slint` defines:
- `StatusBar`
- `MessageList`
- `InputBar`
- `MainWindow`
These components are placeholders to keep future hardware integration incremental and low-risk.
## Next Integration Steps
1. Wire real display driver initialization in `src/main.rs`
2. Attach touch input events to Slint callbacks
3. Connect UI state with ZeroClaw edge/runtime messaging
4. Add board-specific pin maps with explicit target profiles
## License
MIT - See root `LICENSE`
## References
- [Slint ESP32 Documentation](https://slint.dev/esp32)
- [ESP-IDF Rust Book](https://esp-rs.github.io/book/)
- [ZeroClaw Hardware Design](../../docs/hardware-peripherals-design.md)

View file

@ -0,0 +1,14 @@
use embuild::espidf::sysenv::output;
// Build script: exports the ESP-IDF build environment and compiles the
// Slint UI ahead of time for the firmware binary.
fn main() {
    // Emit ESP-IDF environment metadata (cfg flags, link info) to Cargo.
    output();
    // Compile ui/main.slint with resources embedded for the software
    // renderer — the MCU has no filesystem to load assets from at runtime.
    slint_build::compile_with_config(
        "ui/main.slint",
        slint_build::CompilerConfiguration::new()
            .embed_resources(slint_build::EmbedResourcesKind::EmbedForSoftwareRenderer)
            .with_style("material".into()),
    )
    .expect("Slint UI compilation failed");
    // Re-run this script whenever any file under ui/ changes.
    println!("cargo:rerun-if-changed=ui/");
}

View file

@ -0,0 +1,22 @@
//! ZeroClaw ESP32 UI firmware scaffold.
//!
//! This binary initializes ESP-IDF, boots a minimal Slint UI, and keeps
//! architecture boundaries explicit so hardware integrations can be added
//! incrementally.
use anyhow::Context;
use log::info;
slint::include_modules!();
/// Firmware entry point: boot ESP-IDF support, then run the Slint UI
/// event loop until the window is closed.
fn main() -> anyhow::Result<()> {
    // Required when linking against ESP-IDF: applies runtime patches.
    esp_idf_svc::sys::link_patches();
    // Route `log` macro output through the ESP-IDF logger.
    esp_idf_svc::log::EspLogger::initialize_default();
    info!("Starting ZeroClaw ESP32 UI scaffold");
    // `MainWindow` is generated by slint::include_modules!() from ui/main.slint.
    let window = MainWindow::new().context("failed to create MainWindow")?;
    // Blocks running the Slint event loop for the lifetime of the UI.
    window.run().context("MainWindow event loop failed")?;
    Ok(())
}

View file

@ -0,0 +1,83 @@
// Top bar: app title on the left, connection/status text on the right.
component StatusBar inherits Rectangle {
    in property <string> title_text: "ZeroClaw ESP32 UI";
    in property <string> status_text: "disconnected";
    height: 32px;
    background: #1f2937;
    border-radius: 6px;
    HorizontalLayout {
        padding: 8px;
        Text {
            text: root.title_text;
            color: #e5e7eb;
            font-size: 14px;
            vertical-alignment: center;
        }
        Text {
            text: root.status_text;
            color: #93c5fd;
            font-size: 12px;
            horizontal-alignment: right;
            vertical-alignment: center;
        }
    }
}
// Central area: placeholder for the scrolling message/chat list.
component MessageList inherits Rectangle {
    in property <string> message_text: "UI scaffold is running";
    background: #0f172a;
    border-radius: 6px;
    border-color: #334155;
    border-width: 1px;
    Text {
        text: root.message_text;
        color: #cbd5e1;
        horizontal-alignment: center;
        vertical-alignment: center;
    }
}
// Bottom bar: placeholder for future touch/text input.
component InputBar inherits Rectangle {
    in property <string> hint_text: "Touch input integration pending";
    height: 36px;
    background: #1e293b;
    border-radius: 6px;
    Text {
        text: root.hint_text;
        color: #e2e8f0;
        horizontal-alignment: center;
        vertical-alignment: center;
        font-size: 12px;
    }
}
// Root window sized for a 320x240 display (default ST7789/ILI9341 profile).
export component MainWindow inherits Window {
    width: 320px;
    height: 240px;
    background: #020617;
    VerticalLayout {
        padding: 10px;
        spacing: 10px;
        StatusBar {
            title_text: "ZeroClaw Edge UI";
            status_text: "booting";
        }
        MessageList {
            message_text: "Display/touch drivers can be wired here";
        }
        InputBar {
            hint_text: "Use touch-xpt2046 or touch-ft6x36 feature later";
        }
    }
}

View file

@ -0,0 +1,11 @@
[build]
target = "riscv32imc-esp-espidf"
[target.riscv32imc-esp-espidf]
linker = "ldproxy"
runner = "espflash flash --monitor"
# ESP-IDF 5.x uses 64-bit time_t
rustflags = ["-C", "default-linker-libraries", "--cfg", "espidf_time64"]
[unstable]
build-std = ["std", "panic_abort"]

1794
firmware/zeroclaw-esp32/Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,41 @@
# ZeroClaw ESP32 firmware — JSON-over-serial peripheral for host-mediated control.
#
# Flash to ESP32 and connect via serial. The host ZeroClaw sends gpio_read/gpio_write
# commands; this firmware executes them and responds.
#
# Prerequisites: espup (cargo install espup; espup install; source ~/export-esp.sh)
# Build: cargo build --release
# Flash: cargo espflash flash --monitor
[package]
name = "zeroclaw-esp32"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "ZeroClaw ESP32 peripheral firmware — GPIO over JSON serial"
[patch.crates-io]
# Use latest esp-rs crates to fix u8/i8 char pointer compatibility with ESP-IDF 5.x
esp-idf-sys = { git = "https://github.com/esp-rs/esp-idf-sys" }
esp-idf-hal = { git = "https://github.com/esp-rs/esp-idf-hal" }
esp-idf-svc = { git = "https://github.com/esp-rs/esp-idf-svc" }
[dependencies]
esp-idf-svc = { git = "https://github.com/esp-rs/esp-idf-svc" }
log = "0.4"
anyhow = "1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
[build-dependencies]
embuild = { version = "0.33", features = ["espidf"] }
[profile.release]
opt-level = "s"
lto = true
codegen-units = 1
strip = true
panic = "abort"
[profile.dev]
opt-level = "s"

View file

@ -0,0 +1,80 @@
# ZeroClaw ESP32 Firmware
Peripheral firmware for ESP32 — speaks the same JSON-over-serial protocol as the STM32 firmware. Flash this to your ESP32, then configure ZeroClaw on the host to connect via serial.
**New to this?** See [SETUP.md](SETUP.md) for step-by-step commands and troubleshooting.
## Protocol
- **Request** (host → ESP32): `{"id":"1","cmd":"gpio_write","args":{"pin":13,"value":1}}\n`
- **Response** (ESP32 → host): `{"id":"1","ok":true,"result":"done"}\n`
Commands: `gpio_read`, `gpio_write`.
## Prerequisites
1. **RISC-V ESP-IDF** (ESP32-C2/C3): Uses nightly Rust with `build-std`.
**Python**: ESP-IDF requires Python 3.10–3.13 (not 3.14). If you have Python 3.14:
```sh
brew install python@3.12
```
**virtualenv** (needed by ESP-IDF tools; PEP 668 workaround on macOS):
```sh
/opt/homebrew/opt/python@3.12/bin/python3.12 -m pip install virtualenv --break-system-packages
```
**Rust tools**:
```sh
cargo install espflash ldproxy
```
The project's `rust-toolchain.toml` pins nightly + rust-src. `esp-idf-sys` downloads ESP-IDF automatically on first build. Use Python 3.12 for the build:
```sh
export PATH="/opt/homebrew/opt/python@3.12/libexec/bin:$PATH"
```
2. **Xtensa targets** (ESP32, ESP32-S2, ESP32-S3): Use espup instead:
```sh
cargo install espup espflash
espup install
source ~/export-esp.sh
```
Then edit `.cargo/config.toml` to change the target (e.g. `xtensa-esp32-espidf`).
## Build & Flash
```sh
cd firmware/zeroclaw-esp32
# Use Python 3.12 (required if you have 3.14)
export PATH="/opt/homebrew/opt/python@3.12/libexec/bin:$PATH"
# Optional: pin MCU (esp32c3 or esp32c2)
export MCU=esp32c3
cargo build --release
espflash flash target/riscv32imc-esp-espidf/release/zeroclaw-esp32 --monitor
```
## Host Config
Add to `config.toml`:
```toml
[peripherals]
enabled = true
[[peripherals.boards]]
board = "esp32"
transport = "serial"
path = "/dev/ttyUSB0" # or /dev/ttyACM0, COM3, etc.
baud = 115200
```
## Pin Mapping
Default GPIO 2 and 13 are configured for output. Edit `src/main.rs` to add more pins or change for your board. ESP32-C3 has different pin layout — adjust UART pins (gpio21/gpio20) if needed.
## Edge-Native (Future)
Phase 6 also envisions ZeroClaw running *on* the ESP32 (WiFi + LLM). This firmware is the host-mediated serial peripheral; edge-native will be a separate crate.

View file

@ -0,0 +1,156 @@
# ESP32 Firmware Setup Guide
Step-by-step setup for building the ZeroClaw ESP32 firmware. Follow this if you run into issues.
## Quick Start (copy-paste)
```sh
# 1. Install Python 3.12 (ESP-IDF needs 3.10–3.13, not 3.14)
brew install python@3.12
# 2. Install virtualenv (PEP 668 workaround on macOS)
/opt/homebrew/opt/python@3.12/bin/python3.12 -m pip install virtualenv --break-system-packages
# 3. Install Rust tools
cargo install espflash ldproxy
# 4. Build
cd firmware/zeroclaw-esp32
export PATH="/opt/homebrew/opt/python@3.12/libexec/bin:$PATH"
cargo build --release
# 5. Flash (connect ESP32 via USB)
espflash flash target/riscv32imc-esp-espidf/release/zeroclaw-esp32 --monitor
```
---
## Detailed Steps
### 1. Python
ESP-IDF requires Python 3.10–3.13. **Python 3.14 is not supported.**
```sh
brew install python@3.12
```
### 2. virtualenv
ESP-IDF tools need `virtualenv`. On macOS with Homebrew Python, PEP 668 blocks `pip install`; use:
```sh
/opt/homebrew/opt/python@3.12/bin/python3.12 -m pip install virtualenv --break-system-packages
```
### 3. Rust Tools
```sh
cargo install espflash ldproxy
```
- **espflash**: flash and monitor
- **ldproxy**: linker for ESP-IDF builds
### 4. Use Python 3.12 for Builds
Before every build (or add to `~/.zshrc`):
```sh
export PATH="/opt/homebrew/opt/python@3.12/libexec/bin:$PATH"
```
### 5. Build
```sh
cd firmware/zeroclaw-esp32
cargo build --release
```
First build downloads and compiles ESP-IDF (~5–15 min).
### 6. Flash
```sh
espflash flash target/riscv32imc-esp-espidf/release/zeroclaw-esp32 --monitor
```
---
## Troubleshooting
### "No space left on device"
Free disk space. Common targets:
```sh
# Cargo cache (often 5–20 GB)
rm -rf ~/.cargo/registry/cache ~/.cargo/registry/src
# Unused Rust toolchains
rustup toolchain list
rustup toolchain uninstall <name>
# iOS Simulator runtimes (~35 GB)
xcrun simctl delete unavailable
# Temp files
rm -rf /var/folders/*/T/cargo-install*
```
### "can't find crate for `core`" / "riscv32imc-esp-espidf target may not be installed"
This project uses **nightly Rust with build-std**, not espup. Ensure:
- `rust-toolchain.toml` exists (pins nightly + rust-src)
- You are **not** sourcing `~/export-esp.sh` (that's for Xtensa targets)
- Run `cargo build` from `firmware/zeroclaw-esp32`
### "externally-managed-environment" / "No module named 'virtualenv'"
Install virtualenv with the PEP 668 workaround:
```sh
/opt/homebrew/opt/python@3.12/bin/python3.12 -m pip install virtualenv --break-system-packages
```
### "expected `i64`, found `i32`" (time_t mismatch)
Already fixed in `.cargo/config.toml` with `espidf_time64` for ESP-IDF 5.x. If you use ESP-IDF 4.4, switch to `espidf_time32`.
### "expected `*const u8`, found `*const i8`" (esp-idf-svc)
Already fixed via `[patch.crates-io]` in `Cargo.toml` using esp-rs crates from git. Do not remove the patch.
### 10,000+ files in `git status`
The `.embuild/` directory (ESP-IDF cache) has ~100k+ files. It is in `.gitignore`. If you see them, ensure `.gitignore` contains:
```
.embuild/
```
---
## Optional: Auto-load Python 3.12
Add to `~/.zshrc`:
```sh
# ESP32 firmware build
export PATH="/opt/homebrew/opt/python@3.12/libexec/bin:$PATH"
```
---
## Xtensa Targets (ESP32, ESP32-S2, ESP32-S3)
For non-RISC-V chips, use espup instead:
```sh
cargo install espup espflash
espup install
source ~/export-esp.sh
```
Then edit `.cargo/config.toml` to use `xtensa-esp32-espidf` (or the correct target).

View file

@ -0,0 +1,3 @@
// Build script: export the ESP-IDF build environment (include paths,
// cfg flags, link settings) to Cargo so the firmware crate can compile.
fn main() {
    embuild::espidf::sysenv::output();
}

View file

@ -0,0 +1,3 @@
[toolchain]
channel = "nightly"
components = ["rust-src"]

View file

@ -0,0 +1,163 @@
//! ZeroClaw ESP32 firmware — JSON-over-serial peripheral.
//!
//! Listens for newline-delimited JSON commands on UART0, executes gpio_read/gpio_write,
//! responds with JSON. Compatible with host ZeroClaw SerialPeripheral protocol.
//!
//! Protocol: same as STM32 — see docs/hardware-peripherals-design.md
use esp_idf_svc::hal::gpio::PinDriver;
use esp_idf_svc::hal::peripherals::Peripherals;
use esp_idf_svc::hal::uart::{UartConfig, UartDriver};
use esp_idf_svc::hal::units::Hertz;
use log::info;
use serde::{Deserialize, Serialize};
/// Incoming command from host.
#[derive(Debug, Deserialize)]
struct Request {
    // Correlation id; echoed back verbatim in the Response.
    id: String,
    // Command name: "capabilities", "gpio_read", or "gpio_write".
    cmd: String,
    // Command arguments, e.g. {"pin": 2, "value": 1}.
    args: serde_json::Value,
}
/// Outgoing response to host.
#[derive(Debug, Serialize)]
struct Response {
    // Mirrors the Request id so the host can correlate replies.
    id: String,
    // true on success; false when `error` is populated.
    ok: bool,
    // Command result payload; empty string on failure.
    result: String,
    // Human-readable error message; omitted from JSON when None.
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<String>,
}
/// Firmware entry point: initialize ESP-IDF, claim GPIO 2/13 as outputs,
/// open UART0 at 115200 baud, then loop forever reading newline-delimited
/// JSON requests and writing JSON responses.
fn main() -> anyhow::Result<()> {
    // Required when linking against ESP-IDF: applies runtime patches.
    esp_idf_svc::sys::link_patches();
    esp_idf_svc::log::EspLogger::initialize_default();
    let peripherals = Peripherals::take()?;
    let pins = peripherals.pins;
    // Create GPIO output drivers first (they take ownership of pins)
    let mut gpio2 = PinDriver::output(pins.gpio2)?;
    let mut gpio13 = PinDriver::output(pins.gpio13)?;
    // UART0: TX=21, RX=20 (ESP32) — ESP32-C3 may use different pins; adjust for your board
    let config = UartConfig::new().baudrate(Hertz(115_200));
    let uart = UartDriver::new(
        peripherals.uart0,
        pins.gpio21,
        pins.gpio20,
        // No RTS/CTS flow control.
        Option::<esp_idf_svc::hal::gpio::Gpio0>::None,
        Option::<esp_idf_svc::hal::gpio::Gpio1>::None,
        &config,
    )?;
    info!("ZeroClaw ESP32 firmware ready on UART0 (115200)");
    let mut buf = [0u8; 512];
    let mut line = Vec::new();
    loop {
        // Read with a timeout of 100 (presumably FreeRTOS ticks — confirm
        // against the esp-idf-hal UartDriver::read docs).
        match uart.read(&mut buf, 100) {
            Ok(0) => continue,
            Ok(n) => {
                for &b in &buf[..n] {
                    if b == b'\n' {
                        // Newline terminates a request; dispatch and reply.
                        if !line.is_empty() {
                            if let Ok(line_str) = std::str::from_utf8(&line) {
                                if let Ok(resp) = handle_request(line_str, &mut gpio2, &mut gpio13)
                                {
                                    let out = serde_json::to_string(&resp).unwrap_or_default();
                                    let _ = uart.write(format!("{}\n", out).as_bytes());
                                }
                            }
                            line.clear();
                        }
                    } else {
                        line.push(b);
                        // Guard against unbounded growth on a missing newline.
                        if line.len() > 400 {
                            line.clear();
                        }
                    }
                }
            }
            // Read errors (e.g. timeout) are ignored; just try again.
            Err(_) => {}
        }
    }
}
/// Parse one JSON request line, execute the command against the two
/// configured output pins, and build the JSON-serializable reply.
///
/// Returns Err only when the line is not valid JSON; command-level
/// failures are reported in-band via `Response { ok: false, .. }`.
fn handle_request<G2, G13>(
    line: &str,
    gpio2: &mut PinDriver<'_, G2>,
    gpio13: &mut PinDriver<'_, G13>,
) -> anyhow::Result<Response>
where
    G2: esp_idf_svc::hal::gpio::OutputMode,
    G13: esp_idf_svc::hal::gpio::OutputMode,
{
    let request: Request = serde_json::from_str(line.trim())?;
    // Run the command; success carries the result payload string.
    let outcome: anyhow::Result<String> = match request.cmd.as_str() {
        "capabilities" => {
            // Phase C: report GPIO pins and LED pin (matches Arduino protocol)
            Ok(serde_json::json!({
                "gpio": [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19],
                "led_pin": 2
            })
            .to_string())
        }
        "gpio_read" => {
            // Missing/non-numeric "pin" defaults to 0.
            let pin = request.args.get("pin").and_then(|v| v.as_u64()).unwrap_or(0) as i32;
            gpio_read(pin).map(|level| level.to_string())
        }
        "gpio_write" => {
            let pin = request.args.get("pin").and_then(|v| v.as_u64()).unwrap_or(0) as i32;
            let level = request.args.get("value").and_then(|v| v.as_u64()).unwrap_or(0);
            gpio_write(gpio2, gpio13, pin, level).map(|()| "done".into())
        }
        _ => Err(anyhow::anyhow!("Unknown command: {}", request.cmd)),
    };
    // Fold the command outcome into the wire-format reply.
    Ok(match outcome {
        Ok(payload) => Response {
            id: request.id,
            ok: true,
            result: payload,
            error: None,
        },
        Err(err) => Response {
            id: request.id,
            ok: false,
            result: String::new(),
            error: Some(err.to_string()),
        },
    })
}
/// Read the logic level of `_pin`.
///
/// Stub: always reports logic low (0) until per-pin input drivers exist.
fn gpio_read(_pin: i32) -> anyhow::Result<u8> {
    // TODO: implement input pin read — requires storing InputPin drivers per pin
    Ok(0)
}
/// Drive one of the two configured output pins high (value != 0) or low.
///
/// Errors for any pin other than 2 or 13 — extend here when wiring more pins.
fn gpio_write<G2, G13>(
    gpio2: &mut PinDriver<'_, G2>,
    gpio13: &mut PinDriver<'_, G13>,
    pin: i32,
    value: u64,
) -> anyhow::Result<()>
where
    G2: esp_idf_svc::hal::gpio::OutputMode,
    G13: esp_idf_svc::hal::gpio::OutputMode,
{
    // Any nonzero value maps to a high level.
    let target = esp_idf_svc::hal::gpio::Level::from(value != 0);
    if pin == 2 {
        gpio2.set_level(target)?;
    } else if pin == 13 {
        gpio13.set_level(target)?;
    } else {
        anyhow::bail!("Pin {} not configured (add to gpio_write)", pin);
    }
    Ok(())
}

849
firmware/zeroclaw-nucleo/Cargo.lock generated Normal file
View file

@ -0,0 +1,849 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aligned"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee4508988c62edf04abd8d92897fca0c2995d907ce1dfeaf369dac3716a40685"
dependencies = [
"as-slice",
]
[[package]]
name = "as-slice"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "516b6b4f0e40d50dcda9365d53964ec74560ad4284da2e7fc97122cd83174516"
dependencies = [
"stable_deref_trait",
]
[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "bare-metal"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5deb64efa5bd81e31fcd1938615a6d98c82eafcbcd787162b6f63b91d6bac5b3"
dependencies = [
"rustc_version",
]
[[package]]
name = "bit_field"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e4b40c7323adcfc0a41c4b88143ed58346ff65a288fc144329c5c45e05d70c6"
[[package]]
name = "bitfield"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
[[package]]
name = "block-device-driver"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44c051592f59fe68053524b4c4935249b806f72c1f544cfb7abe4f57c3be258e"
dependencies = [
"aligned",
]
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cortex-m"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9"
dependencies = [
"bare-metal",
"bitfield",
"embedded-hal 0.2.7",
"volatile-register",
]
[[package]]
name = "cortex-m-rt"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d4dec46b34c299ccf6b036717ae0fce602faa4f4fe816d9013b9a7c9f5ba6"
dependencies = [
"cortex-m-rt-macros",
]
[[package]]
name = "cortex-m-rt-macros"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e37549a379a9e0e6e576fd208ee60394ccb8be963889eebba3ffe0980364f472"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "critical-section"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b"
[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core",
"darling_macro",
]
[[package]]
name = "darling_core"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn 2.0.116",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core",
"quote",
"syn 2.0.116",
]
[[package]]
name = "defmt"
version = "0.3.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0963443817029b2024136fc4dd07a5107eb8f977eaf18fcd1fdeb11306b64ad"
dependencies = [
"defmt 1.0.1",
]
[[package]]
name = "defmt"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "548d977b6da32fa1d1fda2876453da1e7df63ad0304c8b3dae4dbe7b96f39b78"
dependencies = [
"bitflags 1.3.2",
"defmt-macros",
]
[[package]]
name = "defmt-macros"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d4fc12a85bcf441cfe44344c4b72d58493178ce635338a3f3b78943aceb258e"
dependencies = [
"defmt-parser",
"proc-macro-error2",
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "defmt-parser"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10d60334b3b2e7c9d91ef8150abfb6fa4c1c39ebbcf4a81c2e346aad939fee3e"
dependencies = [
"thiserror",
]
[[package]]
name = "defmt-rtt"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93d5a25c99d89c40f5676bec8cefe0614f17f0f40e916f98e345dae941807f9e"
dependencies = [
"critical-section",
"defmt 1.0.1",
]
[[package]]
name = "document-features"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61"
dependencies = [
"litrs",
]
[[package]]
name = "embassy-embedded-hal"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "554e3e840696f54b4c9afcf28a0f24da431c927f4151040020416e7393d6d0d8"
dependencies = [
"defmt 1.0.1",
"embassy-futures",
"embassy-hal-internal 0.3.0",
"embassy-sync",
"embassy-time",
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
"embedded-hal-async",
"embedded-storage",
"embedded-storage-async",
"nb 1.1.0",
]
[[package]]
name = "embassy-executor"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06070468370195e0e86f241c8e5004356d696590a678d47d6676795b2e439c6b"
dependencies = [
"cortex-m",
"critical-section",
"defmt 1.0.1",
"document-features",
"embassy-executor-macros",
"embassy-executor-timer-queue",
]
[[package]]
name = "embassy-executor-macros"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfdddc3a04226828316bf31393b6903ee162238576b1584ee2669af215d55472"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "embassy-executor-timer-queue"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fc328bf943af66b80b98755db9106bf7e7471b0cf47dc8559cd9a6be504cc9c"
[[package]]
name = "embassy-futures"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc2d050bdc5c21e0862a89256ed8029ae6c290a93aecefc73084b3002cdebb01"
[[package]]
name = "embassy-hal-internal"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95285007a91b619dc9f26ea8f55452aa6c60f7115a4edc05085cd2bd3127cd7a"
dependencies = [
"num-traits",
]
[[package]]
name = "embassy-hal-internal"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f10ce10a4dfdf6402d8e9bd63128986b96a736b1a0a6680547ed2ac55d55dba"
dependencies = [
"cortex-m",
"critical-section",
"defmt 1.0.1",
"num-traits",
]
[[package]]
name = "embassy-net-driver"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524eb3c489760508f71360112bca70f6e53173e6fe48fc5f0efd0f5ab217751d"
dependencies = [
"defmt 0.3.100",
]
[[package]]
name = "embassy-stm32"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "088d65743a48f2cc9b3ae274ed85d6e8b68bd3ee92eb6b87b15dca2f81f7a101"
dependencies = [
"aligned",
"bit_field",
"bitflags 2.11.0",
"block-device-driver",
"cfg-if",
"cortex-m",
"cortex-m-rt",
"critical-section",
"defmt 1.0.1",
"document-features",
"embassy-embedded-hal",
"embassy-futures",
"embassy-hal-internal 0.4.0",
"embassy-net-driver",
"embassy-sync",
"embassy-time",
"embassy-time-driver",
"embassy-time-queue-utils",
"embassy-usb-driver",
"embassy-usb-synopsys-otg",
"embedded-can",
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
"embedded-hal-async",
"embedded-hal-nb",
"embedded-io 0.7.1",
"embedded-io-async 0.7.0",
"embedded-storage",
"embedded-storage-async",
"futures-util",
"heapless 0.9.2",
"nb 1.1.0",
"proc-macro2",
"quote",
"rand_core 0.6.4",
"rand_core 0.9.5",
"sdio-host",
"static_assertions",
"stm32-fmc",
"stm32-metapac",
"trait-set",
"vcell",
"volatile-register",
]
[[package]]
name = "embassy-sync"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73974a3edbd0bd286759b3d483540f0ebef705919a5f56f4fc7709066f71689b"
dependencies = [
"cfg-if",
"critical-section",
"defmt 1.0.1",
"embedded-io-async 0.6.1",
"futures-core",
"futures-sink",
"heapless 0.8.0",
]
[[package]]
name = "embassy-time"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fa65b9284d974dad7a23bb72835c4ec85c0b540d86af7fc4098c88cff51d65"
dependencies = [
"cfg-if",
"critical-section",
"defmt 1.0.1",
"document-features",
"embassy-time-driver",
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
"embedded-hal-async",
"futures-core",
]
[[package]]
name = "embassy-time-driver"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0a244c7dc22c8d0289379c8d8830cae06bb93d8f990194d0de5efb3b5ae7ba6"
dependencies = [
"document-features",
]
[[package]]
name = "embassy-time-queue-utils"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80e2ee86063bd028a420a5fb5898c18c87a8898026da1d4c852af2c443d0a454"
dependencies = [
"embassy-executor-timer-queue",
"heapless 0.8.0",
]
[[package]]
name = "embassy-usb-driver"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17119855ccc2d1f7470a39756b12068454ae27a3eabb037d940b5c03d9c77b7a"
dependencies = [
"defmt 1.0.1",
"embedded-io-async 0.6.1",
]
[[package]]
name = "embassy-usb-synopsys-otg"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "288751f8eaa44a5cf2613f13cee0ca8e06e6638cb96e897e6834702c79084b23"
dependencies = [
"critical-section",
"defmt 1.0.1",
"embassy-sync",
"embassy-usb-driver",
]
[[package]]
name = "embedded-can"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9d2e857f87ac832df68fa498d18ddc679175cf3d2e4aa893988e5601baf9438"
dependencies = [
"nb 1.1.0",
]
[[package]]
name = "embedded-hal"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff"
dependencies = [
"nb 0.1.3",
"void",
]
[[package]]
name = "embedded-hal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361a90feb7004eca4019fb28352a9465666b24f840f5c3cddf0ff13920590b89"
[[package]]
name = "embedded-hal-async"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4c685bbef7fe13c3c6dd4da26841ed3980ef33e841cddfa15ce8a8fb3f1884"
dependencies = [
"embedded-hal 1.0.0",
]
[[package]]
name = "embedded-hal-nb"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fba4268c14288c828995299e59b12babdbe170f6c6d73731af1b4648142e8605"
dependencies = [
"embedded-hal 1.0.0",
"nb 1.1.0",
]
[[package]]
name = "embedded-io"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d"
[[package]]
name = "embedded-io"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9eb1aa714776b75c7e67e1da744b81a129b3ff919c8712b5e1b32252c1f07cc7"
dependencies = [
"defmt 1.0.1",
]
[[package]]
name = "embedded-io-async"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ff09972d4073aa8c299395be75161d582e7629cd663171d62af73c8d50dba3f"
dependencies = [
"embedded-io 0.6.1",
]
[[package]]
name = "embedded-io-async"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2564b9f813c544241430e147d8bc454815ef9ac998878d30cc3055449f7fd4c0"
dependencies = [
"defmt 1.0.1",
"embedded-io 0.7.1",
]
[[package]]
name = "embedded-storage"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a21dea9854beb860f3062d10228ce9b976da520a73474aed3171ec276bc0c032"
[[package]]
name = "embedded-storage-async"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1763775e2323b7d5f0aa6090657f5e21cfa02ede71f5dc40eead06d64dcd15cc"
dependencies = [
"embedded-storage",
]
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "futures-core"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d"
[[package]]
name = "futures-sink"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893"
[[package]]
name = "futures-task"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393"
[[package]]
name = "futures-util"
version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
]
[[package]]
name = "hash32"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
dependencies = [
"byteorder",
]
[[package]]
name = "heapless"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
dependencies = [
"hash32",
"stable_deref_trait",
]
[[package]]
name = "heapless"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af2455f757db2b292a9b1768c4b70186d443bcb3b316252d6b540aec1cd89ed"
dependencies = [
"hash32",
"stable_deref_trait",
]
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "litrs"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092"
[[package]]
name = "nb"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f"
dependencies = [
"nb 1.1.0",
]
[[package]]
name = "nb"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "panic-probe"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd402d00b0fb94c5aee000029204a46884b1262e0c443f166d86d2c0747e1a1a"
dependencies = [
"cortex-m",
"defmt 1.0.1",
]
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "proc-macro-error-attr2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5"
dependencies = [
"proc-macro2",
"quote",
]
[[package]]
name = "proc-macro-error2"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802"
dependencies = [
"proc-macro-error-attr2",
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "proc-macro2"
version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
[[package]]
name = "rand_core"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
]
[[package]]
name = "sdio-host"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b328e2cb950eeccd55b7f55c3a963691455dcd044cfb5354f0c5e68d2c2d6ee2"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "stable_deref_trait"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "stm32-fmc"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72692594faa67f052e5e06dd34460951c21e83bc55de4feb8d2666e2f15480a2"
dependencies = [
"embedded-hal 1.0.0",
]
[[package]]
name = "stm32-metapac"
version = "19.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a411079520dbccc613af73172f944b7cf97ba84e3bd7381a0352b6ec7bfef03b"
dependencies = [
"cortex-m",
"cortex-m-rt",
"defmt 0.3.100",
]
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thiserror"
version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "trait-set"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "unicode-ident"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
[[package]]
name = "vcell"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002"
[[package]]
name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "volatile-register"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc"
dependencies = [
"vcell",
]
[[package]]
name = "zeroclaw-nucleo"
version = "0.1.0"
dependencies = [
"cortex-m-rt",
"critical-section",
"defmt 1.0.1",
"defmt-rtt",
"embassy-executor",
"embassy-stm32",
"embassy-time",
"heapless 0.9.2",
"panic-probe",
]

View file

@ -0,0 +1,39 @@
# ZeroClaw Nucleo-F401RE firmware — JSON-over-serial peripheral.
#
# Listens for newline-delimited JSON on USART2 (PA2/PA3, ST-Link VCP).
# Protocol: same as Arduino/ESP32 — ping, capabilities, gpio_read, gpio_write.
#
# Build: cargo build --release
# Flash: probe-rs run --chip STM32F401RETx target/thumbv7em-none-eabihf/release/zeroclaw-nucleo
# Or: zeroclaw peripheral flash-nucleo
[package]
name = "zeroclaw-nucleo"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "ZeroClaw Nucleo-F401RE peripheral firmware — GPIO over JSON serial"
[dependencies]
embassy-executor = { version = "0.9", features = ["arch-cortex-m", "executor-thread", "defmt"] }
embassy-stm32 = { version = "0.5", features = ["defmt", "stm32f401re", "unstable-pac", "memory-x", "time-driver-tim4", "exti"] }
embassy-time = { version = "0.5", features = ["defmt", "defmt-timestamp-uptime", "tick-hz-32_768"] }
defmt = "1.0"
defmt-rtt = "1.0"
panic-probe = { version = "1.0", features = ["print-defmt"] }
heapless = { version = "0.9", default-features = false }
critical-section = "1.1"
cortex-m-rt = "0.7"
[package.metadata.embassy]
build = [
{ target = "thumbv7em-none-eabihf", artifact-dir = "target" }
]
[profile.release]
opt-level = "s"
lto = true
codegen-units = 1
strip = true
panic = "abort"
debug = 1

View file

@ -0,0 +1,187 @@
//! ZeroClaw Nucleo-F401RE firmware — JSON-over-serial peripheral.
//!
//! Listens for newline-delimited JSON on USART2 (PA2=TX, PA3=RX).
//! USART2 is connected to ST-Link VCP — host sees /dev/ttyACM0 (Linux) or /dev/cu.usbmodem* (macOS).
//!
//! Protocol: same as Arduino/ESP32 — see docs/hardware-peripherals-design.md
#![no_std]
#![no_main]
use core::fmt::Write;
use core::str;
use defmt::info;
use embassy_executor::Spawner;
use embassy_stm32::gpio::{Level, Output, Speed};
use embassy_stm32::usart::{Config, Uart};
use heapless::String;
use {defmt_rtt as _, panic_probe as _};
/// Arduino-style pin 13 = PA5 (User LED LD2 on Nucleo-F401RE)
const LED_PIN: u8 = 13;
/// Parse an integer argument out of a raw JSON request line, e.g. `"pin":13`
/// or `"value":-1`.
///
/// This is a deliberately tiny scanner (a real JSON parser does not fit the
/// no_std budget): it searches for the literal byte pattern `"<key>":` and
/// then consumes an optional `-` followed by ASCII digits. Whitespace after
/// the colon is not supported — the host always sends compact JSON.
///
/// Returns `None` when the pattern is not found in `line`.
fn parse_arg(line: &[u8], key: &[u8]) -> Option<i32> {
    // Build the needle `"<key>":` in a fixed 32-byte buffer.
    let mut suffix: [u8; 32] = [0; 32];
    suffix[0] = b'"';
    let mut len = 1;
    for &k in key.iter() {
        // Leave room for the trailing `"` and `:` written below. The previous
        // bound (`i >= 30`) let `len` reach 31, so `suffix[len + 1]` indexed
        // slot 32 of a 32-byte array and panicked for keys of 30+ bytes.
        if len >= 30 {
            break;
        }
        suffix[len] = k;
        len += 1;
    }
    suffix[len] = b'"';
    suffix[len + 1] = b':';
    len += 2;
    let suffix = &suffix[..len];
    let line_len = line.len();
    if line_len < len {
        return None;
    }
    // Naive O(n*m) substring scan — request lines are at most 256 bytes.
    for i in 0..=line_len - len {
        if line[i..].starts_with(suffix) {
            let rest = &line[i + len..];
            let mut num: i32 = 0;
            let mut neg = false;
            let mut j = 0;
            if j < rest.len() && rest[j] == b'-' {
                neg = true;
                j += 1;
            }
            while j < rest.len() && rest[j].is_ascii_digit() {
                num = num * 10 + (rest[j] - b'0') as i32;
                j += 1;
            }
            return Some(if neg { -num } else { num });
        }
    }
    None
}
/// Return `true` when `line` contains the JSON fragment `"cmd":"<cmd>"`.
fn has_cmd(line: &[u8], cmd: &[u8]) -> bool {
    // Assemble the needle `"cmd":"<cmd>"` in a fixed buffer (no_std: no alloc).
    // Commands longer than 50 bytes are truncated, matching the buffer size.
    let mut needle: [u8; 64] = [0; 64];
    needle[..7].copy_from_slice(b"\"cmd\":\"");
    let take = cmd.len().min(50);
    needle[7..7 + take].copy_from_slice(&cmd[..take]);
    needle[7 + take] = b'"';
    let needle = &needle[..take + 8];
    // Substring scan via fixed-size windows; lines are short (<= 256 bytes),
    // so the naive O(n*m) search is plenty fast. An empty windows iterator
    // (line shorter than the needle) simply yields `false`.
    line.windows(needle.len()).any(|w| w == needle)
}
/// Copy the request's `"id"` value into `out` and return the number of bytes
/// written. Falls back to a single `'0'` byte when no id field is present,
/// so every response always carries some id.
fn copy_id(line: &[u8], out: &mut [u8]) -> usize {
    let needle = b"\"id\":\"";
    // Only search when at least one byte can follow the opening quote.
    if line.len() > needle.len() {
        if let Some(pos) = line.windows(needle.len()).position(|w| w == needle) {
            let tail = &line[pos + needle.len()..];
            let mut n = 0;
            // Copy until the closing quote, end of line, or `out` is nearly
            // full (one slot is always held back, mirroring the caller's
            // fixed 16-byte id buffer convention).
            while n < tail.len() && n < out.len() - 1 && tail[n] != b'"' {
                out[n] = tail[n];
                n += 1;
            }
            return n;
        }
    }
    out[0] = b'0';
    1
}
/// Firmware entry point: poll USART2 for newline-delimited JSON commands and
/// answer each one with a single-line JSON response.
///
/// The loop is intentionally synchronous (blocking UART reads, one byte at a
/// time): the peripheral has nothing else to do, and blocking I/O keeps the
/// request/response ordering trivial.
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // Default clock/power configuration is sufficient for a UART-only device.
    let p = embassy_stm32::init(Default::default());
    let mut config = Config::default();
    config.baudrate = 115_200;
    // USART2 on PA3 (RX) / PA2 (TX) is routed through the ST-Link VCP, so the
    // host sees a plain serial port. unwrap(): failing to bring up the only
    // I/O channel is unrecoverable at boot.
    let mut usart = Uart::new_blocking(p.USART2, p.PA3, p.PA2, config).unwrap();
    // PA5 drives the user LED (LD2), exposed to the protocol as pin 13.
    let mut led = Output::new(p.PA5, Level::Low, Speed::Low);
    info!("ZeroClaw Nucleo firmware ready on USART2 (115200)");
    // One request line accumulates here; oversized lines are dropped (see the
    // push failure branch at the bottom of the loop).
    let mut line_buf: heapless::Vec<u8, 256> = heapless::Vec::new();
    // Scratch buffers reused across iterations — no per-request allocation.
    let mut id_buf = [0u8; 16];
    let mut resp_buf: String<128> = String::new();
    loop {
        let mut byte = [0u8; 1];
        if usart.blocking_read(&mut byte).is_ok() {
            let b = byte[0];
            // Either line terminator ends a request; CRLF just yields one
            // empty line that is skipped by the is_empty() check.
            if b == b'\n' || b == b'\r' {
                if !line_buf.is_empty() {
                    // Echo the request id back so the host can correlate.
                    let id_len = copy_id(&line_buf, &mut id_buf);
                    let id_str = str::from_utf8(&id_buf[..id_len]).unwrap_or("0");
                    resp_buf.clear();
                    if has_cmd(&line_buf, b"ping") {
                        let _ = write!(resp_buf, "{{\"id\":\"{}\",\"ok\":true,\"result\":\"pong\"}}", id_str);
                    } else if has_cmd(&line_buf, b"capabilities") {
                        // result is a JSON string containing escaped JSON,
                        // matching the Arduino/ESP32 firmware convention.
                        let _ = write!(
                            resp_buf,
                            "{{\"id\":\"{}\",\"ok\":true,\"result\":\"{{\\\"gpio\\\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13],\\\"led_pin\\\":13}}\"}}",
                            id_str
                        );
                    } else if has_cmd(&line_buf, b"gpio_read") {
                        let pin = parse_arg(&line_buf, b"pin").unwrap_or(-1);
                        if pin == LED_PIN as i32 {
                            // Output doesn't support read; return 0 (LED state not readable)
                            let _ = write!(resp_buf, "{{\"id\":\"{}\",\"ok\":true,\"result\":\"0\"}}", id_str);
                        } else if pin >= 0 && pin <= 13 {
                            // NOTE(review): other pins are not actually sampled
                            // — this always reports 0; confirm against the
                            // protocol's expectations for non-LED pins.
                            let _ = write!(resp_buf, "{{\"id\":\"{}\",\"ok\":true,\"result\":\"0\"}}", id_str);
                        } else {
                            let _ = write!(
                                resp_buf,
                                "{{\"id\":\"{}\",\"ok\":false,\"result\":\"\",\"error\":\"Invalid pin {}\"}}",
                                id_str, pin
                            );
                        }
                    } else if has_cmd(&line_buf, b"gpio_write") {
                        let pin = parse_arg(&line_buf, b"pin").unwrap_or(-1);
                        let value = parse_arg(&line_buf, b"value").unwrap_or(0);
                        if pin == LED_PIN as i32 {
                            // Only the LED pin has a real output driver here.
                            led.set_level(if value != 0 { Level::High } else { Level::Low });
                            let _ = write!(resp_buf, "{{\"id\":\"{}\",\"ok\":true,\"result\":\"done\"}}", id_str);
                        } else if pin >= 0 && pin <= 13 {
                            // Accepted but a no-op for pins without a driver.
                            let _ = write!(resp_buf, "{{\"id\":\"{}\",\"ok\":true,\"result\":\"done\"}}", id_str);
                        } else {
                            let _ = write!(
                                resp_buf,
                                "{{\"id\":\"{}\",\"ok\":false,\"result\":\"\",\"error\":\"Invalid pin {}\"}}",
                                id_str, pin
                            );
                        }
                    } else {
                        let _ = write!(
                            resp_buf,
                            "{{\"id\":\"{}\",\"ok\":false,\"result\":\"\",\"error\":\"Unknown command\"}}",
                            id_str
                        );
                    }
                    // Best-effort write: a failed TX leaves nothing sensible
                    // to do on a headless peripheral, so errors are ignored.
                    let _ = usart.blocking_write(resp_buf.as_bytes());
                    let _ = usart.blocking_write(b"\n");
                    line_buf.clear();
                }
            } else if line_buf.push(b).is_err() {
                // Line overflow (>256 bytes): discard and resynchronize on
                // the next terminator.
                line_buf.clear();
            }
        }
    }
}

View file

@ -0,0 +1,9 @@
name: ZeroClaw Bridge
description: "GPIO bridge for ZeroClaw — exposes digitalWrite/digitalRead via socket for agent control"
icon: 🦀
version: "1.0.0"
ports:
- 9999
bricks: []

View file

@ -0,0 +1,66 @@
# ZeroClaw Bridge — socket server for GPIO control from ZeroClaw agent
# SPDX-License-Identifier: MPL-2.0
import socket
import threading
from arduino.app_utils import App, Bridge
ZEROCLAW_PORT = 9999
def handle_client(conn):
    """Serve one ZeroClaw socket client: read a single command, reply, close.

    Wire format (one request per connection, whitespace-separated):
        gpio_write <pin> <value>  -> b"ok\n"
        gpio_read <pin>           -> b"<value>\n"
    Anything else receives an "error: ...\n" reply; parse failures (e.g. a
    non-numeric pin) are reported back to the client via the broad handler.
    """
    try:
        request = conn.recv(256).decode().strip()
        if not request:
            conn.close()
            return
        tokens = request.split()
        if len(tokens) < 2:
            conn.sendall(b"error: invalid command\n")
            conn.close()
            return
        verb = tokens[0].lower()
        if verb == "gpio_write" and len(tokens) >= 3:
            Bridge.call("digitalWrite", [int(tokens[1]), int(tokens[2])])
            conn.sendall(b"ok\n")
        elif verb == "gpio_read":
            reading = Bridge.call("digitalRead", [int(tokens[1])])
            conn.sendall(f"{reading}\n".encode())
        else:
            conn.sendall(b"error: unknown command\n")
    except Exception as exc:
        # Report the failure to the client if the socket still works.
        try:
            conn.sendall(f"error: {exc}\n".encode())
        except Exception:
            pass
    finally:
        conn.close()
def accept_loop(server):
while True:
try:
conn, _ = server.accept()
t = threading.Thread(target=handle_client, args=(conn,))
t.daemon = True
t.start()
except Exception:
break
def loop():
    # Main-thread heartbeat handed to App.run(); all real work happens in the
    # accept/handler threads. App.sleep keeps the Arduino app loop cooperative.
    App.sleep(1)
def main():
    """Start the GPIO bridge: a loopback TCP listener plus the Arduino app loop.

    The listener runs in a daemon thread (`accept_loop`); the main thread is
    handed to `App.run`, which drives the Uno Q application runtime.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Loopback only: the bridge is a local control channel for the agent,
    # never exposed on the LAN.
    server.bind(("127.0.0.1", ZEROCLAW_PORT))
    server.listen(5)
    # Deliberately no settimeout() here: accept_loop breaks on any exception,
    # and the previous settimeout(1.0) made accept() raise socket.timeout
    # after one idle second, killing the accept thread. A blocking accept in
    # a daemon thread is cleaned up automatically at process exit.
    t = threading.Thread(target=accept_loop, args=(server,))
    t.daemon = True
    t.start()
    App.run(user_loop=loop)
if __name__ == "__main__":
main()

View file

@ -0,0 +1 @@
# ZeroClaw Bridge — no extra deps (arduino.app_utils is preinstalled on Uno Q)

View file

@ -0,0 +1,24 @@
// ZeroClaw Bridge — expose digitalWrite/digitalRead for agent GPIO control
// SPDX-License-Identifier: MPL-2.0
#include "Arduino_RouterBridge.h"
// Drive an Arduino pin as a digital output. Any non-zero value maps to HIGH.
// pinMode(OUTPUT) is (re)applied on every call so the bridge stays stateless:
// the Linux side never has to issue a separate mode-setup command first.
void gpio_write(int pin, int value) {
  pinMode(pin, OUTPUT);
  digitalWrite(pin, value ? HIGH : LOW);
}
// Sample an Arduino pin as a digital input and return its level (0 or 1).
// Like gpio_write, the mode is set on every call to keep the protocol
// stateless; note this switches the pin to INPUT even if it was an output.
int gpio_read(int pin) {
  pinMode(pin, INPUT);
  return digitalRead(pin);
}
// One-time init: bring up the MCU<->Linux router bridge and register the two
// GPIO primitives so the Python side can invoke them by name via Bridge.call.
void setup() {
  Bridge.begin();
  Bridge.provide("digitalWrite", gpio_write);
  Bridge.provide("digitalRead", gpio_read);
}
// Pump the bridge transport each iteration; incoming "provide" calls are
// dispatched from inside Bridge.update().
void loop() {
  Bridge.update();
}

View file

@ -0,0 +1,11 @@
profiles:
default:
fqbn: arduino:zephyr:unoq
platforms:
- platform: arduino:zephyr
libraries:
- MsgPack (0.4.2)
- DebugLog (0.8.4)
- ArxContainer (0.7.0)
- ArxTypeTraits (0.3.1)
default_profile: default

154
python/README.md Normal file
View file

@ -0,0 +1,154 @@
# zeroclaw-tools
Python companion package for [ZeroClaw](https://github.com/zeroclaw-labs/zeroclaw) — LangGraph-based tool calling for consistent LLM agent execution.
## Why This Package?
Some LLM providers (particularly GLM-5/Zhipu and similar models) have inconsistent tool calling behavior when using text-based tool invocation. This package provides a LangGraph-based approach that delivers:
- **Consistent tool calling** across all OpenAI-compatible providers
- **Automatic tool loop** — keeps calling tools until the task is complete
- **Easy extensibility** — add new tools with a simple `@tool` decorator
- **Framework agnostic** — works with any OpenAI-compatible API
## Installation
```bash
pip install zeroclaw-tools
```
With Discord integration:
```bash
pip install zeroclaw-tools[discord]
```
## Quick Start
### Basic Agent
```python
import asyncio
from zeroclaw_tools import create_agent, shell, file_read, file_write
from langchain_core.messages import HumanMessage
async def main():
# Create agent with tools
agent = create_agent(
tools=[shell, file_read, file_write],
model="glm-5",
api_key="your-api-key",
base_url="https://api.z.ai/api/coding/paas/v4"
)
# Execute a task
result = await agent.ainvoke({
"messages": [HumanMessage(content="List files in /tmp directory")]
})
print(result["messages"][-1].content)
asyncio.run(main())
```
### CLI Usage
```bash
# Set environment variables
export API_KEY="your-api-key"
export API_BASE="https://api.z.ai/api/coding/paas/v4"
# Run the CLI
zeroclaw-tools "List files in the current directory"
# Interactive mode (no message required)
zeroclaw-tools -i
```
### Discord Bot
```python
import os
from zeroclaw_tools.integrations import DiscordBot
bot = DiscordBot(
token=os.environ["DISCORD_TOKEN"],
guild_id=123456789,
allowed_users=["123456789"]
)
bot.run()
```
## Available Tools
| Tool | Description |
|------|-------------|
| `shell` | Execute shell commands |
| `file_read` | Read file contents |
| `file_write` | Write content to files |
| `web_search` | Search the web (requires Brave API key) |
| `http_request` | Make HTTP requests |
| `memory_store` | Store data in memory |
| `memory_recall` | Recall stored data |
## Creating Custom Tools
```python
from zeroclaw_tools import tool
@tool
def my_custom_tool(query: str) -> str:
"""Description of what this tool does."""
# Your implementation here
return f"Result for: {query}"
# Use with agent
agent = create_agent(tools=[my_custom_tool])
```
## Provider Compatibility
Works with any OpenAI-compatible provider:
- **Z.AI / GLM-5**`https://api.z.ai/api/coding/paas/v4`
- **OpenRouter**`https://openrouter.ai/api/v1`
- **Groq**`https://api.groq.com/openai/v1`
- **DeepSeek**`https://api.deepseek.com`
- **Ollama**`http://localhost:11434/v1`
- **And many more...**
## Architecture
```
┌─────────────────────────────────────────────┐
│ Your Application │
├─────────────────────────────────────────────┤
│ zeroclaw-tools Agent │
│ ┌─────────────────────────────────────┐ │
│ │ LangGraph StateGraph │ │
│ │ ┌───────────┐ ┌──────────┐ │ │
│ │ │ Agent │───▶│ Tools │ │ │
│ │ │ Node │◀───│ Node │ │ │
│ │ └───────────┘ └──────────┘ │ │
│ └─────────────────────────────────────┘ │
├─────────────────────────────────────────────┤
│ OpenAI-Compatible LLM Provider │
└─────────────────────────────────────────────┘
```
## Comparison with Rust ZeroClaw
| Feature | Rust ZeroClaw | zeroclaw-tools |
|---------|---------------|----------------|
| **Binary size** | ~3.4 MB | Python package |
| **Memory** | <5 MB | ~50 MB |
| **Startup** | <10ms | ~500ms |
| **Tool consistency** | Model-dependent | LangGraph guarantees |
| **Extensibility** | Rust traits | Python decorators |
Use **Rust ZeroClaw** for production edge deployments. Use **zeroclaw-tools** when you need guaranteed tool calling consistency or Python ecosystem integration.
## License
MIT License — see [LICENSE](../LICENSE)

67
python/pyproject.toml Normal file
View file

@ -0,0 +1,67 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project]
name = "zeroclaw-tools"
version = "0.1.0"
description = "Python companion package for ZeroClaw - LangGraph-based tool calling for consistent LLM agent execution"
readme = "README.md"
license = "MIT"
requires-python = ">=3.10"
authors = [
{ name = "ZeroClaw Community" }
]
keywords = [
"ai",
"llm",
"agent",
"langgraph",
"zeroclaw",
"tool-calling",
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
dependencies = [
"langgraph>=0.2.0",
"langchain-core>=0.3.0",
"langchain-openai>=0.2.0",
"httpx>=0.25.0",
]
[project.scripts]
zeroclaw-tools = "zeroclaw_tools.__main__:main"
[project.optional-dependencies]
discord = ["discord.py>=2.3.0"]
telegram = ["python-telegram-bot>=20.0"]
dev = [
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
"ruff>=0.1.0",
]
[project.urls]
Homepage = "https://github.com/zeroclaw-labs/zeroclaw"
Documentation = "https://github.com/zeroclaw-labs/zeroclaw/tree/main/python"
Repository = "https://github.com/zeroclaw-labs/zeroclaw"
Issues = "https://github.com/zeroclaw-labs/zeroclaw/issues"
[tool.hatch.build.targets.wheel]
packages = ["zeroclaw_tools"]
[tool.ruff]
line-length = 100
target-version = "py310"
[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"

0
python/tests/__init__.py Normal file
View file

103
python/tests/test_tools.py Normal file
View file

@ -0,0 +1,103 @@
"""
Tests for zeroclaw-tools package.
"""
import pytest
def test_import_main():
    """Test that main package imports work."""
    from zeroclaw_tools import create_agent, shell, file_read, file_write
    # LangChain-style tools expose .invoke; checking for it is a cheap
    # structural smoke test without touching any provider.
    assert callable(create_agent)
    assert hasattr(shell, "invoke")
    assert hasattr(file_read, "invoke")
    assert hasattr(file_write, "invoke")
def test_import_tool_decorator():
    """Test that tool decorator works."""
    from zeroclaw_tools import tool
    # Bare @tool usage: the decorated function becomes a tool object.
    @tool
    def test_func(x: str) -> str:
        """Test tool."""
        return x
    assert hasattr(test_func, "invoke")
def test_tool_decorator_custom_metadata():
    """Test that custom tool metadata is preserved."""
    from zeroclaw_tools import tool
    # Parameterized @tool(...) usage: explicit name/description must win over
    # the function's own name and (absent) docstring.
    @tool(name="echo_tool", description="Echo input back")
    def echo(value: str) -> str:
        return value
    assert echo.name == "echo_tool"
    assert "Echo input back" in echo.description
def test_agent_creation():
    """Test that agent can be created with default tools."""
    from zeroclaw_tools import create_agent, shell, file_read, file_write
    # Construction only — no network call is made until the agent is invoked,
    # so a dummy key/model is fine here.
    agent = create_agent(
        tools=[shell, file_read, file_write], model="test-model", api_key="test-key"
    )
    assert agent is not None
    assert agent.model == "test-model"
def test_cli_allows_interactive_without_message():
    """Interactive mode should not require positional message."""
    from zeroclaw_tools.__main__ import parse_args
    # -i alone is valid; the positional message stays an empty list.
    args = parse_args(["-i"])
    assert args.interactive is True
    assert args.message == []
def test_cli_requires_message_when_not_interactive():
    """Non-interactive mode requires at least one message token."""
    from zeroclaw_tools.__main__ import parse_args
    # argparse signals usage errors via SystemExit, not a return value.
    with pytest.raises(SystemExit):
        parse_args([])
@pytest.mark.asyncio
async def test_invoke_in_event_loop_raises():
    """invoke() should fail fast when called from an active event loop."""
    from zeroclaw_tools import create_agent, shell
    agent = create_agent(tools=[shell], model="test-model", api_key="test-key")
    # Running inside pytest-asyncio's loop: sync invoke() must refuse and
    # point the caller at ainvoke() instead of deadlocking.
    with pytest.raises(RuntimeError, match="ainvoke"):
        agent.invoke({"messages": []})
@pytest.mark.asyncio
async def test_shell_tool():
    """Test shell tool execution."""
    from zeroclaw_tools import shell
    # echo is portable across POSIX shells; only the payload is asserted
    # since the tool may wrap output with status text.
    result = await shell.ainvoke({"command": "echo hello"})
    assert "hello" in result
@pytest.mark.asyncio
async def test_file_tools(tmp_path):
    """Test file read/write tools round-trip through a temp directory."""
    from zeroclaw_tools import file_read, file_write
    test_file = tmp_path / "test.txt"
    # Write then read back: the write reply is human-readable status text,
    # the read reply must contain the exact stored content.
    write_result = await file_write.ainvoke({"path": str(test_file), "content": "Hello, World!"})
    assert "Successfully" in write_result
    read_result = await file_read.ainvoke({"path": str(test_file)})
    assert "Hello, World!" in read_result

Some files were not shown because too many files have changed in this diff Show more