From e5ef8a3b62b27c6002bed210bbe1b6efed9e360c Mon Sep 17 00:00:00 2001 From: ZeroClaw Contributor Date: Tue, 17 Feb 2026 01:35:40 +0300 Subject: [PATCH] feat(python): add zeroclaw-tools companion package for LangGraph tool calling - Add Python package with LangGraph-based agent for consistent tool calling - Provides reliable tool execution for providers with inconsistent native support - Includes tools: shell, file_read, file_write, web_search, http_request, memory - Discord bot integration included - CLI tool for quick interactions - Works with any OpenAI-compatible provider (Z.AI, OpenRouter, Groq, etc.) Why: Some LLM providers (e.g., GLM-5/Zhipu) have inconsistent tool calling behavior. LangGraph's structured approach guarantees reliable tool execution across all providers. --- README.md | 34 +++ docs/langgraph-integration.md | 239 ++++++++++++++++++ python/README.md | 151 +++++++++++ python/pyproject.toml | 66 +++++ python/tests/__init__.py | 0 python/tests/test_tools.py | 62 +++++ python/zeroclaw_tools/__init__.py | 32 +++ python/zeroclaw_tools/__main__.py | 113 +++++++++ python/zeroclaw_tools/agent.py | 161 ++++++++++++ .../zeroclaw_tools/integrations/__init__.py | 7 + .../integrations/discord_bot.py | 174 +++++++++++++ python/zeroclaw_tools/tools/__init__.py | 20 ++ python/zeroclaw_tools/tools/base.py | 46 ++++ python/zeroclaw_tools/tools/file.py | 60 +++++ python/zeroclaw_tools/tools/memory.py | 86 +++++++ python/zeroclaw_tools/tools/shell.py | 32 +++ python/zeroclaw_tools/tools/web.py | 88 +++++++ 17 files changed, 1371 insertions(+) create mode 100644 docs/langgraph-integration.md create mode 100644 python/README.md create mode 100644 python/pyproject.toml create mode 100644 python/tests/__init__.py create mode 100644 python/tests/test_tools.py create mode 100644 python/zeroclaw_tools/__init__.py create mode 100644 python/zeroclaw_tools/__main__.py create mode 100644 python/zeroclaw_tools/agent.py create mode 100644 
python/zeroclaw_tools/integrations/__init__.py create mode 100644 python/zeroclaw_tools/integrations/discord_bot.py create mode 100644 python/zeroclaw_tools/tools/__init__.py create mode 100644 python/zeroclaw_tools/tools/base.py create mode 100644 python/zeroclaw_tools/tools/file.py create mode 100644 python/zeroclaw_tools/tools/memory.py create mode 100644 python/zeroclaw_tools/tools/shell.py create mode 100644 python/zeroclaw_tools/tools/web.py diff --git a/README.md b/README.md index c90c58e..dc9882a 100644 --- a/README.md +++ b/README.md @@ -417,6 +417,40 @@ format = "openclaw" # "openclaw" (default, markdown files) or "aieos # aieos_inline = '{"identity":{"names":{"first":"Nova"}}}' # inline AIEOS JSON ``` +## Python Companion Package (`zeroclaw-tools`) + +For LLM providers with inconsistent native tool calling (e.g., GLM-5/Zhipu), ZeroClaw ships a Python companion package with **LangGraph-based tool calling** for guaranteed consistency: + +```bash +pip install zeroclaw-tools +``` + +```python +from zeroclaw_tools import create_agent, shell, file_read +from langchain_core.messages import HumanMessage + +# Works with any OpenAI-compatible provider +agent = create_agent( + tools=[shell, file_read], + model="glm-5", + api_key="your-key", + base_url="https://api.z.ai/api/coding/paas/v4" +) + +result = await agent.ainvoke({ + "messages": [HumanMessage(content="List files in /tmp")] +}) +print(result["messages"][-1].content) +``` + +**Why use it:** +- **Consistent tool calling** across all providers (even those with poor native support) +- **Automatic tool loop** — keeps calling tools until the task is complete +- **Easy extensibility** — add custom tools with `@tool` decorator +- **Discord/Telegram bots** included + +See [`python/README.md`](python/README.md) for full documentation. 
+ ## Identity System (AIEOS Support) ZeroClaw supports **identity-agnostic** AI personas through two formats: diff --git a/docs/langgraph-integration.md b/docs/langgraph-integration.md new file mode 100644 index 0000000..a7e64f9 --- /dev/null +++ b/docs/langgraph-integration.md @@ -0,0 +1,239 @@ +# LangGraph Integration Guide + +This guide explains how to use the `zeroclaw-tools` Python package for consistent tool calling with any OpenAI-compatible LLM provider. + +## Background + +Some LLM providers, particularly Chinese models like GLM-5 (Zhipu AI), have inconsistent tool calling behavior when using text-based tool invocation. ZeroClaw's Rust core uses structured tool calling via the OpenAI API format, but some models respond better to a different approach. + +LangGraph provides a stateful graph execution engine that guarantees consistent tool calling behavior regardless of the underlying model's native capabilities. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Your Application │ +├─────────────────────────────────────────────────────────────┤ +│ zeroclaw-tools Agent │ +│ │ +│ ┌─────────────────────────────────────────────────────┐ │ +│ │ LangGraph StateGraph │ │ +│ │ │ │ +│ │ ┌────────────┐ ┌────────────┐ │ │ +│ │ │ Agent │ ──────▶ │ Tools │ │ │ +│ │ │ Node │ ◀────── │ Node │ │ │ +│ │ └────────────┘ └────────────┘ │ │ +│ │ │ │ │ │ +│ │ ▼ ▼ │ │ +│ │ [Continue?] [Execute Tool] │ │ +│ │ │ │ │ │ +│ │ Yes │ No Result│ │ │ +│ │ ▼ ▼ │ │ +│ │ [END] [Back to Agent] │ │ +│ │ │ │ +│ └─────────────────────────────────────────────────────┘ │ +│ │ +├─────────────────────────────────────────────────────────────┤ +│ OpenAI-Compatible LLM Provider │ +│ (Z.AI, OpenRouter, Groq, DeepSeek, Ollama, etc.) 
│ +└─────────────────────────────────────────────────────────────┘ +``` + +## Quick Start + +### Installation + +```bash +pip install zeroclaw-tools +``` + +### Basic Usage + +```python +import asyncio +from zeroclaw_tools import create_agent, shell, file_read, file_write +from langchain_core.messages import HumanMessage + +async def main(): + agent = create_agent( + tools=[shell, file_read, file_write], + model="glm-5", + api_key="your-api-key", + base_url="https://api.z.ai/api/coding/paas/v4" + ) + + result = await agent.ainvoke({ + "messages": [HumanMessage(content="Read /etc/hostname and tell me the machine name")] + }) + + print(result["messages"][-1].content) + +asyncio.run(main()) +``` + +## Available Tools + +### Core Tools + +| Tool | Description | +|------|-------------| +| `shell` | Execute shell commands | +| `file_read` | Read file contents | +| `file_write` | Write content to files | + +### Extended Tools + +| Tool | Description | +|------|-------------| +| `web_search` | Search the web (requires `BRAVE_API_KEY`) | +| `http_request` | Make HTTP requests | +| `memory_store` | Store data in persistent memory | +| `memory_recall` | Recall stored data | + +## Custom Tools + +Create your own tools with the `@tool` decorator: + +```python +from zeroclaw_tools import tool, create_agent + +@tool +def get_weather(city: str) -> str: + """Get the current weather for a city.""" + # Your implementation + return f"Weather in {city}: Sunny, 25°C" + +@tool +def query_database(sql: str) -> str: + """Execute a SQL query and return results.""" + # Your implementation + return "Query returned 5 rows" + +agent = create_agent( + tools=[get_weather, query_database], + model="glm-5", + api_key="your-key" +) +``` + +## Provider Configuration + +### Z.AI / GLM-5 + +```python +agent = create_agent( + model="glm-5", + api_key="your-zhipu-key", + base_url="https://api.z.ai/api/coding/paas/v4" +) +``` + +### OpenRouter + +```python +agent = create_agent( + 
model="anthropic/claude-3.5-sonnet", + api_key="your-openrouter-key", + base_url="https://openrouter.ai/api/v1" +) +``` + +### Groq + +```python +agent = create_agent( + model="llama-3.3-70b-versatile", + api_key="your-groq-key", + base_url="https://api.groq.com/openai/v1" +) +``` + +### Ollama (Local) + +```python +agent = create_agent( + model="llama3.2", + base_url="http://localhost:11434/v1" +) +``` + +## Discord Bot Integration + +```python +import os +from zeroclaw_tools.integrations import DiscordBot + +bot = DiscordBot( + token=os.environ["DISCORD_TOKEN"], + guild_id=123456789, # Your Discord server ID + allowed_users=["123456789"], # User IDs that can use the bot + api_key=os.environ["API_KEY"], + model="glm-5" +) + +bot.run() +``` + +## CLI Usage + +```bash +# Set environment variables +export API_KEY="your-key" +export BRAVE_API_KEY="your-brave-key" # Optional, for web search + +# Single message +zeroclaw-tools "What is the current date?" + +# Interactive mode +zeroclaw-tools -i +``` + +## Comparison with Rust ZeroClaw + +| Aspect | Rust ZeroClaw | zeroclaw-tools | +|--------|---------------|-----------------| +| **Performance** | Ultra-fast (~10ms startup) | Python startup (~500ms) | +| **Memory** | <5 MB | ~50 MB | +| **Binary size** | ~3.4 MB | pip package | +| **Tool consistency** | Model-dependent | LangGraph guarantees | +| **Extensibility** | Rust traits | Python decorators | +| **Ecosystem** | Rust crates | PyPI packages | + +**When to use Rust ZeroClaw:** +- Production edge deployments +- Resource-constrained environments (Raspberry Pi, etc.) +- Maximum performance requirements + +**When to use zeroclaw-tools:** +- Models with inconsistent native tool calling +- Python-centric development +- Rapid prototyping +- Integration with Python ML ecosystem + +## Troubleshooting + +### "API key required" error + +Set the `API_KEY` environment variable or pass `api_key` to `create_agent()`. 
+ +### Tool calls not executing + +Ensure your model supports function calling. Some older models may not support tools. + +### Rate limiting + +Add delays between calls or implement your own rate limiting: + +```python +import asyncio + +for message in messages: + result = await agent.ainvoke({"messages": [message]}) + await asyncio.sleep(1) # Rate limit +``` + +## Related Projects + +- [rs-graph-llm](https://github.com/a-agmon/rs-graph-llm) - Rust LangGraph alternative +- [langchain-rust](https://github.com/Abraxas-365/langchain-rust) - LangChain for Rust +- [llm-chain](https://github.com/sobelio/llm-chain) - LLM chains in Rust diff --git a/python/README.md b/python/README.md new file mode 100644 index 0000000..5ad7c7b --- /dev/null +++ b/python/README.md @@ -0,0 +1,151 @@ +# zeroclaw-tools + +Python companion package for [ZeroClaw](https://github.com/zeroclaw-labs/zeroclaw) — LangGraph-based tool calling for consistent LLM agent execution. + +## Why This Package? + +Some LLM providers (particularly GLM-5/Zhipu and similar models) have inconsistent tool calling behavior when using text-based tool invocation. 
This package provides a LangGraph-based approach that delivers: + +- **Consistent tool calling** across all OpenAI-compatible providers +- **Automatic tool loop** — keeps calling tools until the task is complete +- **Easy extensibility** — add new tools with a simple `@tool` decorator +- **Framework agnostic** — works with any OpenAI-compatible API + +## Installation + +```bash +pip install zeroclaw-tools +``` + +With Discord integration: + +```bash +pip install zeroclaw-tools[discord] +``` + +## Quick Start + +### Basic Agent + +```python +import asyncio +from zeroclaw_tools import create_agent, shell, file_read, file_write +from langchain_core.messages import HumanMessage + +async def main(): + # Create agent with tools + agent = create_agent( + tools=[shell, file_read, file_write], + model="glm-5", + api_key="your-api-key", + base_url="https://api.z.ai/api/coding/paas/v4" + ) + + # Execute a task + result = await agent.ainvoke({ + "messages": [HumanMessage(content="List files in /tmp directory")] + }) + + print(result["messages"][-1].content) + +asyncio.run(main()) +``` + +### CLI Usage + +```bash +# Set environment variables +export API_KEY="your-api-key" +export API_BASE="https://api.z.ai/api/coding/paas/v4" + +# Run the CLI +zeroclaw-tools "List files in the current directory" +``` + +### Discord Bot + +```python +import os +from zeroclaw_tools.integrations import DiscordBot + +bot = DiscordBot( + token=os.environ["DISCORD_TOKEN"], + guild_id=123456789, + allowed_users=["123456789"] +) + +bot.run() +``` + +## Available Tools + +| Tool | Description | +|------|-------------| +| `shell` | Execute shell commands | +| `file_read` | Read file contents | +| `file_write` | Write content to files | +| `web_search` | Search the web (requires Brave API key) | +| `http_request` | Make HTTP requests | +| `memory_store` | Store data in memory | +| `memory_recall` | Recall stored data | + +## Creating Custom Tools + +```python +from zeroclaw_tools import tool + +@tool +def 
my_custom_tool(query: str) -> str: + """Description of what this tool does.""" + # Your implementation here + return f"Result for: {query}" + +# Use with agent +agent = create_agent(tools=[my_custom_tool]) +``` + +## Provider Compatibility + +Works with any OpenAI-compatible provider: + +- **Z.AI / GLM-5** — `https://api.z.ai/api/coding/paas/v4` +- **OpenRouter** — `https://openrouter.ai/api/v1` +- **Groq** — `https://api.groq.com/openai/v1` +- **DeepSeek** — `https://api.deepseek.com` +- **Ollama** — `http://localhost:11434/v1` +- **And many more...** + +## Architecture + +``` +┌─────────────────────────────────────────────┐ +│ Your Application │ +├─────────────────────────────────────────────┤ +│ zeroclaw-tools Agent │ +│ ┌─────────────────────────────────────┐ │ +│ │ LangGraph StateGraph │ │ +│ │ ┌───────────┐ ┌──────────┐ │ │ +│ │ │ Agent │───▶│ Tools │ │ │ +│ │ │ Node │◀───│ Node │ │ │ +│ │ └───────────┘ └──────────┘ │ │ +│ └─────────────────────────────────────┘ │ +├─────────────────────────────────────────────┤ +│ OpenAI-Compatible LLM Provider │ +└─────────────────────────────────────────────┘ +``` + +## Comparison with Rust ZeroClaw + +| Feature | Rust ZeroClaw | zeroclaw-tools | +|---------|---------------|----------------| +| **Binary size** | ~3.4 MB | Python package | +| **Memory** | <5 MB | ~50 MB | +| **Startup** | <10ms | ~500ms | +| **Tool consistency** | Model-dependent | LangGraph guarantees | +| **Extensibility** | Rust traits | Python decorators | + +Use **Rust ZeroClaw** for production edge deployments. Use **zeroclaw-tools** when you need guaranteed tool calling consistency or Python ecosystem integration. 
+ +## License + +MIT License — see [LICENSE](../LICENSE) diff --git a/python/pyproject.toml b/python/pyproject.toml new file mode 100644 index 0000000..00a53b3 --- /dev/null +++ b/python/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "zeroclaw-tools" +version = "0.1.0" +description = "Python companion package for ZeroClaw - LangGraph-based tool calling for consistent LLM agent execution" +readme = "README.md" +license = "MIT" +requires-python = ">=3.10" +authors = [ + { name = "ZeroClaw Community" } +] +keywords = [ + "ai", + "llm", + "agent", + "langgraph", + "zeroclaw", + "tool-calling", +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "langgraph>=0.2.0", + "langchain-core>=0.3.0", + "langchain-openai>=0.2.0", + "httpx>=0.25.0", +] + +[project.scripts] +zeroclaw-tools = "zeroclaw_tools.__main__:main" + +[project.optional-dependencies] +discord = ["discord.py>=2.3.0"] +telegram = ["python-telegram-bot>=20.0"] +dev = [ + "pytest>=7.0.0", + "pytest-asyncio>=0.21.0", + "ruff>=0.1.0", +] + +[project.urls] +Homepage = "https://github.com/zeroclaw-labs/zeroclaw" +Documentation = "https://github.com/zeroclaw-labs/zeroclaw/tree/main/python" +Repository = "https://github.com/zeroclaw-labs/zeroclaw" +Issues = "https://github.com/zeroclaw-labs/zeroclaw/issues" + +[tool.hatch.build.targets.wheel] +packages = ["zeroclaw_tools"] + +[tool.ruff] +line-length = 100 +target-version = "py310" + +[tool.pytest.ini_options] +asyncio_mode = "auto" diff --git a/python/tests/__init__.py b/python/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff 
--git a/python/tests/test_tools.py b/python/tests/test_tools.py new file mode 100644 index 0000000..14318fd --- /dev/null +++ b/python/tests/test_tools.py @@ -0,0 +1,62 @@ +""" +Tests for zeroclaw-tools package. +""" + +import pytest + + +def test_import_main(): + """Test that main package imports work.""" + from zeroclaw_tools import create_agent, shell, file_read, file_write + + assert callable(create_agent) + assert hasattr(shell, "invoke") + assert hasattr(file_read, "invoke") + assert hasattr(file_write, "invoke") + + +def test_import_tool_decorator(): + """Test that tool decorator works.""" + from zeroclaw_tools import tool + + @tool + def test_func(x: str) -> str: + """Test tool.""" + return x + + assert hasattr(test_func, "invoke") + + +def test_agent_creation(): + """Test that agent can be created with default tools.""" + from zeroclaw_tools import create_agent, shell, file_read, file_write + + agent = create_agent( + tools=[shell, file_read, file_write], model="test-model", api_key="test-key" + ) + + assert agent is not None + assert agent.model == "test-model" + + +@pytest.mark.asyncio +async def test_shell_tool(): + """Test shell tool execution.""" + from zeroclaw_tools import shell + + result = await shell.ainvoke({"command": "echo hello"}) + assert "hello" in result + + +@pytest.mark.asyncio +async def test_file_tools(tmp_path): + """Test file read/write tools.""" + from zeroclaw_tools import file_read, file_write + + test_file = tmp_path / "test.txt" + + write_result = await file_write.ainvoke({"path": str(test_file), "content": "Hello, World!"}) + assert "Successfully" in write_result + + read_result = await file_read.ainvoke({"path": str(test_file)}) + assert "Hello, World!" 
in read_result diff --git a/python/zeroclaw_tools/__init__.py b/python/zeroclaw_tools/__init__.py new file mode 100644 index 0000000..be72de5 --- /dev/null +++ b/python/zeroclaw_tools/__init__.py @@ -0,0 +1,32 @@ +""" +ZeroClaw Tools - LangGraph-based tool calling for consistent LLM agent execution. + +This package provides a reliable tool-calling layer for LLM providers that may have +inconsistent native tool calling behavior. Built on LangGraph for guaranteed execution. +""" + +from .agent import create_agent, ZeroclawAgent +from .tools import ( + shell, + file_read, + file_write, + web_search, + http_request, + memory_store, + memory_recall, +) +from .tools.base import tool + +__version__ = "0.1.0" +__all__ = [ + "create_agent", + "ZeroclawAgent", + "tool", + "shell", + "file_read", + "file_write", + "web_search", + "http_request", + "memory_store", + "memory_recall", +] diff --git a/python/zeroclaw_tools/__main__.py b/python/zeroclaw_tools/__main__.py new file mode 100644 index 0000000..e6c9639 --- /dev/null +++ b/python/zeroclaw_tools/__main__.py @@ -0,0 +1,113 @@ +""" +CLI entry point for zeroclaw-tools. +""" + +import argparse +import asyncio +import os +import sys + +from langchain_core.messages import HumanMessage + +from .agent import create_agent +from .tools import ( + shell, + file_read, + file_write, + web_search, + http_request, + memory_store, + memory_recall, +) + + +DEFAULT_SYSTEM_PROMPT = """You are ZeroClaw, an AI assistant with full system access. Use tools to accomplish tasks. +Be concise and helpful. 
Execute tools directly without excessive explanation.""" + + +async def chat(message: str, api_key: str, base_url: str, model: str) -> str: + """Run a single chat message through the agent.""" + agent = create_agent( + tools=[shell, file_read, file_write, web_search, http_request, memory_store, memory_recall], + model=model, + api_key=api_key, + base_url=base_url, + system_prompt=DEFAULT_SYSTEM_PROMPT, + ) + + result = await agent.ainvoke({"messages": [HumanMessage(content=message)]}) + return result["messages"][-1].content or "Done." + + +def main(): + """CLI main entry point.""" + parser = argparse.ArgumentParser( + description="ZeroClaw Tools - LangGraph-based tool calling for LLMs" + ) + parser.add_argument("message", nargs="+", help="Message to send to the agent") + parser.add_argument("--model", "-m", default="glm-5", help="Model to use") + parser.add_argument("--api-key", "-k", default=None, help="API key") + parser.add_argument("--base-url", "-u", default=None, help="API base URL") + parser.add_argument("--interactive", "-i", action="store_true", help="Interactive mode") + + args = parser.parse_args() + + api_key = args.api_key or os.environ.get("API_KEY") or os.environ.get("GLM_API_KEY") + base_url = args.base_url or os.environ.get("API_BASE", "https://api.z.ai/api/coding/paas/v4") + + if not api_key: + print("Error: API key required. 
Set API_KEY env var or use --api-key", file=sys.stderr) + sys.exit(1) + + if args.interactive: + print("ZeroClaw Tools CLI (Interactive Mode)") + print("Type 'exit' to quit\n") + + agent = create_agent( + tools=[ + shell, + file_read, + file_write, + web_search, + http_request, + memory_store, + memory_recall, + ], + model=args.model, + api_key=api_key, + base_url=base_url, + system_prompt=DEFAULT_SYSTEM_PROMPT, + ) + + history = [] + + while True: + try: + user_input = input("You: ").strip() + if not user_input: + continue + if user_input.lower() in ["exit", "quit", "q"]: + print("Goodbye!") + break + + history.append(HumanMessage(content=user_input)) + + result = asyncio.run(agent.ainvoke({"messages": history})) + + for msg in result["messages"][len(history) :]: + history.append(msg) + + response = result["messages"][-1].content or "Done." + print(f"\nZeroClaw: {response}\n") + + except KeyboardInterrupt: + print("\nGoodbye!") + break + else: + message = " ".join(args.message) + result = asyncio.run(chat(message, api_key, base_url, args.model)) + print(result) + + +if __name__ == "__main__": + main() diff --git a/python/zeroclaw_tools/agent.py b/python/zeroclaw_tools/agent.py new file mode 100644 index 0000000..35d0855 --- /dev/null +++ b/python/zeroclaw_tools/agent.py @@ -0,0 +1,161 @@ +""" +LangGraph-based agent factory for consistent tool calling. +""" + +import os +from typing import Any, Callable, Optional + +from langchain_core.messages import HumanMessage, SystemMessage +from langchain_core.tools import BaseTool +from langchain_openai import ChatOpenAI +from langgraph.graph import StateGraph, MessagesState, END +from langgraph.prebuilt import ToolNode + + +SYSTEM_PROMPT = """You are ZeroClaw, an AI assistant with tool access. Use tools to accomplish tasks. +Be concise and helpful. Execute tools directly when needed without excessive explanation.""" + + +class ZeroclawAgent: + """ + LangGraph-based agent with consistent tool calling behavior. 
+ + This agent wraps an LLM with LangGraph's tool execution loop, ensuring + reliable tool calling even with providers that have inconsistent native + tool calling support. + """ + + def __init__( + self, + tools: list[BaseTool], + model: str = "glm-5", + api_key: Optional[str] = None, + base_url: Optional[str] = None, + temperature: float = 0.7, + system_prompt: Optional[str] = None, + ): + self.tools = tools + self.model = model + self.temperature = temperature + self.system_prompt = system_prompt or SYSTEM_PROMPT + + api_key = api_key or os.environ.get("API_KEY") or os.environ.get("GLM_API_KEY") + base_url = base_url or os.environ.get("API_BASE", "https://api.z.ai/api/coding/paas/v4") + + if not api_key: + raise ValueError( + "API key required. Set API_KEY environment variable or pass api_key parameter." + ) + + self.llm = ChatOpenAI( + model=model, + api_key=api_key, + base_url=base_url, + temperature=temperature, + ).bind_tools(tools) + + self._graph = self._build_graph() + + def _build_graph(self) -> StateGraph: + """Build the LangGraph execution graph.""" + tool_node = ToolNode(self.tools) + + def should_continue(state: MessagesState) -> str: + messages = state["messages"] + last_message = messages[-1] + if hasattr(last_message, "tool_calls") and last_message.tool_calls: + return "tools" + return END + + async def call_model(state: MessagesState) -> dict: + response = await self.llm.ainvoke(state["messages"]) + return {"messages": [response]} + + workflow = StateGraph(MessagesState) + workflow.add_node("agent", call_model) + workflow.add_node("tools", tool_node) + workflow.set_entry_point("agent") + workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END}) + workflow.add_edge("tools", "agent") + + return workflow.compile() + + async def ainvoke(self, input: dict[str, Any], config: Optional[dict] = None) -> dict: + """ + Asynchronously invoke the agent. 
+ + Args: + input: Dict with "messages" key containing list of messages + config: Optional LangGraph config + + Returns: + Dict with "messages" key containing the conversation + """ + messages = input.get("messages", []) + + if messages and isinstance(messages[0], HumanMessage): + if not any(isinstance(m, SystemMessage) for m in messages): + messages = [SystemMessage(content=self.system_prompt)] + messages + + return await self._graph.ainvoke({"messages": messages}, config) + + def invoke(self, input: dict[str, Any], config: Optional[dict] = None) -> dict: + """ + Synchronously invoke the agent. + """ + import asyncio + + return asyncio.run(self.ainvoke(input, config)) + + +def create_agent( + tools: Optional[list[BaseTool]] = None, + model: str = "glm-5", + api_key: Optional[str] = None, + base_url: Optional[str] = None, + temperature: float = 0.7, + system_prompt: Optional[str] = None, +) -> ZeroclawAgent: + """ + Create a ZeroClaw agent with LangGraph-based tool calling. + + Args: + tools: List of tools. Defaults to shell, file_read, file_write. 
+ model: Model name to use + api_key: API key for the provider + base_url: Base URL for the provider API + temperature: Sampling temperature + system_prompt: Custom system prompt + + Returns: + Configured ZeroclawAgent instance + + Example: + ```python + from zeroclaw_tools import create_agent, shell, file_read + from langchain_core.messages import HumanMessage + + agent = create_agent( + tools=[shell, file_read], + model="glm-5", + api_key="your-key" + ) + + result = await agent.ainvoke({ + "messages": [HumanMessage(content="List files in /tmp")] + }) + ``` + """ + if tools is None: + from .tools import shell, file_read, file_write + + tools = [shell, file_read, file_write] + + return ZeroclawAgent( + tools=tools, + model=model, + api_key=api_key, + base_url=base_url, + temperature=temperature, + system_prompt=system_prompt, + ) diff --git a/python/zeroclaw_tools/integrations/__init__.py b/python/zeroclaw_tools/integrations/__init__.py new file mode 100644 index 0000000..e26f400 --- /dev/null +++ b/python/zeroclaw_tools/integrations/__init__.py @@ -0,0 +1,7 @@ +""" +Integrations for various platforms (Discord, Telegram, etc.) +""" + +from .discord_bot import DiscordBot + +__all__ = ["DiscordBot"] diff --git a/python/zeroclaw_tools/integrations/discord_bot.py b/python/zeroclaw_tools/integrations/discord_bot.py new file mode 100644 index 0000000..45a9d7d --- /dev/null +++ b/python/zeroclaw_tools/integrations/discord_bot.py @@ -0,0 +1,174 @@ +""" +Discord bot integration for ZeroClaw. +""" + +import asyncio +import os +from typing import Optional, Set + +try: + import discord + from discord.ext import commands + + DISCORD_AVAILABLE = True +except ImportError: + DISCORD_AVAILABLE = False + discord = None + +from langchain_core.messages import HumanMessage, SystemMessage + +from ..agent import create_agent +from ..tools import shell, file_read, file_write, web_search + + +class DiscordBot: + """ + Discord bot powered by ZeroClaw agent with LangGraph tool calling. 
+ + Example: + ```python + import os + from zeroclaw_tools.integrations import DiscordBot + + bot = DiscordBot( + token=os.environ["DISCORD_TOKEN"], + guild_id=123456789, + allowed_users=["123456789"], + api_key=os.environ["API_KEY"] + ) + + bot.run() + ``` + """ + + def __init__( + self, + token: str, + guild_id: int, + allowed_users: list[str], + api_key: Optional[str] = None, + base_url: Optional[str] = None, + model: str = "glm-5", + prefix: str = "", + ): + if not DISCORD_AVAILABLE: + raise ImportError( + "discord.py is required for Discord integration. " + "Install with: pip install zeroclaw-tools[discord]" + ) + + self.token = token + self.guild_id = guild_id + self.allowed_users: Set[str] = set(allowed_users) + self.api_key = api_key or os.environ.get("API_KEY") + self.base_url = base_url or os.environ.get("API_BASE") + self.model = model + self.prefix = prefix + + self._histories: dict[str, list] = {} + self._max_history = 20 + + intents = discord.Intents.default() + intents.message_content = True + intents.guilds = True + + self.client = discord.Client(intents=intents) + self._setup_events() + + def _setup_events(self): + @self.client.event + async def on_ready(): + print(f"ZeroClaw Discord Bot ready: {self.client.user}") + print(f"Guild: {self.guild_id}") + print(f"Allowed users: {self.allowed_users}") + + @self.client.event + async def on_message(message): + if message.author == self.client.user: + return + + if message.guild and message.guild.id != self.guild_id: + return + + user_id = str(message.author.id) + if user_id not in self.allowed_users: + return + + content = message.content.strip() + if not content: + return + + if self.prefix and not content.startswith(self.prefix): + return + + if self.prefix: + content = content[len(self.prefix) :].strip() + + print(f"[{message.author}] {content[:50]}...") + + async with message.channel.typing(): + try: + response = await self._process_message(content, user_id) + for chunk in 
self._split_message(response): + await message.reply(chunk) + except Exception as e: + print(f"Error: {e}") + await message.reply(f"Error: {e}") + + async def _process_message(self, content: str, user_id: str) -> str: + """Process a message and return the response.""" + agent = create_agent( + tools=[shell, file_read, file_write, web_search], + model=self.model, + api_key=self.api_key, + base_url=self.base_url, + ) + + messages = [] + + if user_id in self._histories: + for msg in self._histories[user_id][-10:]: + messages.append(msg) + + messages.append(HumanMessage(content=content)) + + result = await agent.ainvoke({"messages": messages}) + + if user_id not in self._histories: + self._histories[user_id] = [] + self._histories[user_id].append(HumanMessage(content=content)) + + for msg in result["messages"][len(messages) :]: + self._histories[user_id].append(msg) + + self._histories[user_id] = self._histories[user_id][-self._max_history * 2 :] + + final = result["messages"][-1] + return final.content or "Done." + + @staticmethod + def _split_message(text: str, max_len: int = 1900) -> list[str]: + """Split long messages for Discord's character limit.""" + if len(text) <= max_len: + return [text] + + chunks = [] + while text: + if len(text) <= max_len: + chunks.append(text) + break + + pos = text.rfind("\n", 0, max_len) + if pos == -1: + pos = text.rfind(" ", 0, max_len) + if pos == -1: + pos = max_len + + chunks.append(text[:pos].strip()) + text = text[pos:].strip() + + return chunks + + def run(self): + """Start the Discord bot.""" + self.client.run(self.token) diff --git a/python/zeroclaw_tools/tools/__init__.py b/python/zeroclaw_tools/tools/__init__.py new file mode 100644 index 0000000..230becf --- /dev/null +++ b/python/zeroclaw_tools/tools/__init__.py @@ -0,0 +1,20 @@ +""" +Built-in tools for ZeroClaw agents. 
+""" + +from .base import tool +from .shell import shell +from .file import file_read, file_write +from .web import web_search, http_request +from .memory import memory_store, memory_recall + +__all__ = [ + "tool", + "shell", + "file_read", + "file_write", + "web_search", + "http_request", + "memory_store", + "memory_recall", +] diff --git a/python/zeroclaw_tools/tools/base.py b/python/zeroclaw_tools/tools/base.py new file mode 100644 index 0000000..e78a555 --- /dev/null +++ b/python/zeroclaw_tools/tools/base.py @@ -0,0 +1,46 @@ +""" +Base utilities for creating tools. +""" + +from typing import Any, Callable, Optional + +from langchain_core.tools import tool as langchain_tool + + +def tool( + func: Optional[Callable] = None, + *, + name: Optional[str] = None, + description: Optional[str] = None, +) -> Any: + """ + Decorator to create a LangChain tool from a function. + + This is a convenience wrapper around langchain_core.tools.tool that + provides a simpler interface for ZeroClaw users. + + Args: + func: The function to wrap (when used without parentheses) + name: Optional custom name for the tool + description: Optional custom description + + Returns: + A BaseTool instance + + Example: + ```python + from zeroclaw_tools import tool + + @tool + def my_tool(query: str) -> str: + \"\"\"Description of what this tool does.\"\"\" + return f"Result: {query}" + ``` + """ + if func is not None: + return langchain_tool(func) + + def decorator(f: Callable) -> Any: + return langchain_tool(f, name=name) + + return decorator diff --git a/python/zeroclaw_tools/tools/file.py b/python/zeroclaw_tools/tools/file.py new file mode 100644 index 0000000..92265e7 --- /dev/null +++ b/python/zeroclaw_tools/tools/file.py @@ -0,0 +1,60 @@ +""" +File read/write tools. +""" + +import os + +from langchain_core.tools import tool + + +MAX_FILE_SIZE = 100_000 + + +@tool +def file_read(path: str) -> str: + """ + Read the contents of a file at the given path. 
@tool
def file_write(path: str, content: str) -> str:
    """
    Write content to a file, creating parent directories if needed.

    Args:
        path: The file path to write to
        content: The content to write (encoded as UTF-8)

    Returns:
        Success message or error
    """
    try:
        parent = os.path.dirname(path)
        if parent:
            # Create any missing parent directories so the agent can write
            # to paths that do not exist yet.
            os.makedirs(parent, exist_ok=True)
        with open(path, "w", encoding="utf-8") as f:
            f.write(content)
        # Report the real on-disk size: len(content) counts characters,
        # but multi-byte UTF-8 characters occupy more than one byte.
        n_bytes = len(content.encode("utf-8"))
        return f"Successfully wrote {n_bytes} bytes to {path}"
    except PermissionError:
        return f"Error: Permission denied: {path}"
    except Exception as e:
        return f"Error: {e}"
@tool
def memory_recall(query: str) -> str:
    """
    Search persistent memory for entries whose key or value contains the query.

    Args:
        query: The search query (matched case-insensitively)

    Returns:
        Matching entries or "no matches" message
    """
    try:
        store = _load_memory()
        if not store:
            return "No memories stored yet"

        needle = query.lower()
        hits: dict = {}
        for key, value in store.items():
            # Case-insensitive substring match against key and value.
            if needle in key.lower() or needle in str(value).lower():
                hits[key] = value

        if not hits:
            return f"No matches for: {query}"

        return json.dumps(hits, indent=2)
    except Exception as e:
        return f"Error: {e}"
@tool
def shell(command: str) -> str:
    """
    Execute a shell command and return the output.

    NOTE(review): runs model-supplied input with shell=True by design —
    that is this tool's contract. Deploy only in sandboxed environments.

    Args:
        command: The shell command to execute

    Returns:
        The command output (stdout and stderr combined)
    """
    try:
        proc = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=60,
        )
        pieces = [proc.stdout]
        if proc.stderr:
            pieces.append(f"\nSTDERR: {proc.stderr}")
        if proc.returncode != 0:
            pieces.append(f"\nExit code: {proc.returncode}")
        combined = "".join(pieces)
        return combined or "(no output)"
    except subprocess.TimeoutExpired:
        return "Error: Command timed out after 60 seconds"
    except Exception as e:
        return f"Error: {e}"
@tool
def web_search(query: str) -> str:
    """
    Search the web using Brave Search API.

    Requires BRAVE_API_KEY environment variable to be set.

    Args:
        query: The search query

    Returns:
        Search results as formatted text
    """
    token = os.environ.get("BRAVE_API_KEY", "")
    if not token:
        return "Error: BRAVE_API_KEY environment variable not set. Get one at https://brave.com/search/api/"

    try:
        search_url = (
            "https://api.search.brave.com/res/v1/web/search?q="
            + urllib.parse.quote(query)
        )
        request = urllib.request.Request(
            search_url,
            headers={"Accept": "application/json", "X-Subscription-Token": token},
        )

        with urllib.request.urlopen(request, timeout=10) as resp:
            payload = json.loads(resp.read().decode())

        # Format the top five hits as "- title / url / truncated snippet".
        lines = []
        for hit in payload.get("web", {}).get("results", [])[:5]:
            lines.append(
                "- {}\n {}\n {}".format(
                    hit.get("title", "No title"),
                    hit.get("url", ""),
                    hit.get("description", "")[:200],
                )
            )

        if not lines:
            return "No results found"
        return "\n\n".join(lines)
    except Exception as e:
        return f"Error: {e}"