feat(python): add zeroclaw-tools companion package for LangGraph tool calling

- Add Python package with LangGraph-based agent for consistent tool calling
- Provides reliable tool execution for providers with inconsistent native support
- Includes tools: shell, file_read, file_write, web_search, http_request, memory
- Discord bot integration included
- CLI tool for quick interactions
- Works with any OpenAI-compatible provider (Z.AI, OpenRouter, Groq, etc.)

Why: Some LLM providers (e.g., GLM-5/Zhipu) have inconsistent tool calling behavior.
LangGraph's structured approach guarantees reliable tool execution across all providers.
This commit is contained in:
ZeroClaw Contributor 2026-02-17 01:35:40 +03:00 committed by Chummy
parent bc38994867
commit e5ef8a3b62
17 changed files with 1371 additions and 0 deletions

View file

@@ -0,0 +1,113 @@
"""
CLI entry point for zeroclaw-tools.
"""
import argparse
import asyncio
import os
import sys
from langchain_core.messages import HumanMessage
from .agent import create_agent
from .tools import (
shell,
file_read,
file_write,
web_search,
http_request,
memory_store,
memory_recall,
)
# System prompt injected into every agent created by this CLI; instructs the
# model to act as "ZeroClaw" and to call tools directly with minimal chatter.
# NOTE: this is runtime text sent to the provider — edit with care.
DEFAULT_SYSTEM_PROMPT = """You are ZeroClaw, an AI assistant with full system access. Use tools to accomplish tasks.
Be concise and helpful. Execute tools directly without excessive explanation."""
async def chat(message: str, api_key: str, base_url: str, model: str) -> str:
    """Send one message through a freshly built agent and return its reply.

    Args:
        message: User text to send.
        api_key: Provider API key.
        base_url: OpenAI-compatible API base URL.
        model: Model identifier to request from the provider.

    Returns:
        The final message content produced by the agent, or ``"Done."`` when
        the agent finishes without any text content.
    """
    toolbox = [
        shell,
        file_read,
        file_write,
        web_search,
        http_request,
        memory_store,
        memory_recall,
    ]
    agent = create_agent(
        tools=toolbox,
        model=model,
        api_key=api_key,
        base_url=base_url,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
    )
    state = await agent.ainvoke({"messages": [HumanMessage(content=message)]})
    reply = state["messages"][-1].content
    return reply or "Done."
def main():
    """CLI entry point for zeroclaw-tools.

    One-shot mode (default): joins the positional arguments into a single
    message, runs it through the agent, and prints the reply.
    Interactive mode (``--interactive``): runs a REPL that keeps the full
    conversation history across turns.

    Exits with status 1 when no API key is available via ``--api-key``,
    ``API_KEY``, or ``GLM_API_KEY``.
    """
    parser = argparse.ArgumentParser(
        description="ZeroClaw Tools - LangGraph-based tool calling for LLMs"
    )
    parser.add_argument("message", nargs="+", help="Message to send to the agent")
    parser.add_argument("--model", "-m", default="glm-5", help="Model to use")
    parser.add_argument("--api-key", "-k", default=None, help="API key")
    parser.add_argument("--base-url", "-u", default=None, help="API base URL")
    parser.add_argument("--interactive", "-i", action="store_true", help="Interactive mode")
    args = parser.parse_args()

    # CLI flag wins; fall back to environment variables.
    api_key = args.api_key or os.environ.get("API_KEY") or os.environ.get("GLM_API_KEY")
    base_url = args.base_url or os.environ.get(
        "API_BASE", "https://api.z.ai/api/coding/paas/v4"
    )

    if not api_key:
        print("Error: API key required. Set API_KEY env var or use --api-key", file=sys.stderr)
        sys.exit(1)

    if args.interactive:
        _interactive_loop(api_key, base_url, args.model)
    else:
        message = " ".join(args.message)
        result = asyncio.run(chat(message, api_key, base_url, args.model))
        print(result)


def _interactive_loop(api_key: str, base_url: str, model: str) -> None:
    """Run the interactive REPL: build one agent and reuse it across turns."""
    print("ZeroClaw Tools CLI (Interactive Mode)")
    print("Type 'exit' to quit\n")
    agent = create_agent(
        tools=[
            shell,
            file_read,
            file_write,
            web_search,
            http_request,
            memory_store,
            memory_recall,
        ],
        model=model,
        api_key=api_key,
        base_url=base_url,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
    )
    history = []
    while True:
        try:
            user_input = input("You: ").strip()
        except (KeyboardInterrupt, EOFError):
            # Bug fix: Ctrl-D (EOF) previously escaped as an uncaught
            # EOFError and crashed with a traceback; treat it like Ctrl-C.
            print("\nGoodbye!")
            break
        if not user_input:
            continue
        if user_input.lower() in ["exit", "quit", "q"]:
            print("Goodbye!")
            break
        history.append(HumanMessage(content=user_input))
        try:
            result = asyncio.run(agent.ainvoke({"messages": history}))
        except KeyboardInterrupt:
            # Allow aborting a long-running agent turn without killing the REPL exit path.
            print("\nGoodbye!")
            break
        # Append only the messages the agent added this turn (tool calls,
        # tool results, final reply) so context carries into the next turn.
        history.extend(result["messages"][len(history):])
        response = result["messages"][-1].content or "Done."
        print(f"\nZeroClaw: {response}\n")


if __name__ == "__main__":
    main()