Skip to content
185 changes: 185 additions & 0 deletions examples/agent_tool_search.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,185 @@
"""Search and execute example: LLM-driven tool discovery and execution.

There are two ways to give tools to an LLM:

1. ``toolset.openai()`` — fetches ALL tools and converts them to OpenAI format.
Token cost scales with the number of tools in your catalog.

2. ``toolset.openai(mode="search_and_execute")`` — returns just 2 tools
(tool_search + tool_execute). The LLM discovers and runs tools on-demand,
keeping token usage constant regardless of catalog size.

This example demonstrates approach 2 with two patterns:
- Raw client (Gemini): manual agent loop with ``toolset.execute()``
- LangChain: framework handles tool execution automatically

Prerequisites:
- STACKONE_API_KEY environment variable
- STACKONE_ACCOUNT_ID environment variable
- GOOGLE_API_KEY environment variable (for Gemini/LangChain)

Run with:
uv run python examples/agent_tool_search.py
"""

from __future__ import annotations

import json
import os

try:
from dotenv import load_dotenv

load_dotenv()
except ModuleNotFoundError:
pass

from stackone_ai import StackOneToolSet


def example_gemini() -> None:
    """Raw client: Gemini via OpenAI-compatible API.

    Shows: init toolset -> get OpenAI tools -> manual agent loop with toolset.execute().
    """
    banner = "=" * 60
    print(banner)
    print("Example 1: Raw client (Gemini) — manual execution")
    print(banner)
    print()

    try:
        from openai import OpenAI
    except ImportError:
        print("Skipped: pip install openai")
        print()
        return

    api_key = os.getenv("GOOGLE_API_KEY")
    if not api_key:
        print("Skipped: Set GOOGLE_API_KEY to run this example.")
        print()
        return

    # 1. Init toolset (account-scoped execution only when an account id is set)
    account_id = os.getenv("STACKONE_ACCOUNT_ID")
    execute_config = {"account_ids": [account_id]} if account_id else None
    toolset = StackOneToolSet(
        account_id=account_id,
        search={"method": "semantic", "top_k": 3},
        execute=execute_config,
    )

    # 2. Get tools in OpenAI format (just tool_search + tool_execute)
    tools = toolset.openai(mode="search_and_execute")

    # 3. Create Gemini client (OpenAI-compatible) and run agent loop
    client = OpenAI(
        api_key=api_key,
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    )
    conversation: list[dict] = [
        {"role": "user", "content": "List my upcoming Calendly events for the next week."},
    ]

    for _ in range(10):
        response = client.chat.completions.create(
            model="gemini-3-pro-preview",
            messages=conversation,
            tools=tools,
            tool_choice="auto",
        )
        reply = response.choices[0].message

        # 4. No tool calls means the model produced its final answer
        if not reply.tool_calls:
            print(f"Answer: {reply.content}")
            break

        # 5. Execute each requested tool manually and feed the result back
        conversation.append(reply.model_dump(exclude_none=True))
        for call in reply.tool_calls:
            print(f"  -> {call.function.name}({call.function.arguments})")
            output = toolset.execute(call.function.name, call.function.arguments)
            conversation.append(
                {
                    "role": "tool",
                    "tool_call_id": call.id,
                    "content": json.dumps(output),
                }
            )

    print()


def example_langchain() -> None:
    """Framework: LangChain with auto-execution.

    Shows: init toolset -> get LangChain tools -> bind to model -> framework executes tools.
    No toolset.execute() needed — the framework calls _run() on tools automatically.
    """
    print("=" * 60)
    print("Example 2: LangChain — framework handles execution")
    print("=" * 60)
    print()

    try:
        from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
        from langchain_google_genai import ChatGoogleGenerativeAI
    except ImportError:
        print("Skipped: pip install langchain-google-genai")
        print()
        return

    if not os.getenv("GOOGLE_API_KEY"):
        print("Skipped: Set GOOGLE_API_KEY to run this example.")
        print()
        return

    # 1. Init toolset
    account_id = os.getenv("STACKONE_ACCOUNT_ID")
    toolset = StackOneToolSet(
        account_id=account_id,
        search={"method": "semantic", "top_k": 3},
        execute={"account_ids": [account_id]} if account_id else None,
    )

    # 2. Get tools in LangChain format and bind to model
    langchain_tools = toolset.langchain(mode="search_and_execute")
    tools_by_name = {tool.name: tool for tool in langchain_tools}
    model = ChatGoogleGenerativeAI(model="gemini-3-pro-preview").bind_tools(langchain_tools)

    # 3. Run agent loop
    messages = [HumanMessage(content="List my upcoming Calendly events for the next week.")]

    for _step in range(10):
        response: AIMessage = model.invoke(messages)

        # 4. If no tool calls, print final answer and stop.
        # Gemini may return structured content blocks in ``content``;
        # ``text`` flattens them into the plain final-answer string.
        if not response.tool_calls:
            print(f"Answer: {response.text}")
            break

        # 5. Framework-compatible execution — invoke LangChain tools directly
        messages.append(response)
        for tool_call in response.tool_calls:
            print(f"  -> {tool_call['name']}({json.dumps(tool_call['args'])})")
            tool = tools_by_name[tool_call["name"]]
            try:
                result = tool.invoke(tool_call["args"])
            except ModuleNotFoundError:
                # tool_execute fetches the tool catalog over MCP under the hood,
                # so the first invocation raises when the optional extra is absent.
                print("Skipped: pip install 'stackone-ai[mcp]'")
                print()
                return
            messages.append(ToolMessage(content=json.dumps(result), tool_call_id=tool_call["id"]))

    print()


def main() -> None:
    """Run all examples."""
    # Both examples require a StackOne API key; bail out early without one.
    if not os.getenv("STACKONE_API_KEY"):
        print("Set STACKONE_API_KEY to run these examples.")
        return

    example_gemini()
    example_langchain()

Comment on lines +173 to +182

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

these examples should be using with a client. i.e. how do you use with anthropic and openai. not just like here's it in langchain format. how would they then use the langchain format in an LLM

we want init_stackone_tools -> pass to LLM client -> show how to use LLM client with stackone tools
(for each and all LLM clients / tool formats that we support)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated!


# Script entry point: run all examples when executed directly.
if __name__ == "__main__":
    main()
3 changes: 2 additions & 1 deletion stackone_ai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,13 @@
SemanticSearchResponse,
SemanticSearchResult,
)
from stackone_ai.toolset import SearchConfig, SearchMode, SearchTool, StackOneToolSet
from stackone_ai.toolset import ExecuteToolsConfig, SearchConfig, SearchMode, SearchTool, StackOneToolSet

__all__ = [
"StackOneToolSet",
"StackOneTool",
"Tools",
"ExecuteToolsConfig",
"SearchConfig",
"SearchMode",
"SearchTool",
Expand Down
4 changes: 4 additions & 0 deletions stackone_ai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -422,6 +422,10 @@ def to_langchain(self) -> BaseTool:
python_type = int
elif type_str == "boolean":
python_type = bool
elif type_str == "object":
python_type = dict
elif type_str == "array":
python_type = list

field = Field(description=details.get("description", ""))
else:
Expand Down
Loading