Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
187 changes: 187 additions & 0 deletions src/adapters/langgraph.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
import { resolve, join } from 'node:path';
import { loadAgentManifest, loadFileIfExists } from '../utils/loader.js';
import { loadAllSkills, getAllowedTools } from '../utils/skill-loader.js';

/**
 * Export a gitagent directory to a LangGraph-compatible Python agent graph.
 *
 * Generates a ready-to-run Python file implementing a stateful ReAct agent
 * using LangGraph's StateGraph with ToolNode. Supports multi-agent topologies
 * via the gitagent `agents` manifest field. Compliance constraints are
 * injected as system prompt rules.
 *
 * @param dir - Path to the gitagent directory; resolved to an absolute path.
 * @returns The complete generated Python source as a single string.
 */
export function exportToLangGraph(dir: string): string {
  const agentDir = resolve(dir);
  const manifest = loadAgentManifest(agentDir);

  const systemPrompt = buildSystemPrompt(agentDir, manifest);
  const tools = buildToolDefinitions(agentDir);
  const hasSubAgents = manifest.agents && Object.keys(manifest.agents).length > 0;
  const modelImport = resolveModelImport(manifest.model?.preferred);

  // Escape text for embedding inside a Python triple-quoted string.
  // Backslashes must be escaped first so sequences like "\n" in prompt
  // text are not interpreted by Python, then embedded triple quotes are
  // broken up so they cannot terminate the literal early.
  const escapeTriple = (s: string): string =>
    s.replace(/\\/g, '\\\\').replace(/"""/g, '\\"\\"\\"');

  // Escape text for embedding inside a Python double-quoted string literal.
  const escapeDouble = (s: string): string =>
    s.replace(/\\/g, '\\\\').replace(/"/g, '\\"');

  // Sanitize a tool name into a valid, unique Python identifier. Distinct
  // names such as "foo bar" and "foo-bar" would otherwise both collapse to
  // "foo_bar", and a leading digit would be an invalid identifier.
  const usedIdents = new Set<string>();
  const toPyIdent = (name: string): string => {
    let ident = name.replace(/[^a-zA-Z0-9]/g, '_');
    if (/^[0-9]/.test(ident)) ident = `_${ident}`;
    let unique = ident;
    for (let i = 2; usedIdents.has(unique); i++) unique = `${ident}_${i}`;
    usedIdents.add(unique);
    return unique;
  };

  // Compute each identifier exactly once so the function definitions and the
  // `tools = [...]` registry below are guaranteed to use the same names.
  const toolFuncs = tools.map(t => ({
    funcName: toPyIdent(t.name),
    description: t.description,
  }));

  const lines: string[] = [];

  lines.push('"""');
  lines.push(`LangGraph agent graph for ${manifest.name} v${manifest.version}`);
  lines.push('Generated by gitagent export --format langgraph');
  lines.push('"""');
  lines.push('');
  lines.push('from typing import Annotated, TypedDict, Sequence');
  lines.push('from langchain_core.messages import BaseMessage, HumanMessage, AIMessage');
  lines.push('from langchain_core.tools import tool');
  lines.push(`${modelImport}`);
  lines.push('from langgraph.graph import StateGraph, END');
  lines.push('from langgraph.prebuilt import ToolNode');
  lines.push('import operator');
  lines.push('');

  lines.push('# --- Agent State ---');
  lines.push('class AgentState(TypedDict):');
  lines.push('    messages: Annotated[Sequence[BaseMessage], operator.add]');
  lines.push('');

  if (toolFuncs.length > 0) {
    lines.push('# --- Tools ---');
    for (const t of toolFuncs) {
      lines.push('');
      lines.push('@tool');
      lines.push(`def ${t.funcName}(input: str) -> str:`);
      // Descriptions come from skill frontmatter and may contain quotes or
      // backslashes; escape them so the docstring stays well-formed.
      lines.push(`    """${escapeTriple(t.description)}"""`);
      lines.push('    # TODO: implement tool logic');
      lines.push(`    raise NotImplementedError("${t.funcName} not yet implemented")`);
    }
    lines.push('');
    lines.push(`tools = [${toolFuncs.map(t => t.funcName).join(', ')}]`);
  } else {
    lines.push('tools = []');
  }

  lines.push('');
  lines.push('# --- Model with tool binding ---');
  lines.push(`llm = ${resolveModelInstantiation(manifest.model?.preferred)}`);
  // Some providers reject an empty tools array, so only bind when tools exist.
  lines.push('llm_with_tools = llm.bind_tools(tools) if tools else llm');

  lines.push('');
  lines.push('SYSTEM_PROMPT = """' + escapeTriple(systemPrompt) + '"""');

  lines.push('');
  lines.push('# --- Graph Nodes ---');
  lines.push('def agent_node(state: AgentState) -> dict:');
  lines.push('    """Main reasoning node."""');
  lines.push('    messages = state["messages"]');
  lines.push('    from langchain_core.messages import SystemMessage');
  lines.push('    if not any(isinstance(m, SystemMessage) for m in messages):');
  lines.push('        messages = [SystemMessage(content=SYSTEM_PROMPT)] + list(messages)');
  lines.push('    response = llm_with_tools.invoke(messages)');
  lines.push('    return {"messages": [response]}');
  lines.push('');
  lines.push('tool_node = ToolNode(tools)');
  lines.push('');
  lines.push('def should_continue(state: AgentState) -> str:');
  lines.push('    last = state["messages"][-1]');
  lines.push('    if hasattr(last, "tool_calls") and last.tool_calls:');
  lines.push('        return "tools"');
  lines.push('    return END');

  lines.push('');
  lines.push('# --- Build Graph ---');
  lines.push('workflow = StateGraph(AgentState)');
  lines.push('workflow.add_node("agent", agent_node)');
  lines.push('workflow.add_node("tools", tool_node)');
  lines.push('workflow.set_entry_point("agent")');
  lines.push('workflow.add_conditional_edges("agent", should_continue)');
  lines.push('workflow.add_edge("tools", "agent")');
  lines.push('graph = workflow.compile()');

  if (hasSubAgents) {
    lines.push('');
    lines.push('# --- Sub-Agents ---');
    lines.push('# Sub-agents defined in agent.yaml can be modelled as additional');
    lines.push('# StateGraph nodes with supervisor routing.');
    lines.push('# See: https://langchain-ai.github.io/langgraph/tutorials/multi_agent/multi-agent-collaboration/');
    for (const name of Object.keys(manifest.agents ?? {})) {
      lines.push(`# Sub-agent: ${name}`);
    }
  }

  lines.push('');
  lines.push('if __name__ == "__main__":');
  // Escape the name/version in case the manifest contains quotes/backslashes.
  lines.push(`    print("Agent: ${escapeDouble(`${manifest.name} v${manifest.version}`)}")`);
  lines.push('    while True:');
  lines.push('        user_input = input("You: ").strip()');
  lines.push('        if not user_input or user_input.lower() in ("exit", "quit"):');
  lines.push('            break');
  lines.push('        result = graph.invoke({"messages": [HumanMessage(content=user_input)]})');
  lines.push('        last_ai = next((m for m in reversed(result["messages"]) if isinstance(m, AIMessage)), None)');
  lines.push('        if last_ai:');
  lines.push('            print(f"Agent: {last_ai.content}")');

  return lines.join('\n');
}

function buildSystemPrompt(agentDir: string, manifest: ReturnType<typeof loadAgentManifest>): string {
const parts: string[] = [];

const soul = loadFileIfExists(join(agentDir, 'SOUL.md'));
if (soul) parts.push(soul);

const rules = loadFileIfExists(join(agentDir, 'RULES.md'));
if (rules) parts.push(`## Rules\n${rules}`);

const skillsDir = join(agentDir, 'skills');
const skills = loadAllSkills(skillsDir);
for (const skill of skills) {
const allowedTools = getAllowedTools(skill.frontmatter);
const toolsNote = allowedTools.length > 0 ? `\nAllowed tools: ${allowedTools.join(', ')}` : '';
parts.push(`## Skill: ${skill.frontmatter.name}\n${skill.frontmatter.description}${toolsNote}\n\n${skill.instructions}`);
}

if (manifest.compliance) {
const c = manifest.compliance;
const constraints: string[] = ['## Compliance Constraints'];
if (c.communications?.fair_balanced) constraints.push('- All outputs must be fair and balanced (FINRA 2210)');
if (c.communications?.no_misleading) constraints.push('- Never make misleading or promissory statements');
if (c.data_governance?.pii_handling === 'redact') constraints.push('- Redact all PII from outputs');
if (c.supervision?.human_in_the_loop === 'always') constraints.push('- All decisions require human approval');
if (manifest.compliance.segregation_of_duties) {
const sod = manifest.compliance.segregation_of_duties;
if (sod.conflicts) {
constraints.push('- Segregation of duties conflicts:');
for (const [a, b] of sod.conflicts) {
constraints.push(` - "${a}" and "${b}" may not be held by the same agent`);
}
}
}
if (constraints.length > 1) parts.push(constraints.join('\n'));
}

return parts.join('\n\n');
}

/** Minimal tool description emitted into the generated Python file. */
interface ToolDef {
  /** Tool name from the skill frontmatter (sanitized into a Python identifier at codegen time). */
  name: string;
  /** Human-readable description, used as the generated tool's docstring. */
  description: string;
}

/**
 * Derive one tool definition per skill found under `<agentDir>/skills`,
 * falling back to the skill's name when it has no description.
 */
function buildToolDefinitions(agentDir: string): ToolDef[] {
  const skillsDir = join(agentDir, 'skills');
  const defs: ToolDef[] = [];
  for (const skill of loadAllSkills(skillsDir)) {
    defs.push({
      name: skill.frontmatter.name,
      description: skill.frontmatter.description ?? skill.frontmatter.name,
    });
  }
  return defs;
}

/**
 * Map a preferred model id to the Python import line for the matching
 * LangChain chat-model package. Missing or unrecognized models default
 * to the OpenAI integration.
 */
function resolveModelImport(model?: string): string {
  if (model?.startsWith('claude')) {
    return 'from langchain_anthropic import ChatAnthropic';
  }
  if (model?.startsWith('gemini')) {
    return 'from langchain_google_genai import ChatGoogleGenerativeAI';
  }
  return 'from langchain_openai import ChatOpenAI';
}

/**
 * Produce the Python expression that instantiates the chat model for the
 * preferred model id. With no model configured, defaults to gpt-4o;
 * unrecognized ids fall through to ChatOpenAI with the given id.
 */
function resolveModelInstantiation(model?: string): string {
  if (!model) {
    return 'ChatOpenAI(model="gpt-4o", temperature=0.3)';
  }
  const className = model.startsWith('claude')
    ? 'ChatAnthropic'
    : model.startsWith('gemini')
      ? 'ChatGoogleGenerativeAI'
      : 'ChatOpenAI';
  return `${className}(model="${model}", temperature=0.3)`;
}
40 changes: 40 additions & 0 deletions src/runners/langgraph.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import { writeFileSync, unlinkSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import { spawnSync } from 'node:child_process';
import { randomBytes } from 'node:crypto';
import { exportToLangGraph } from '../adapters/langgraph.js';
import { AgentManifest } from '../utils/loader.js';
import { error, info } from '../utils/format.js';

/** Options accepted by the LangGraph runner. */
interface RunOptions {
  /** Initial prompt; currently unread by runWithLangGraph (received as `_options`). */
  prompt?: string;
}

/**
 * Export the agent to a temporary Python script and execute it with python3.
 *
 * The generated script is written to the OS temp directory, run with
 * stdio inherited (interactive REPL), and deleted afterwards. The process
 * exits with the Python process's exit status, or 1 when python3 could not
 * be spawned at all.
 *
 * Bug fix: the original called `process.exit()` inside the `try` block.
 * `process.exit()` terminates Node immediately, so the `finally` cleanup
 * never ran and the temp script leaked on every normal run. The exit code
 * is now captured first, cleanup runs, and exit happens last.
 */
export function runWithLangGraph(agentDir: string, _manifest: AgentManifest, _options: RunOptions = {}): void {
  const script = exportToLangGraph(agentDir);
  const tmpFile = join(tmpdir(), `gitagent-langgraph-${randomBytes(4).toString('hex')}.py`);

  writeFileSync(tmpFile, script, 'utf-8');

  info(`Running LangGraph agent from "${agentDir}"...`);
  info('Make sure langgraph, langchain-core, and a model package are installed.');

  let exitCode = 1;
  try {
    const result = spawnSync('python3', [tmpFile], {
      stdio: 'inherit',
      cwd: agentDir,
      env: { ...process.env },
    });

    if (result.error) {
      // spawn failure (e.g. python3 not on PATH) — result.status is null here.
      error(`Failed to run Python: ${result.error.message}`);
      info('Install: pip install langgraph langchain-core langchain-openai');
      exitCode = 1;
    } else {
      exitCode = result.status ?? 0;
    }
  } finally {
    // Best-effort cleanup; must happen BEFORE process.exit(), which
    // terminates immediately and would skip any surrounding finally.
    try { unlinkSync(tmpFile); } catch { /* ignore */ }
  }

  process.exit(exitCode);
}