
LangGraph Cheatsheet

Two pages of condensed essentials · Core concepts + code templates + best practices


🏗️ Core Architecture Quick Reference

Basic Structure

python
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import MemorySaver

# 1. Define the state
from typing import TypedDict, Annotated
import operator

class State(TypedDict):
    messages: Annotated[list, operator.add]  # Reducer
    context: str

# 2. Define nodes
def node_a(state: State) -> State:
    return {"messages": [{"role": "ai", "content": "Handled by A"}]}

# 3. Build the graph
graph = StateGraph(State)
graph.add_node("a", node_a)
graph.add_edge(START, "a")
graph.add_edge("a", END)

# 4. Compile
app = graph.compile(checkpointer=MemorySaver())

# 5. Run
result = app.invoke({"messages": []}, config={"configurable": {"thread_id": "1"}})

Conditional Routing

python
def router(state: State) -> str:
    if state.get("needs_tool"):
        return "tools"
    return "end"

graph.add_conditional_edges("agent", router, {"tools": "tools", "end": END})

🔧 Tool Integration Quick Reference

Tool + ToolNode

python
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode, tools_condition

# Define a tool
@tool
def search(query: str) -> str:
    """Search tool: look up information on the internet."""
    return f"Search results for: {query}"

# Bind the tools to the LLM (assumes llm is a chat model with tool-calling support)
tools = [search]
llm_with_tools = llm.bind_tools(tools)

# Define the agent node
def agent(state):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

# Add to the graph
graph.add_node("agent", agent)
graph.add_node("tools", ToolNode(tools))
graph.add_conditional_edges("agent", tools_condition)  # routes to "tools" or END automatically
graph.add_edge("tools", "agent")  # loop back to the agent

Complete ReAct Agent Template

python
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Create a ReAct agent in one line
agent = create_react_agent(llm, tools, checkpointer=MemorySaver())

# Usage
for chunk in agent.stream(
    {"messages": [("user", "Search for LangGraph tutorials")]},
    config={"configurable": {"thread_id": "user_123"}}
):
    print(chunk)

💾 Memory System Quick Reference

Checkpointer (Short-Term Memory)

python
from langgraph.checkpoint.memory import MemorySaver
from langgraph.checkpoint.sqlite import SqliteSaver

# In-memory (development)
memory = MemorySaver()

# Persistent (production); requires the langgraph-checkpoint-sqlite package
import sqlite3
memory = SqliteSaver(sqlite3.connect("checkpoints.db", check_same_thread=False))

app = graph.compile(checkpointer=memory)

# Sessions are keyed by thread_id
config = {"configurable": {"thread_id": "user_123"}}
app.invoke(input, config)
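
A quick sketch of what thread_id buys you: calls that share a thread_id also share the checkpointed history, so a second call sees the first call's messages (assuming app is a compiled conversational graph or agent).

python
# Same thread_id → shared checkpointed history
cfg = {"configurable": {"thread_id": "user_123"}}
app.invoke({"messages": [("user", "My name is Alice")]}, cfg)
app.invoke({"messages": [("user", "What is my name?")]}, cfg)  # sees the earlier turn

# A different thread_id starts a fresh, independent conversation
app.invoke({"messages": [("user", "What is my name?")]},
           {"configurable": {"thread_id": "user_456"}})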

Store (Long-Term Memory)

python
from langgraph.store.memory import InMemoryStore

store = InMemoryStore()

# Write a memory
store.put(("users", "user_123"), "profile", {"name": "Alice", "age": 30})

# Read it back (returns an Item; use .value for the stored dict)
profile = store.get(("users", "user_123"), "profile")

# Search within a namespace
results = store.search(("users",), filter={"age": {"$gte": 25}})

# Attach both to the graph at compile time
app = graph.compile(checkpointer=memory, store=store)
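
A minimal sketch of reading long-term memory from inside a node: it simply reuses the store object from the enclosing scope (no injection API assumed); store.get returns an Item (or None) whose .value holds the stored dict.

python
def personalize(state: State) -> dict:
    # Look up the profile written above under ("users", "user_123")
    item = store.get(("users", "user_123"), "profile")
    name = item.value["name"] if item else "unknown user"
    return {"context": f"Talking to {name}"}

graph.add_node("personalize", personalize)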

🔄 State Management Quick Reference

Common Reducers

python
import operator
from typing import Annotated
from langgraph.graph import add_messages

# Additive merge (numbers, lists)
count: Annotated[int, operator.add]

# Smart message merging (recommended for chat history)
messages: Annotated[list, add_messages]

# Overwrite (the default when no reducer is given)
status: str

# Custom reducer
def merge_dict(left, right):
    return {**left, **right}

data: Annotated[dict, merge_dict]

Message Operations

python
from langgraph.graph import MessagesState
from langchain_core.messages import RemoveMessage, trim_messages, filter_messages

# Trim messages (keep the most recent 10 by counting messages rather than tokens)
trimmed = trim_messages(state["messages"], strategy="last", token_counter=len, max_tokens=10)

# Filter messages by type
filtered = filter_messages(state["messages"], include_types=["human", "ai"])

# Delete a message (return this from a node; handled by the add_messages reducer)
return {"messages": [RemoveMessage(id=msg.id)]}

🎛️ Human-in-the-Loop Quick Reference

Breakpoints

python
# Set breakpoints at compile time
app = graph.compile(
    checkpointer=memory,
    interrupt_before=["human_review"],  # pause before this node
    interrupt_after=["agent"]           # pause after this node
)

# Inspect the paused run
state = app.get_state(config)
print(state.values)  # current state values
print(state.next)    # node(s) that will run next

# Update the state, then resume
app.update_state(config, {"approved": True})
app.invoke(None, config)  # passing None resumes from the checkpoint

Dynamic Interrupts

python
from langgraph.types import interrupt

def review_node(state):
    if needs_human_review(state):
        # Pause and ask for human review, passing along context
        decision = interrupt({"data": state["draft"], "question": "Approve?"})
        return {"approved": decision}
    return {"approved": True}
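
Resuming after interrupt(): invoke the graph again with Command(resume=...) carrying the human's answer; that value becomes the return value of interrupt() inside the node (assumes review_node is wired into a graph compiled with a checkpointer and config carries a thread_id).

python
from langgraph.types import Command

# First call runs until interrupt() pauses the graph at review_node
app.invoke({"draft": "Proposed reply ..."}, config)

# Resume with the human decision; it is returned by interrupt() inside review_node
app.invoke(Command(resume=True), config)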

🚀 Advanced Patterns Quick Reference

Parallelization

python
# Option 1: multiple plain edges (automatic parallelism)
graph.add_edge(START, "node_a")
graph.add_edge(START, "node_b")  # node_a and node_b run in parallel

# Option 2: Send API (dynamic parallelism)
from langgraph.types import Send

def fan_out(state):
    return [Send("worker", {"task": t}) for t in state["tasks"]]

graph.add_conditional_edges(START, fan_out)

Subgraph

python
# Define the subgraph
sub_graph = StateGraph(SubState)
sub_graph.add_node("step1", sub_node1)
sub_graph.add_edge(START, "step1")  # the subgraph needs its own entrypoint
sub_graph.add_edge("step1", END)
sub_app = sub_graph.compile()

# Embed it in the parent graph as a node
graph.add_node("subprocess", sub_app)

Map-Reduce

python
from langgraph.types import Send

def map_phase(state):
    # Fan out: one Send per item
    return [Send("worker", {"item": item}) for item in state["items"]]

def reduce_phase(state):
    # Reduce: aggregate the results collected by the state reducer
    return {"summary": combine(state["results"])}

graph.add_conditional_edges("map", map_phase)  # the "map" node itself is defined elsewhere
graph.add_node("worker", process_item)   # workers run in parallel
graph.add_node("reduce", reduce_phase)
graph.add_edge("worker", "reduce")       # results gathered via the reducer (state sketch below)
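
The "gathered via the reducer" step only works if the shared state declares an additive reducer for the results key. A minimal state and worker sketch (MapReduceState and the "processed:" output are illustrative):

python
import operator
from typing import Annotated, TypedDict

class MapReduceState(TypedDict):
    items: list                             # work to distribute
    results: Annotated[list, operator.add]  # worker outputs are concatenated here
    summary: str                            # written by the reduce node

def process_item(state: dict) -> dict:
    # Each worker receives the {"item": ...} payload passed via Send()
    return {"results": [f"processed:{state['item']}"]}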

🎨 Message System Quick Reference

Message Types

python
from langchain_core.messages import (
    HumanMessage,    # user input
    AIMessage,       # AI reply (may carry tool_calls)
    SystemMessage,   # system instructions
    ToolMessage      # tool result
)

messages = [
    SystemMessage(content="You are a helpful assistant"),
    HumanMessage(content="Check the weather"),
    AIMessage(content="", tool_calls=[{
        "name": "search", "args": {"q": "weather"}, "id": "call_123"
    }]),
    ToolMessage(content="Sunny", tool_call_id="call_123")
]

📦 Production Deployment Quick Reference

LangGraph Platform

python
# langgraph.json
{
  "dependencies": ["langchain-openai", "tavily-python"],
  "graphs": {
    "agent": "./agent.py:graph"
  },
  "env": ".env"
}

# agent.py
from langgraph.graph import StateGraph
graph = StateGraph(State)
# ... define nodes and edges ...
graph = graph.compile(checkpointer=...)  # must end up in the module-level graph variable named in langgraph.json

LangGraph SDK

python
from langgraph_sdk import get_client

# Connect to a remote deployment
client = get_client(url="http://localhost:8123")

# Create a thread and stream a run
thread = await client.threads.create()
async for chunk in client.runs.stream(
    thread["thread_id"],
    "agent",  # assistant name (as registered in langgraph.json)
    input={"messages": [{"role": "user", "content": "Hello"}]}
):
    print(chunk)

Double Texting Strategies

python
# Double texting is handled per run by LangGraph Platform rather than graph.compile():
# pass multitask_strategy when creating or streaming a run via the SDK
run = await client.runs.create(
    thread["thread_id"],
    "agent",
    input={"messages": [{"role": "user", "content": "Hello again"}]},
    multitask_strategy="reject"  # reject / enqueue / interrupt / rollback
)

🎯 Best Practices Checklist

✅ Development

  • [ ] Use MemorySaver for quick prototyping
  • [ ] Debug visually with LangGraph Studio
  • [ ] Log from every node to make runs easy to trace
  • [ ] Define a clear state schema (TypedDict)
  • [ ] Write detailed tool docstrings (the LLM relies on them)

✅ Production

  • [ ] Switch to SqliteSaver or PostgresSaver
  • [ ] Set recursion_limit to guard against infinite loops (see the sketch after this list)
  • [ ] Implement exception handling and retry logic
  • [ ] Use interrupt() for sensitive operations
  • [ ] Configure a double-texting strategy
  • [ ] Monitor token usage and cost
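
A minimal sketch of the recursion_limit guard from the list above: it is passed in the run config and caps the number of super-steps before LangGraph raises GraphRecursionError (the default limit is 25).

python
from langgraph.errors import GraphRecursionError

config = {
    "configurable": {"thread_id": "user_123"},
    "recursion_limit": 50,  # maximum super-steps for this run
}

try:
    app.invoke({"messages": [("user", "Hi")]}, config)
except GraphRecursionError:
    # The graph looped too many times; fail gracefully or retry with a different plan
    print("Hit the recursion limit")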

✅ Performance

  • [ ] Use parallelization to reduce latency
  • [ ] Control context length with trim_messages
  • [ ] Enable parallel_tool_calls where the model provider supports it
  • [ ] Use async execution (astream) for higher throughput (see the sketch after this list)
  • [ ] Index Store data to speed up queries
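
A minimal async-streaming sketch for the astream item above, assuming app is any compiled graph:

python
import asyncio

async def main():
    config = {"configurable": {"thread_id": "user_123"}}
    # stream_mode="values" yields the full state after each step;
    # "updates" yields per-node deltas, "messages" streams LLM tokens
    async for event in app.astream(
        {"messages": [("user", "Hello")]}, config, stream_mode="values"
    ):
        print(event)

asyncio.run(main())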

📚 Core API Quick Reference

Operation | Method | Notes
--------- | ------ | -----
Synchronous run | app.invoke(input, config) | Runs to completion
Streaming run | app.stream(input, config) | Yields output step by step
Async streaming | async for chunk in app.astream(input, config) | Async variant of stream
Get state | app.get_state(config) | Inspect the current state
Update state | app.update_state(config, values) | Edit the state
State history | app.get_state_history(config) | Time travel (see the sketch below)
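
A minimal time-travel sketch for get_state_history: each snapshot carries its own config (including a checkpoint_id), and invoking with that config resumes or forks the run from that point.

python
# Checkpoint history for a thread, most recent first
history = list(app.get_state_history(config))
for snapshot in history:
    print(snapshot.config["configurable"]["checkpoint_id"], snapshot.next)

# Replay / fork from an earlier checkpoint by invoking with its config
past = history[2]              # pick some earlier snapshot
app.invoke(None, past.config)  # re-runs from that checkpoint onward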

🔗 Key Imports Quick Reference

python
# Core
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.graph import add_messages
from langgraph.checkpoint.memory import MemorySaver
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.store.memory import InMemoryStore

# Prebuilt tools
from langgraph.prebuilt import ToolNode, tools_condition, create_react_agent

# Human-in-the-loop
from langgraph.types import interrupt, Command
from langgraph.errors import NodeInterrupt

# Advanced
from langgraph.types import Send
from langchain_core.messages import trim_messages, filter_messages

# Messages
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, ToolMessage

💡 5-Minute Quick Start

python
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool

# 1. Define a tool
@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression, e.g. '2+2'."""
    return str(eval(expression))  # demo only: eval is unsafe on untrusted input

# 2. Create the agent (one line)
llm = ChatOpenAI(model="gpt-4o-mini")
agent = create_react_agent(llm, [calculator], checkpointer=MemorySaver())

# 3. Run it
config = {"configurable": {"thread_id": "1"}}
for msg in agent.stream({"messages": [("user", "Calculate 25*4")]}, config):
    print(msg)

How to use this cheatsheet:

  • 🔍 Search for keywords to jump to a topic
  • 📋 Copy the code templates and use them as-is
  • 🎯 Cross-reference these quick entries with the full tutorials to deepen your understanding

This cheatsheet covers roughly 90% of common LangGraph use cases.

Released under the MIT License. Content copyright belongs to the author.