Skip to main content

Overview

reminix-langgraph lets you deploy LangGraph state machines and workflows as Reminix agents.

Installation

pip install reminix-langgraph
This will also install reminix-runtime as a dependency.

Quick Start

Wrap your LangGraph workflow and serve it:
from typing import TypedDict
from langgraph.graph import StateGraph, END
from reminix_langgraph import serve_agent

# Define your graph
class AgentState(TypedDict):
    messages: list
    next_step: str

def process_node(state):
    # Process logic
    return {"next_step": "respond"}

def respond_node(state):
    # Response logic
    return {"messages": state["messages"] + ["Response"]}

graph = StateGraph(AgentState)
graph.add_node("process", process_node)
graph.add_node("respond", respond_node)
graph.add_edge("process", "respond")
graph.add_edge("respond", END)
graph.set_entry_point("process")

workflow = graph.compile()

# Wrap and serve
if __name__ == "__main__":
    serve_agent(workflow, name="workflow-agent", port=8080)

Wrapping Workflows

Basic Graph

from langgraph.graph import StateGraph
from reminix_langgraph import wrap_agent

graph = StateGraph(MyState)
# ... define nodes and edges
workflow = graph.compile()

agent = wrap_agent(workflow, name="my-workflow")

With Checkpointing

from langgraph.checkpoint.memory import MemorySaver
from reminix_langgraph import wrap_agent

checkpointer = MemorySaver()
workflow = graph.compile(checkpointer=checkpointer)

agent = wrap_agent(
    workflow,
    name="stateful-workflow",
    checkpointer=checkpointer
)

Prebuilt Agents

from langgraph.prebuilt import create_react_agent
from reminix_langgraph import wrap_agent

agent = create_react_agent(llm, tools)
reminix_agent = wrap_agent(agent, name="react-agent")

Configuration

agent = wrap_agent(
    workflow,
    name="my-workflow",
    description="Multi-step processing workflow",

    # State mapping
    input_key="messages",
    output_key="messages",

    # Streaming
    streaming=True,
    stream_mode="updates"  # "values" or "updates"
)

Multiple Agents

For projects that expose more than one agent, use wrap_agent together with serve instead of serve_agent, which serves only a single agent:
from reminix_langgraph import wrap_agent
from reminix_runtime import serve

workflow1 = wrap_agent(graph1.compile(), name="research-workflow")
workflow2 = wrap_agent(graph2.compile(), name="writing-workflow")

serve(agents=[workflow1, workflow2], port=8080)
See Multiple Agents for detailed guidance on multi-agent projects.

State Management

Input Mapping

Use an input_mapper to convert the incoming Reminix request payload into your LangGraph state:
def map_input(reminix_input: dict) -> AgentState:
    return {
        "messages": [{"role": "user", "content": reminix_input["query"]}],
        "context": reminix_input.get("context", {})
    }

agent = wrap_agent(
    workflow,
    name="my-workflow",
    input_mapper=map_input
)

Output Mapping

Use an output_mapper to convert the final LangGraph state into the Reminix response payload:
def map_output(state: AgentState) -> dict:
    return {
        "response": state["messages"][-1]["content"],
        "steps": len(state["messages"])
    }

agent = wrap_agent(
    workflow,
    name="my-workflow",
    output_mapper=map_output
)

Streaming

Stream graph updates in real time:
from reminix import Reminix

client = Reminix()

for chunk in client.agents.invoke(
    "workflow-agent",
    messages=[{"role": "user", "content": "Process this"}],
    stream=True
):
    print(chunk, end="", flush=True)

Example: Multi-Step Research Agent

from typing import TypedDict
from langgraph.graph import StateGraph, END
from reminix_langgraph import wrap_agent
from reminix_runtime import serve

class ResearchState(TypedDict):
    query: str
    search_results: list
    analysis: str
    summary: str

def search(state):
    results = web_search(state["query"])
    return {"search_results": results}

def analyze(state):
    analysis = analyze_results(state["search_results"])
    return {"analysis": analysis}

def summarize(state):
    summary = create_summary(state["analysis"])
    return {"summary": summary}

graph = StateGraph(ResearchState)
graph.add_node("search", search)
graph.add_node("analyze", analyze)
graph.add_node("summarize", summarize)
graph.add_edge("search", "analyze")
graph.add_edge("analyze", "summarize")
graph.add_edge("summarize", END)
graph.set_entry_point("search")

workflow = graph.compile()
agent = wrap_agent(workflow, name="research-agent")

if __name__ == "__main__":
    serve(agents=[agent], port=8080)

Usage

Once deployed, call your agent using invoke. See Agents for detailed guidance.

With Messages

For graph-based workflows with message history:
from reminix import Reminix

client = Reminix()

response = client.agents.invoke(
    "workflow-agent",
    messages=[{"role": "user", "content": "Research AI trends"}]
)

print(response.output)

With Custom Input

For workflows configured with a custom input key or input mapper, pass your fields directly instead of messages:
response = client.agents.invoke(
    "research-agent",
    query="Research AI trends in healthcare"
)

print(response.output)

Streaming

For real-time streaming responses:
for chunk in client.agents.invoke(
    "workflow-agent",
    messages=[{"role": "user", "content": "Research AI trends"}],
    stream=True
):
    print(chunk, end="", flush=True)

Deployment

See the Deployment guide for deploying to Reminix, or Self-Hosting for deploying on your own infrastructure.

Next Steps