Skip to main content

Basic Chat

Send messages to a chat agent and get a response:
from reminix import Reminix

client = Reminix()

chat_response = client.agents.chat(
    "chat-assistant",
    messages=[{"role": "user", "content": "Hello! What can you do?"}],
)

# The response carries one or more assistant messages.
for message in chat_response.messages:
    print(message.content)

# A conversation_id is present only when persistence is enabled for the agent.
if chat_response.conversation_id:
    print(f"Conversation ID: {chat_response.conversation_id}")

Message Format

Messages are OpenAI-compatible. Supported roles are system, developer, user, assistant, and tool; content may be a string, an array of parts (e.g. text, image_url), or null. In handlers, use the Message and ToolCall types from reminix_runtime, and call message_content_to_text when displaying content that may be an array.
# A full exchange, OpenAI-style: an optional system prompt followed by
# alternating user/assistant turns.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},  # sets behavior (optional)
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},  # from a previous response
    {"role": "user", "content": "What's the weather?"},
]

response = client.agents.chat("chat-assistant", messages=messages)

Conversations

Maintain conversation history:
messages = []

def chat(user_message: str) -> str:
    """Send one user turn, record both sides in `messages`, and return the reply text."""
    messages.append({"role": "user", "content": user_message})

    reply = client.agents.chat("chat-assistant", messages=messages)

    # The assistant's message is the first entry in the response.
    assistant = reply.messages[0]
    messages.append({"role": assistant.role, "content": assistant.content})

    return assistant.content

# A multi-turn conversation — earlier turns are carried in `messages`.
print(chat("Hello!"))
# "Hi! How can I help you today?"

print(chat("What's the capital of France?"))
# "The capital of France is Paris."

print(chat("What about Germany?"))
# "The capital of Germany is Berlin."

Multimodal Content

Send images alongside text:
# Mixed content: a text part plus an image part in a single user message.
text_part = {"type": "text", "text": "What's in this image?"}
image_part = {
    "type": "image_url",
    "image_url": {"url": "https://example.com/image.jpg"},
}

response = client.agents.chat(
    "vision-agent",
    messages=[{"role": "user", "content": [text_part, image_part]}],
)

Base64 Images

Embed a local image by encoding it as a base64 data URL:
import base64

# Read the local file and encode its bytes as base64 text.
with open("image.png", "rb") as image_file:
    encoded = base64.b64encode(image_file.read()).decode()

data_url = f"data:image/png;base64,{encoded}"

response = client.agents.chat(
    "vision-agent",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image"},
                {"type": "image_url", "image_url": {"url": data_url}},
            ],
        }
    ],
)

Streaming Chat

Stream responses for real-time chat UI:
print("Assistant: ", end="")

for chunk in client.agents.chat(
    "chat-assistant",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
):
    print(chunk, end="", flush=True)

print()

Collecting Streamed Response

collected = []

# Print each chunk as it arrives while also keeping it for later.
for piece in client.agents.chat(
    "chat-assistant",
    messages=messages,
    stream=True,
):
    collected.append(piece)
    print(piece, end="", flush=True)

full_response = "".join(collected)

# Record the assistant's complete turn so the conversation can continue.
messages.append({"role": "assistant", "content": full_response})

With Context

Pass additional context:
# Arbitrary context is passed through to the agent alongside the messages.
request_context = {
    "identity": {"user_id": "user_456"},
    "order_id": "order_789",
    "customer_tier": "premium",
}

response = client.agents.chat(
    "support-agent",
    messages=[{"role": "user", "content": "What's my order status?"}],
    context=request_context,
)

Tool Calls

If your agent uses tools/functions:
# Keep the history in a list so tool-call records can be appended to it.
# (The original example appended to an undefined `messages` variable.)
messages = [{"role": "user", "content": "What's the weather in Paris?"}]

response = client.agents.chat("assistant", messages=messages)

# The agent may respond with tool calls instead of (or alongside) text.
message = response.messages[0]
if message.tool_calls:
    for tool_call in message.tool_calls:
        print(f"Tool: {tool_call.function.name}")
        print(f"Args: {tool_call.function.arguments}")

        # Execute the tool (execute_tool is your own implementation).
        tool_result = execute_tool(tool_call)

        # Record the assistant's tool call...
        messages.append({
            "role": "assistant",
            "content": None,
            "tool_calls": [{"id": tool_call.id, "type": "function", "function": {"name": tool_call.function.name, "arguments": tool_call.function.arguments}}]
        })
        # ...and the tool's result, linked back via tool_call_id.
        messages.append({
            "role": "tool",
            "name": tool_call.function.name,
            "tool_call_id": tool_call.id,
            "content": tool_result
        })

    # Ask for the final answer once, after ALL tool results are recorded
    # (calling inside the loop would send incomplete histories when the
    # agent issues multiple tool calls).
    final = client.agents.chat("assistant", messages=messages)
    print(final.messages[0].content)

Async Chat

For async applications:
import asyncio
from reminix import AsyncReminix

async def main() -> None:
    """Demonstrate a one-shot and a streaming async chat call."""
    client = AsyncReminix()

    # One-shot request/response.
    reply = await client.agents.chat(
        "chat-assistant",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(reply.messages[0].content)

    # Streaming: awaiting the call returns an async iterator of text chunks.
    stream = await client.agents.chat(
        "chat-assistant",
        messages=[{"role": "user", "content": "Tell me a story"}],
        stream=True
    )
    async for piece in stream:
        print(piece, end="", flush=True)
    print()

asyncio.run(main())