Durable agent loops, per-step cost tracking, and multi-agent workflows—across any model.
TypeScript:
import { ModelRelay, chain, llm } from "@modelrelay/sdk";

const mr = ModelRelay.fromSecretKey("mr_sk_...");

const spec = chain([
  llm("research", n => n.system("Research assistant.").user("{{task}}")),
  llm("summarize", n => n.system("Summarize.").user("{{research}}")),
], { name: "research-pipeline" })
  .output("result", "summarize")
  .build();

const { run_id } = await mr.runs.create(spec, {
  input: { task: "Analyze Q4 results" },
});

Rust:
use modelrelay::{chain, llm, ChainOptions};
let spec = chain(
    vec![
        llm("research", |n| n.system("Research assistant.").user("{{task}}")),
        llm("summarize", |n| n.system("Summarize.").user("{{research}}")),
    ],
    ChainOptions {
        name: Some("research-pipeline".into()),
        ..Default::default()
    },
)
.output("result", "summarize", None)
.build()?;

let run = client.runs().create(spec).await?;

Go:
spec, _ := sdk.Chain([]sdk.WorkflowIntentNode{
sdk.LLM("research", func(n sdk.LLMNodeBuilder) sdk.LLMNodeBuilder {
return n.System("Research assistant.").User("{{task}}")
}),
sdk.LLM("summarize", func(n sdk.LLMNodeBuilder) sdk.LLMNodeBuilder {
return n.System("Summarize.").User("{{research}}")
}),
}, sdk.ChainOptions{Name: "research-pipeline"}).
Output("result", "summarize").Build()
run, _ := client.Runs.Create(ctx, spec, sdk.WithRunInputs(
map[string]any{"task": "Analyze Q4 results"},
))Define agents with tools, then coordinate them in DAG workflows. Each step is tracked for usage and cost, with tool hooks executing on your infrastructure.
Workflows compile to type-safe DAGs (directed acyclic graphs) so complex, branching agent systems stay predictable and debuggable.
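For example, a step that calls a tool and reports its own usage might look like the minimal sketch below. The tools() builder method, the tool handler shape, and the mr.runs.steps() accessor are assumed names for illustration, not confirmed SDK calls; the point is that tool handlers run in your own process while each step's usage and cost are recorded.

// Hypothetical sketch — tools(), the handler shape, and runs.steps() are assumed names, not confirmed SDK API.
import { ModelRelay, chain, llm } from "@modelrelay/sdk";

const mr = ModelRelay.fromSecretKey("mr_sk_...");

// A tool whose handler executes on your infrastructure, not ModelRelay's.
const searchFilings = {
  name: "search_filings",
  description: "Search quarterly filings for a keyword",
  handler: async ({ query }: { query: string }) => {
    // Your own database or search index lives here.
    return [`Filing mentioning "${query}"`];
  },
};

const spec = chain([
  llm("research", n =>
    n.system("Research assistant.").user("{{task}}").tools([searchFilings])), // assumed builder method
  llm("summarize", n => n.system("Summarize.").user("{{research}}")),
], { name: "research-pipeline" })
  .output("result", "summarize")
  .build();

const { run_id } = await mr.runs.create(spec, {
  input: { task: "Analyze Q4 results" },
});

// Assumed accessor: read per-step usage and cost once the run completes.
for (const step of await mr.runs.steps(run_id)) {
  console.log(step.node, step.usage?.total_tokens, step.cost_usd);
}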
Build multi-agent systems where a coordinator delegates work to specialists. Agents communicate through mailboxes with built-in threading and read receipts.
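A rough sketch of that pattern, assuming hypothetical mr.mailboxes.send(), poll(), and markRead() helpers (the real mailbox API may differ in names and shape):

// Hypothetical sketch — mailbox method and field names are assumptions, not confirmed SDK API.
import { ModelRelay } from "@modelrelay/sdk";

const mr = ModelRelay.fromSecretKey("mr_sk_...");

// Coordinator delegates a subtask to a specialist agent's mailbox.
const { thread_id } = await mr.mailboxes.send({
  from: "coordinator",
  to: "pricing-analyst",
  body: { task: "Break down Q4 revenue by region" },
});

// Specialist side: drain its mailbox, acknowledge, and reply on the same thread.
for (const msg of await mr.mailboxes.poll("pricing-analyst")) {
  await mr.mailboxes.markRead(msg.id); // read receipt
  await mr.mailboxes.send({
    from: "pricing-analyst",
    to: "coordinator",
    thread_id: msg.thread_id, // threading keeps the exchange grouped
    body: { result: "Revenue breakdown attached" },
  });
}

// Coordinator picks up replies for the thread it started.
const replies = await mr.mailboxes.poll("coordinator", { thread_id });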
Get progressive updates as the model generates structured output. Build UIs that show results row-by-row instead of waiting for the full response.
TypeScript:
const stream = await mr.responses.streamJSON<{
  reviews: Review[];
}>(request);

for await (const event of stream) {
  if (event.type === "update") {
    // Render rows as they arrive
    renderReviews(event.payload.reviews);
  }
}

const final = await stream.collect();

Rust:
use modelrelay::{Client, StructuredRecordKind};
let mut stream = client.responses()
    .stream_json::<Reviews>(request)
    .await?;

while let Some(event) = stream.next().await? {
    if event.kind == StructuredRecordKind::Update {
        // Render rows as they arrive
        render_reviews(&event.payload.reviews);
    }
}

let final_result = stream.collect().await?;

Go:
stream, _ := client.Responses.StreamJSON[Reviews](
    ctx, request,
)

for event := range stream.Events() {
    if event.Type == sdk.StructuredRecordTypeUpdate {
        // Render rows as they arrive
        renderReviews(event.Payload.Reviews)
    }
}

final, _ := stream.Collect(ctx)

Use your existing auth stack, then mint ModelRelay customer tokens from your backend. Keep identity in your hands while we meter usage.
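As a concrete illustration, a backend route could exchange an authenticated session for a short-lived customer token along the lines of the sketch below; mr.customers.tokens.create() and its parameters are assumed names for illustration rather than confirmed SDK calls.

// Hypothetical sketch — the token-minting call and its fields are assumptions.
import express from "express";
import { ModelRelay } from "@modelrelay/sdk";

const mr = ModelRelay.fromSecretKey(process.env.MODELRELAY_SECRET_KEY!);
const app = express();

// Stand-in for your existing auth middleware (sessions, JWTs, etc.).
const requireSession = (req: express.Request, res: express.Response, next: express.NextFunction) => {
  // Verify the user with your own identity system here.
  next();
};

app.post("/api/modelrelay-token", requireSession, async (req, res) => {
  // Mint a customer-scoped token; your user id stays the source of truth.
  const { token } = await mr.customers.tokens.create({
    customer_id: "user_123", // the id from your own auth system
    expires_in: 3600,        // short-lived, scoped to this customer
  });
  res.json({ token }); // the client calls ModelRelay with this token; usage is metered per customer
});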
Charge customers $49/month. Give them $30 of API usage. Keep the $19 difference.
Live benchmarks updated every 60s.