Learn how to render custom markdown responses in the C1 UI.
This guide assumes that you have completed the Quickstart.
If you’re using interceptors or guardrails, you might want to return a custom response instead of having
the LLM generate one for a user query. For example, when a user’s message contains or requests PII, you might want to return a fixed response rather than passing the query to
the LLM.
To do this, use the makeC1Response function to create a c1Response object, and then use its writeCustomMarkdown method to write the custom response to
that object.
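As a preview, here is a minimal sketch of the pattern, stripped down to just the makeC1Response and writeCustomMarkdown calls and the streamed response (the complete route handler is built up in the steps below):

```typescript
import { NextResponse } from "next/server";
import { makeC1Response } from "@thesysai/genui-sdk/server";

export async function POST() {
  const c1Response = makeC1Response();

  // Write a fixed markdown response instead of asking the LLM to generate one.
  c1Response.writeCustomMarkdown("Sorry, I can't help with that request.");
  c1Response.end(); // Stop showing the "loading" state in the UI.

  return new NextResponse(c1Response.responseStream, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
    },
  });
}
```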
1. Create a c1Response object
Import the makeC1Response function from the @thesysai/genui-sdk package (via its server entry point), use it to create a c1Response object, and start writing the LLM response
content to this object:
app/api/chat/route.ts
```typescript
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import type { ChatCompletionMessageParam } from "openai/resources.mjs";
import { transformStream } from "@crayonai/stream";
import { getMessageStore } from "./messageStore";
import { makeC1Response } from "@thesysai/genui-sdk/server";

export async function POST(req: NextRequest) {
  const c1Response = makeC1Response();

  const { prompt, threadId, responseId } = (await req.json()) as {
    prompt: ChatCompletionMessageParam;
    threadId: string;
    responseId: string;
  };

  const client = new OpenAI({
    baseURL: "https://api.thesys.dev/v1/embed",
    apiKey: process.env.THESYS_API_KEY, // Use the API key you created in the previous step
  });

  const messageStore = getMessageStore(threadId);
  messageStore.addMessage(prompt);

  const llmStream = await client.chat.completions.create({
    model: "c1-nightly",
    messages: messageStore.getOpenAICompatibleMessageList(),
    stream: true,
  });

  // Unwrap the OpenAI stream to a C1 stream
  transformStream(
    llmStream,
    (chunk) => {
      const contentDelta = chunk.choices[0].delta.content;
      if (contentDelta) {
        c1Response.writeContent(contentDelta);
      }
      return contentDelta;
    },
    {
      onEnd: ({ accumulated }) => {
        c1Response.end(); // This is necessary to stop showing the "loading" state once the response is done streaming.
        const message = accumulated.filter((chunk) => chunk).join("");
        messageStore.addMessage({
          id: responseId,
          role: "assistant",
          content: message,
        });
      },
    }
  ) as ReadableStream<string>;

  return new NextResponse(c1Response.responseStream, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
    },
  });
}
```
2. Write a custom markdown response to the response object
To add a custom markdown response, use the writeCustomMarkdown method defined on the c1Response object:
When a custom markdown response is present in c1Response, it takes priority over the LLM response in the UI: it is the only thing rendered, even if an LLM response is also present in c1Response.
Therefore, although not strictly necessary, it is recommended to return early when writing a custom markdown response so that the C1 API is never invoked, which avoids unnecessary token usage.
The example below gates the request with a hypothetical checkForPII helper; a sketch of what such a guardrail might look like follows the route handler.
app/api/chat/route.ts
```typescript
import { NextRequest, NextResponse } from "next/server";
import OpenAI from "openai";
import type { ChatCompletionMessageParam } from "openai/resources.mjs";
import { transformStream } from "@crayonai/stream";
import { getMessageStore } from "./messageStore";
import { makeC1Response } from "@thesysai/genui-sdk/server";
// This is a hypothetical function that validates the user query based on some criteria,
// such as identifying if it contains or requests PII.
import { checkForPII } from "./guardrails";

export async function POST(req: NextRequest) {
  const c1Response = makeC1Response();

  const { prompt, threadId, responseId } = (await req.json()) as {
    prompt: ChatCompletionMessageParam;
    threadId: string;
    responseId: string;
  };

  if (checkForPII(prompt)) {
    c1Response.writeCustomMarkdown(
      "I'm unable to assist with this request because it contains, or asks for, PII (*personally identifiable information*). Please remove any sensitive information and try again."
    );
    c1Response.end(); // This is necessary to stop showing the "loading" state once the response is done streaming.

    return new NextResponse(c1Response.responseStream, {
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache, no-transform",
        Connection: "keep-alive",
      },
    });
  }

  const client = new OpenAI({
    baseURL: "https://api.thesys.dev/v1/embed",
    apiKey: process.env.THESYS_API_KEY, // Use the API key you created in the previous step
  });

  const messageStore = getMessageStore(threadId);
  messageStore.addMessage(prompt);

  const llmStream = await client.chat.completions.create({
    model: "c1-nightly",
    messages: messageStore.getOpenAICompatibleMessageList(),
    stream: true,
  });

  // Unwrap the OpenAI stream to a C1 stream
  transformStream(
    llmStream,
    (chunk) => {
      const contentDelta = chunk.choices[0].delta.content;
      if (contentDelta) {
        c1Response.writeContent(contentDelta);
      }
      return contentDelta;
    },
    {
      onEnd: ({ accumulated }) => {
        c1Response.end(); // This is necessary to stop showing the "loading" state once the response is done streaming.
        const message = accumulated.filter((chunk) => chunk).join("");
        messageStore.addMessage({
          id: responseId,
          role: "assistant",
          content: message,
        });
      },
    }
  ) as ReadableStream<string>;

  return new NextResponse(c1Response.responseStream, {
    headers: {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
    },
  });
}
```
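As noted in the comment above, checkForPII is a hypothetical guardrail and is not provided by the SDK; how you implement it depends on your own interceptor or guardrail setup. A minimal sketch, assuming a simple regex-based check against the message text, might look like this:

```typescript
// app/api/chat/guardrails.ts (hypothetical; path inferred from the "./guardrails" import above)
import type { ChatCompletionMessageParam } from "openai/resources.mjs";

const PII_PATTERNS = [
  /\b[\w.+-]+@[\w-]+\.[\w.]+\b/, // email addresses
  /\b\d{3}-\d{2}-\d{4}\b/, // US Social Security numbers
  /\b(?:\d[ -]?){13,16}\b/, // card-number-like digit runs
];

export function checkForPII(prompt: ChatCompletionMessageParam): boolean {
  // User message content may be a string or an array of content parts.
  const text =
    typeof prompt.content === "string"
      ? prompt.content
      : JSON.stringify(prompt.content ?? "");
  return PII_PATTERNS.some((pattern) => pattern.test(text));
}
```

In practice you would likely replace the regexes with a dedicated PII-detection service or model, or with whatever interceptor pipeline you already use.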
3. Test it out
Your custom response will now be rendered in the UI when the guardrail is triggered.
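If you want to exercise the guardrail path without going through the UI, you can call the route directly. A minimal sketch, assuming the Next.js dev server is running on localhost:3000 and a prompt that the hypothetical checkForPII flags:

```typescript
// Quick manual check of the guardrail path (assumes `next dev` on localhost:3000).
async function testGuardrail() {
  const res = await fetch("http://localhost:3000/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      // The body shape matches what the route handler above destructures.
      prompt: { role: "user", content: "My SSN is 123-45-6789, please remember it for me." },
      threadId: "test-thread",
      responseId: "test-response",
    }),
  });

  // The guardrail branch streams back the custom markdown instead of an LLM-generated answer.
  console.log(await res.text());
}

testGuardrail();
```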