OpenAI Assistants API with Threads
Create persistent conversation threads with the OpenAI Assistants API (beta) for stateful, multi-turn interactions.
import OpenAI from 'openai';
// With no constructor options, the SDK reads the API key from the
// OPENAI_API_KEY environment variable by default.
const openai = new OpenAI();
/**
 * Provisions the "Code Reviewer" assistant: gpt-4o with the
 * code_interpreter tool enabled.
 *
 * @returns The Assistant object created by the API; pass its `id`
 *          to `chat()` as `assistantId`.
 */
export async function createAssistant() {
  const assistant = await openai.beta.assistants.create({
    model: 'gpt-4o',
    name: 'Code Reviewer',
    tools: [{ type: 'code_interpreter' }],
    instructions: 'You are a senior developer. Review code for bugs, security issues, and best practices.',
  });
  return assistant;
}
/**
 * Sends `message` on a thread (created on demand when `threadId` is null),
 * runs the assistant on it, and returns the assistant's latest text reply.
 *
 * @param assistantId - ID of an assistant, e.g. from `createAssistant()`.
 * @param threadId - Existing thread to continue, or null to start a new one.
 * @param message - User message to append before running.
 * @returns The thread id (reuse it on the next turn) and the reply text
 *          ('' if the run produced no text content).
 * @throws Error when the run ends in any status other than 'completed'
 *         (failed, cancelled, expired, requires_action, ...).
 */
export async function chat(
  assistantId: string,
  threadId: string | null,
  message: string,
): Promise<{ threadId: string; response: string }> {
  // Reuse the caller's thread when given; otherwise create a fresh one.
  const id = threadId ?? (await openai.beta.threads.create()).id;

  // Append the user's message to the thread.
  await openai.beta.threads.messages.create(id, {
    role: 'user',
    content: message,
  });

  // Kick off a run and block until it reaches a terminal state.
  const run = await openai.beta.threads.runs.createAndPoll(id, {
    assistant_id: assistantId,
  });

  if (run.status !== 'completed') {
    // Surface the API's failure detail when available (e.g. rate limit info).
    const detail = run.last_error ? ` (${run.last_error.message})` : '';
    throw new Error(`Run failed with status: ${run.status}${detail}`);
  }

  // Messages are listed newest-first; take the newest assistant message
  // rather than blindly using data[0].
  const messages = await openai.beta.threads.messages.list(id);
  const reply = messages.data.find((m) => m.role === 'assistant');

  // A reply may contain several content parts; join all text parts instead
  // of indexing [0], which would throw on an empty content array.
  const text = reply
    ? reply.content
        .flatMap((part) => (part.type === 'text' ? [part.text.value] : []))
        .join('\n')
    : '';

  return { threadId: id, response: text };
}
Use Cases
- Customer support bots
- Tutoring assistants
- Code review helpers
Tags
Related Snippets
Similar patterns you can reuse in the same workflow.
OpenAI Chat Completion with Streaming
Stream GPT responses token-by-token using the OpenAI SDK with async iteration.
Generate Text Embeddings with OpenAI
Create vector embeddings for semantic search and similarity matching using text-embedding-3-small.
RAG Pipeline (Retrieve + Augment + Generate)
Minimal RAG implementation: embed a query, retrieve top-k chunks, inject into prompt.
OpenAI Tool Calling (Function Calling)
Define tools for GPT to call, parse the response, execute the function, and return results.