TypeScript · Advanced

LangChain RAG Chain Pipeline

Build a retrieval-augmented generation chain with LangChain using vector store retrieval and prompt templates.

Example (TypeScript):
import { ChatOpenAI } from '@langchain/openai';
import { PromptTemplate } from '@langchain/core/prompts';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { RunnableSequence } from '@langchain/core/runnables';

// Chat model: GPT-4o with temperature 0 so answers stay deterministic and
// grounded in the supplied context rather than creatively paraphrased.
const model = new ChatOpenAI({ modelName: 'gpt-4o', temperature: 0 });

// RAG prompt: restricts the model to the retrieved context and tells it to
// say "I don't know." instead of hallucinating when the context is silent.
// Template variables: {context} (retrieved documents), {question} (user query).
const ragPrompt = PromptTemplate.fromTemplate(`
Answer the question based only on the provided context.
If the context doesn't contain the answer, say "I don't know."

Context: {context}
Question: {question}
Answer:`);

/**
 * Simulated retriever — stands in for a real vector-store similarity search.
 * Always resolves to the same placeholder string, regardless of the query.
 *
 * @param query - The user question to retrieve context for (unused in the stub).
 * @returns A promise resolving to the retrieved context as a single string.
 */
async function retrieve(query: string): Promise<string> {
  // TODO: replace with an actual vector-store retrieval call.
  const placeholderContext = 'Retrieved context documents here...';
  return placeholderContext;
}

// RAG pipeline: map the incoming question into {context, question}, render
// the prompt, invoke the chat model, and parse the reply to a plain string.
const chain = RunnableSequence.from([
  {
    // Retrieval step: look up context for the question (returns a promise,
    // which the runnable machinery awaits before filling the prompt).
    context: ({ question }: { question: string }) => retrieve(question),
    // Pass the original question through unchanged.
    question: ({ question }: { question: string }) => question,
  },
  ragPrompt,
  model,
  new StringOutputParser(),
]);

// const answer = await chain.invoke({ question: 'What is RAG?' });

Sponsored

Try LangSmith — Debug & Monitor LLM Apps

Use Cases

  • Document Q&A
  • Knowledge base chatbots
  • Semantic search answers

Tags

Related Snippets

Similar patterns you can reuse in the same workflow.