TypeScript — Intermediate

Pinecone Vector Store Operations

Store and query vector embeddings with Pinecone for semantic search and similarity matching.

typescript
import { Pinecone } from '@pinecone-database/pinecone';
import OpenAI from 'openai';

// Module-level SDK clients, shared by all functions below.
// Both constructors take no arguments here, so credentials presumably come
// from environment variables per SDK convention — TODO confirm deployment config.
const pc = new Pinecone();
const openai = new OpenAI();
// Handle to the pre-existing 'snippets' index; every upsert/query below targets it.
const index = pc.index('snippets');

/**
 * Embed `text` using OpenAI's `text-embedding-3-small` model.
 *
 * @param text - Raw text to embed.
 * @returns The embedding vector for the input.
 * @throws Error if the API response contains no embedding entries.
 */
async function getEmbedding(text: string): Promise<number[]> {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: text,
  });
  const first = response.data[0];
  // Guard explicitly: with an empty `data` array the original code crashed
  // with an opaque TypeError on `.embedding` (and `data[0]` is
  // `... | undefined` under noUncheckedIndexedAccess).
  if (!first) {
    throw new Error('OpenAI embeddings API returned no embedding data');
  }
  return first.embedding;
}

export async function upsertDocument(id: string, text: string, metadata: Record<string, string>) {
  const embedding = await getEmbedding(text);
  await index.upsert([{ id, values: embedding, metadata: { ...metadata, text } }]);
}

export async function search(query: string, topK = 5) {
  const embedding = await getEmbedding(query);
  const results = await index.query({
    vector: embedding,
    topK,
    includeMetadata: true,
  });
  return results.matches?.map((m) => ({
    id: m.id,
    score: m.score,
    text: m.metadata?.text as string,
  })) ?? [];
}

Sponsored

Try Pinecone — Serverless Vector Database

Use Cases

  • Semantic search engines
  • Recommendation systems
  • RAG retrieval

Tags

Related Snippets

Similar patterns you can reuse in the same workflow.