chat.ts

import type { NextApiRequest, NextApiResponse } from 'next';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { PineconeStore } from 'langchain/vectorstores/pinecone';
import { makeChain } from '@/utils/makechain';
import { pinecone } from '@/utils/pinecone-client';
import { PINECONE_INDEX_NAME, PINECONE_NAME_SPACE } from '@/config/pinecone';

export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse,
) {
  // Only accept POST requests; reject anything else before touching the body.
  if (req.method !== 'POST') {
    res.status(405).json({ error: 'Method not allowed' });
    return;
  }

  const { question, history } = req.body;
  console.log('question', question);

  if (!question) {
    return res.status(400).json({ message: 'No question in the request' });
  }

  // OpenAI recommends replacing newlines with spaces for best results.
  const sanitizedQuestion = question.trim().replaceAll('\n', ' ');

  try {
    const index = pinecone.Index(PINECONE_INDEX_NAME);

    /* Create a vector store backed by the existing Pinecone index. */
    const vectorStore = await PineconeStore.fromExistingIndex(
      new OpenAIEmbeddings({}),
      {
        pineconeIndex: index,
        textKey: 'text',
        namespace: PINECONE_NAME_SPACE, // namespace comes from your config folder
      },
    );

    // Create the chain.
    const chain = makeChain(vectorStore);

    // Ask a question using the chat history.
    const response = await chain.call({
      question: sanitizedQuestion,
      chat_history: history || [],
    });

    console.log('response', response);
    res.status(200).json(response);
  } catch (error: any) {
    console.error('error', error);
    res.status(500).json({ error: error.message || 'Something went wrong' });
  }
}
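
The handler imports makeChain from '@/utils/makechain', which is not shown in this file. Since the handler calls chain.call({ question, chat_history }), it plausibly wraps LangChain's ConversationalRetrievalQAChain, which accepts exactly those inputs. A minimal sketch under that assumption follows; the model settings and returnSourceDocuments option are placeholders, not taken from this file.

// utils/makechain.ts — hypothetical sketch, assuming ConversationalRetrievalQAChain.
import { OpenAI } from 'langchain/llms/openai';
import { PineconeStore } from 'langchain/vectorstores/pinecone';
import { ConversationalRetrievalQAChain } from 'langchain/chains';

export const makeChain = (vectorStore: PineconeStore) => {
  const model = new OpenAI({ temperature: 0 }); // model settings are placeholders

  // The chain condenses the question plus chat history into a standalone
  // question, retrieves relevant documents from the vector store, and answers.
  return ConversationalRetrievalQAChain.fromLLM(
    model,
    vectorStore.asRetriever(),
    { returnSourceDocuments: true },
  );
};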
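
Likewise, pinecone comes from '@/utils/pinecone-client'. Because the handler calls pinecone.Index(...) synchronously, that module must export an already-initialized client. A sketch of what it might contain, assuming the 0.x @pinecone-database/pinecone SDK (whose PineconeClient requires an async init) and assuming the environment variable names:

// utils/pinecone-client.ts — hypothetical sketch; env var names are assumptions.
import { PineconeClient } from '@pinecone-database/pinecone';

if (!process.env.PINECONE_ENVIRONMENT || !process.env.PINECONE_API_KEY) {
  throw new Error('Pinecone environment or API key missing');
}

async function initPinecone() {
  const client = new PineconeClient();
  // The 0.x SDK requires init() before the client can be used.
  await client.init({
    environment: process.env.PINECONE_ENVIRONMENT ?? '',
    apiKey: process.env.PINECONE_API_KEY ?? '',
  });
  return client;
}

// Top-level await so importers receive a ready-to-use client.
export const pinecone = await initPinecone();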
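
For context, a route like this is typically called from the frontend with a JSON POST. A minimal sketch of such a call; the '/api/chat' path and the shape of history as [question, answer] string pairs are assumptions inferred from how the handler reads req.body, not something this file defines:

// Hypothetical client-side call; endpoint path and history shape are assumptions.
async function askQuestion(
  question: string,
  history: [string, string][], // prior [question, answer] pairs
) {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ question, history }),
  });
  if (!res.ok) {
    throw new Error(`Request failed: ${res.status}`);
  }
  return res.json();
}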