makechain.ts

import { OpenAI } from 'langchain/llms/openai';
import { PineconeStore } from 'langchain/vectorstores/pinecone';
import { ConversationalRetrievalQAChain } from 'langchain/chains';

const CONDENSE_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:`;

const QA_PROMPT = `You are a helpful AI assistant. Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say you don't know. DO NOT try to make up an answer.
If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.

{context}

Question: {question}
Helpful answer in markdown:`;
export const makeChain = (vectorstore: PineconeStore) => {
  const model = new OpenAI({
    temperature: 0, // increase temperature to get more creative answers
    modelName: 'gpt-3.5-turbo', // change this to gpt-4 if you have access
  });

  const chain = ConversationalRetrievalQAChain.fromLLM(
    model,
    vectorstore.asRetriever(),
    {
      qaTemplate: QA_PROMPT,
      questionGeneratorTemplate: CONDENSE_PROMPT,
      returnSourceDocuments: true, // the number of source documents returned is 4 by default
    },
  );
  return chain;
};
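
For context, here is a minimal sketch of how makeChain might be wired up and queried, assuming a Pinecone index that has already been populated with embedded document chunks. The index name, environment variable names, and the ask helper are placeholders for illustration, not part of the original file:

import { PineconeClient } from '@pinecone-database/pinecone';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { PineconeStore } from 'langchain/vectorstores/pinecone';
import { makeChain } from './makechain';

async function ask() {
  // Connect to an existing Pinecone index; the env var names and
  // index name below are hypothetical placeholders.
  const client = new PineconeClient();
  await client.init({
    apiKey: process.env.PINECONE_API_KEY ?? '',
    environment: process.env.PINECONE_ENVIRONMENT ?? '',
  });
  const pineconeIndex = client.Index('my-docs-index'); // hypothetical index name

  // Wrap the index in a LangChain vector store using OpenAI embeddings.
  const vectorstore = await PineconeStore.fromExistingIndex(
    new OpenAIEmbeddings(),
    { pineconeIndex, textKey: 'text' },
  );

  const chain = makeChain(vectorstore);

  // chat_history is empty on the first turn; pass prior exchanges on later turns
  // so CONDENSE_PROMPT can rewrite follow-ups into standalone questions.
  const response = await chain.call({
    question: 'What topics does the document cover?',
    chat_history: [],
  });

  console.log(response.text); // the model's answer
  console.log(response.sourceDocuments); // the retrieved context chunks (4 by default)
}

ask().catch(console.error);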