local_llm.toml

[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search"]

[agent.generation_config]
model = "ollama/llama3.1"

[completion]
provider = "litellm"
concurrent_request_limit = 1

[completion.generation_config]
model = "ollama/llama3.1"
temperature = 0.1
top_p = 1
max_tokens_to_sample = 1_024
stream = false
add_generation_kwargs = { }

[embedding]
provider = "ollama"
base_model = "mxbai-embed-large"
base_dimension = 1_024
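# mxbai-embed-large produces 1024-dimensional embeddings, so base_dimension must match.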
batch_size = 128
add_title_as_prefix = true
concurrent_request_limit = 2

[database]
provider = "postgres"

[database.graph_creation_settings]
graph_entity_description_prompt = "graphrag_entity_description"
entity_types = [] # if empty, all entities are extracted
relation_types = [] # if empty, all relations are extracted
fragment_merge_count = 4 # number of fragments to merge into a single extraction
max_knowledge_relationships = 100
max_description_input_length = 65536
generation_config = { model = "ollama/llama3.1" } # and other params; model used for relationship extraction

[database.graph_enrichment_settings]
community_reports_prompt = "graphrag_community_reports"
max_summary_input_length = 65536
generation_config = { model = "ollama/llama3.1" } # and other params; model used for node description and graph clustering
leiden_params = {}
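# leiden_params: optional overrides for the Leiden community-detection algorithm used for graph clustering.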

[database.graph_search_settings]
generation_config = { model = "ollama/llama3.1" }

[orchestration]
provider = "simple"

[ingestion]
vision_img_model = "ollama/llama3.2-vision"
vision_pdf_model = "ollama/llama3.2-vision"
chunks_for_document_summary = 16
document_summary_model = "ollama/llama3.1"

[ingestion.extra_parsers]
pdf = "zerox"