# local_llm.toml — configuration for running against local LLMs served by Ollama.
  1. [agent]
  2. system_instruction_name = "rag_agent"
  3. tool_names = ["local_search"]
  4. [agent.generation_config]
  5. model = "ollama/llama3.1"
  6. [completion]
  7. provider = "litellm"
  8. concurrent_request_limit = 1
  9. [completion.generation_config]
  10. model = "ollama/llama3.1"
  11. temperature = 0.1
  12. top_p = 1
  13. max_tokens_to_sample = 1_024
  14. stream = false
  15. add_generation_kwargs = { }
  16. [embedding]
  17. provider = "ollama"
  18. base_model = "mxbai-embed-large"
  19. base_dimension = 1_024
  20. batch_size = 128
  21. add_title_as_prefix = true
  22. concurrent_request_limit = 2
  23. [database]
  24. provider = "postgres"
  25. [database.graph_creation_settings]
  26. graph_entity_description_prompt = "graphrag_entity_description"
  27. entity_types = [] # if empty, all entities are extracted
  28. relation_types = [] # if empty, all relations are extracted
  29. fragment_merge_count = 4 # number of fragments to merge into a single extraction
  30. max_knowledge_relationships = 100
  31. max_description_input_length = 65536
  32. generation_config = { model = "ollama/llama3.1" } # and other params, model used for relationshipt extraction
  33. [database.graph_entity_deduplication_settings]
  34. graph_entity_deduplication_type = "by_name"
  35. graph_entity_deduplication_prompt = "graphrag_entity_deduplication"
  36. max_description_input_length = 65536
  37. generation_config = { model = "ollama/llama3.1" } # and other params, model used for deduplication
  38. [database.graph_enrichment_settings]
  39. community_reports_prompt = "graphrag_community_reports"
  40. max_summary_input_length = 65536
  41. generation_config = { model = "ollama/llama3.1" } # and other params, model used for node description and graph clustering
  42. leiden_params = {}
  43. [database.graph_search_settings]
  44. generation_config = { model = "ollama/llama3.1" }
  45. [orchestration]
  46. provider = "simple"
  47. [ingestion]
  48. vision_img_model = "ollama/llama3.2-vision"
  49. vision_pdf_model = "ollama/llama3.2-vision"
  50. [ingestion.extra_parsers]
  51. pdf = "zerox"
  52. chunks_for_document_summary = 16
  53. document_summary_model = "ollama/llama3.1"