full_local_llm.toml

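# Fully local configuration: completions are routed through LiteLLM to a local
# Ollama server (model string "ollama/llama3.1"), embeddings also come from
# Ollama, Postgres backs document/vector/graph storage, parsing runs locally
# via Unstructured, and Hatchet drives orchestration.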
[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search"]

[agent.generation_config]
model = "ollama/llama3.1"

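# The "ollama/..." prefix tells LiteLLM to route completion calls to a local
# Ollama instance. concurrent_request_limit = 1 keeps calls serialized, a
# conservative default for a single local model; raise it if your hardware can
# serve parallel requests.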
[completion]
provider = "litellm"
concurrent_request_limit = 1

[completion.generation_config]
model = "ollama/llama3.1"
temperature = 0.1
top_p = 1
max_tokens_to_sample = 1_024
stream = false
add_generation_kwargs = { }

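# Graph creation, entity deduplication, enrichment (community reports and
# Leiden clustering), and graph search all reuse the same local model here;
# each sub-section carries its own generation_config, so these stages can be
# pointed at different models independently.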
[database]
provider = "postgres"

[database.graph_creation_settings]
clustering_mode = "remote"
graph_entity_description_prompt = "graphrag_entity_description"
entity_types = [] # if empty, all entity types are extracted
relation_types = [] # if empty, all relation types are extracted
fragment_merge_count = 4 # number of fragments to merge into a single extraction
max_knowledge_relationships = 100
max_description_input_length = 65536
generation_config = { model = "ollama/llama3.1" } # model used for relationship extraction (and other generation params)

[database.graph_entity_deduplication_settings]
graph_entity_deduplication_type = "by_name"
graph_entity_deduplication_prompt = "graphrag_entity_deduplication"
max_description_input_length = 65536
generation_config = { model = "ollama/llama3.1" } # model used for entity deduplication (and other generation params)

[database.graph_enrichment_settings]
community_reports_prompt = "graphrag_community_reports"
max_summary_input_length = 65536
generation_config = { model = "ollama/llama3.1" } # model used for node descriptions and graph clustering (and other generation params)
leiden_params = {}

[database.graph_search_settings]
generation_config = { model = "ollama/llama3.1" }

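# base_dimension must match the embedding model's output size;
# mxbai-embed-large produces 1024-dimensional vectors, so keep the two in sync
# if you swap in a different Ollama embedding model.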
[embedding]
provider = "ollama"
base_model = "mxbai-embed-large"
base_dimension = 1_024
batch_size = 128
add_title_as_prefix = true
concurrent_request_limit = 2

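# Chunking is title-aware: a new chunk starts after roughly 512 characters
# (hard cap 1_024), fragments under 128 characters are merged with neighbors,
# and consecutive chunks overlap by 20 characters. Document summaries are
# built from 16 chunks using the local llama3.1 model.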
[ingestion]
provider = "unstructured_local"
strategy = "auto"
chunking_strategy = "by_title"
new_after_n_chars = 512
max_characters = 1_024
combine_under_n_chars = 128
overlap = 20
chunks_for_document_summary = 16
document_summary_model = "ollama/llama3.1"

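# Hatchet is a task-queue/orchestration engine; ingestion and graph-building
# jobs are dispatched to it rather than run in-process, so it is expected to
# be running as a separate service alongside the API.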
[orchestration]
provider = "hatchet"
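# Prerequisite: the local Ollama server must already have both models pulled,
# e.g. `ollama pull llama3.1` and `ollama pull mxbai-embed-large`
# (model tags taken from the settings above; adjust if you use different ones).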