[agent]
system_instruction_name = "rag_agent"
tool_names = ["local_search"]

  [agent.generation_config]
  model = "ollama/llama3.1"

[completion]
provider = "litellm"
concurrent_request_limit = 1

  [completion.generation_config]
  model = "ollama/llama3.1"
  temperature = 0.1
  top_p = 1
  max_tokens_to_sample = 1_024
  stream = false
  add_generation_kwargs = { }

[database]
provider = "postgres"

  [database.graph_creation_settings]
  clustering_mode = "remote"
  graph_entity_description_prompt = "graphrag_entity_description"
  entity_types = [] # if empty, all entities are extracted
  relation_types = [] # if empty, all relations are extracted
  fragment_merge_count = 4 # number of fragments to merge into a single extraction
  max_knowledge_relationships = 100
  max_description_input_length = 65536
  generation_config = { model = "ollama/llama3.1" } # and other params, model used for relationship extraction

  [database.graph_entity_deduplication_settings]
  graph_entity_deduplication_type = "by_name"
  graph_entity_deduplication_prompt = "graphrag_entity_deduplication"
  max_description_input_length = 65536
  generation_config = { model = "ollama/llama3.1" } # and other params, model used for deduplication

  [database.graph_enrichment_settings]
  community_reports_prompt = "graphrag_community_reports"
  max_summary_input_length = 65536
  generation_config = { model = "ollama/llama3.1" } # and other params, model used for node description and graph clustering
  leiden_params = {}

  [database.graph_search_settings]
  generation_config = { model = "ollama/llama3.1" }

[embedding]
provider = "ollama"
base_model = "mxbai-embed-large"
base_dimension = 1_024
batch_size = 128
add_title_as_prefix = true
concurrent_request_limit = 2

[ingestion]
provider = "unstructured_local"
strategy = "auto"
chunking_strategy = "by_title"
new_after_n_chars = 512
max_characters = 1_024
combine_under_n_chars = 128
overlap = 20
chunks_for_document_summary = 16
document_summary_model = "ollama/llama3.1"

[orchestration]
provider = "hatchet"
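
# A minimal sketch (not part of the configuration itself) of how these settings
# can be inspected with Python's standard-library TOML parser; the filename
# "local_llm.toml" below is an assumption used purely for illustration:
#
#   import tomllib  # Python 3.11+
#
#   with open("local_llm.toml", "rb") as f:
#       cfg = tomllib.load(f)
#
#   # e.g. confirm which completion and embedding models this config points at
#   print(cfg["completion"]["generation_config"]["model"])  # ollama/llama3.1
#   print(cfg["embedding"]["base_model"])                    # mxbai-embed-large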