# full_azure.toml
  1. # A config which overrides all instances of `openai` with `azure` in the `r2r.toml` config
  2. [completion]
  3. provider = "litellm"
  4. concurrent_request_limit = 128
  5. [completion.generation_config]
  6. model = "azure/gpt-4o"
  7. [agent]
  8. [agent.generation_config]
  9. model = "azure/gpt-4o"
  10. [database]
  11. [database.graph_creation_settings]
  12. clustering_mode = "remote"
  13. generation_config = { model = "azure/gpt-4o-mini" }
  14. [database.graph_entity_deduplication_settings]
  15. generation_config = { model = "azure/gpt-4o-mini" }
  16. [database.graph_enrichment_settings]
  17. generation_config = { model = "azure/gpt-4o-mini" }
  18. [database.graph_search_settings]
  19. generation_config = { model = "azure/gpt-4o-mini" }
  20. [embedding]
  21. provider = "litellm"
  22. base_model = "azure/text-embedding-3-small"
  23. base_dimension = 512
  24. [file]
  25. provider = "postgres"
  26. [ingestion]
  27. provider = "unstructured_local"
  28. strategy = "auto"
  29. chunking_strategy = "by_title"
  30. new_after_n_chars = 2_048
  31. max_characters = 4_096
  32. combine_under_n_chars = 1_024
  33. overlap = 1_024
  34. document_summary_model = "azure/gpt-4o-mini"
  35. vision_img_model = "azure/gpt-4o"
  36. vision_pdf_model = "azure/gpt-4o"
  37. [ingestion.extra_parsers]
  38. pdf = "zerox"
  39. [ingestion.chunk_enrichment_settings]
  40. generation_config = { model = "azure/gpt-4o-mini" }
  41. [orchestration]
  42. provider = "hatchet"
  43. kg_creation_concurrency_limit = 32
  44. ingestion_concurrency_limit = 4
  45. kg_concurrency_limit = 8