full_azure.toml

# A config which overrides all instances of `openai` with `azure` in the `r2r.toml` config
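
# With the `litellm` providers below, an `azure/<name>` model string is routed to the Azure
# OpenAI deployment named `<name>`, so every model referenced here must exist as a deployment
# in your Azure resource. LiteLLM typically reads the credentials from the AZURE_API_KEY,
# AZURE_API_BASE, and AZURE_API_VERSION environment variables, so set those before starting R2R.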

[completion]
provider = "litellm"
concurrent_request_limit = 128

[completion.generation_config]
model = "azure/gpt-4o"

[agent]

[agent.generation_config]
model = "azure/gpt-4o"
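
# Models used when building and querying the knowledge graph; `clustering_mode = "remote"`
# presumably offloads graph clustering to a remote service rather than running it in-process.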

[database]

[database.graph_creation_settings]
clustering_mode = "remote"
generation_config = { model = "azure/gpt-4o-mini" }

[database.graph_enrichment_settings]
generation_config = { model = "azure/gpt-4o-mini" }

[database.graph_search_settings]
generation_config = { model = "azure/gpt-4o-mini" }
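
# `base_dimension` should match the vector size stored in Postgres; 512 relies on
# text-embedding-3-small supporting shortened embeddings (its native size is 1536).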

[embedding]
provider = "litellm"
base_model = "azure/text-embedding-3-small"
base_dimension = 512

[file]
provider = "postgres"
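
# Chunking is delegated to the locally run Unstructured library; `new_after_n_chars`,
# `max_characters`, `combine_under_n_chars`, and `overlap` are character counts used by
# its "by_title" chunking strategy.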

[ingestion]
provider = "unstructured_local"
strategy = "auto"
chunking_strategy = "by_title"
new_after_n_chars = 2_048
max_characters = 4_096
combine_under_n_chars = 1_024
overlap = 1_024
document_summary_model = "azure/gpt-4o-mini"
vision_img_model = "azure/gpt-4o"
vision_pdf_model = "azure/gpt-4o"
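
# The `zerox` parser converts PDF pages to images and transcribes them with a vision model,
# presumably the `vision_pdf_model` configured above.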

[ingestion.extra_parsers]
pdf = "zerox"

[ingestion.chunk_enrichment_settings]
generation_config = { model = "azure/gpt-4o-mini" }
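
# Hatchet is the workflow engine that schedules ingestion and knowledge-graph jobs; the
# *_concurrency_limit values cap how many of each job type run in parallel.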

[orchestration]
provider = "hatchet"
kg_creation_concurrency_limit = 32
ingestion_concurrency_limit = 4
kg_concurrency_limit = 8
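
# This override config is typically selected by name (e.g. `full_azure`) when launching R2R
# in place of the default `r2r.toml`; in Docker deployments that selection is commonly made
# through an R2R_CONFIG_NAME environment variable, though the exact mechanism depends on
# your deployment.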