documents_router.py

import base64
import logging
import mimetypes
import textwrap
from datetime import datetime
from io import BytesIO
from typing import Any, Optional
from urllib.parse import quote
from uuid import UUID

from fastapi import Body, Depends, File, Form, Path, Query, UploadFile
from fastapi.background import BackgroundTasks
from fastapi.responses import FileResponse, StreamingResponse
from pydantic import Json

from core.base import (
    IngestionConfig,
    R2RException,
    SearchMode,
    SearchSettings,
    UnprocessedChunk,
    Workflow,
    generate_document_id,
    generate_id,
    select_search_filters,
)
from core.base.abstractions import GraphCreationSettings, StoreType
from core.base.api.models import (
    GenericBooleanResponse,
    WrappedBooleanResponse,
    WrappedChunksResponse,
    WrappedCollectionsResponse,
    WrappedDocumentResponse,
    WrappedDocumentSearchResponse,
    WrappedDocumentsResponse,
    WrappedEntitiesResponse,
    WrappedGenericMessageResponse,
    WrappedIngestionResponse,
    WrappedRelationshipsResponse,
)
from core.utils import update_settings_from_dict
from shared.abstractions import IngestionMode

from ...abstractions import R2RProviders, R2RServices
from ...config import R2RConfig
from .base_router import BaseRouterV3

logger = logging.getLogger()

MAX_CHUNKS_PER_REQUEST = 1024 * 100


def merge_search_settings(
    base: SearchSettings, overrides: SearchSettings
) -> SearchSettings:
    # Convert both to dicts
    base_dict = base.model_dump()
    overrides_dict = overrides.model_dump(exclude_unset=True)

    # Update base_dict with values from overrides_dict.
    # This ensures that any field set in overrides takes precedence.
    for k, v in overrides_dict.items():
        base_dict[k] = v

    # Construct a new SearchSettings from the merged dict
    return SearchSettings(**base_dict)


def merge_ingestion_config(
    base: IngestionConfig, overrides: IngestionConfig
) -> IngestionConfig:
    base_dict = base.model_dump()
    overrides_dict = overrides.model_dump(exclude_unset=True)

    for k, v in overrides_dict.items():
        base_dict[k] = v

    return IngestionConfig(**base_dict)
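
# NOTE: both merge helpers implement "explicit overrides win" semantics. Because
# overrides are dumped with exclude_unset=True, only fields the caller actually
# set on `overrides` replace the corresponding fields on `base`; everything else
# keeps the base value. Illustrative sketch (the `limit` field is used here only
# as an example, not as the full SearchSettings schema):
#
#     base = SearchSettings(limit=10)
#     overrides = SearchSettings(limit=25)   # explicitly set by the caller
#     merged = merge_search_settings(base, overrides)
#     assert merged.limit == 25              # override wins; unset fields stay as in `base`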


class DocumentsRouter(BaseRouterV3):
    def __init__(
        self,
        providers: R2RProviders,
        services: R2RServices,
        config: R2RConfig,
    ):
        logging.info("Initializing DocumentsRouter")
        super().__init__(providers, services, config)
        self._register_workflows()

    def _prepare_search_settings(
        self,
        auth_user: Any,
        search_mode: SearchMode,
        search_settings: Optional[SearchSettings],
    ) -> SearchSettings:
        """Prepare the effective search settings based on the provided
        search_mode, optional user overrides in search_settings, and applied
        filters."""
        if search_mode != SearchMode.custom:
            # Start from mode defaults
            effective_settings = SearchSettings.get_default(search_mode.value)
            if search_settings:
                # Merge user-provided overrides
                effective_settings = merge_search_settings(
                    effective_settings, search_settings
                )
        else:
            # Custom mode: use provided settings or defaults
            effective_settings = search_settings or SearchSettings()

        # Apply user-specific filters
        effective_settings.filters = select_search_filters(
            auth_user, effective_settings
        )
        return effective_settings

    # TODO - Remove this legacy method
    def _register_workflows(self):
        logger.debug(
            "Registering ingestion workflows for orchestration provider: %s",
            self.providers.orchestration.config.provider,
        )
        self.providers.orchestration.register_workflows(
            Workflow.INGESTION,
            self.services.ingestion,
            {
                "ingest-files": (
                    # "Ingest files task queued successfully."
                    "Document created and ingested successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Document created and ingested successfully."
                ),
                "ingest-chunks": (
                    # "Ingest chunks task queued successfully."
                    "Document created and ingested successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Document created and ingested successfully."
                ),
                "update-chunk": (
                    # "Update chunk task queued successfully."
                    "Chunk update completed successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Chunk update completed successfully."
                ),
                "update-document-metadata": (
                    # "Update document metadata task queued successfully."
                    "Document metadata update completed successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Document metadata update completed successfully."
                ),
                "create-vector-index": (
                    # "Vector index creation task queued successfully."
                    "Vector index creation task completed successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Vector index creation task completed successfully."
                ),
                "delete-vector-index": (
                    # "Vector index deletion task queued successfully."
                    "Vector index deletion task completed successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Vector index deletion task completed successfully."
                ),
                "select-vector-index": (
                    # "Vector index selection task queued successfully."
                    "Vector index selection task completed successfully."
                    if self.providers.orchestration.config.provider != "simple"
                    else "Vector index selection task completed successfully."
                ),
            },
        )

    def _prepare_ingestion_config(
        self,
        ingestion_mode: IngestionMode,
        ingestion_config: Optional[IngestionConfig],
    ) -> IngestionConfig:
        # If not custom, start from defaults
        if ingestion_mode != IngestionMode.custom:
            effective_config = IngestionConfig.get_default(
                ingestion_mode.value, app=self.providers.auth.config.app
            )
            if ingestion_config:
                effective_config = merge_ingestion_config(
                    effective_config, ingestion_config
                )
        else:
            effective_config = ingestion_config or IngestionConfig(
                app=self.providers.auth.config.app
            )

        effective_config.validate_config()
        return effective_config
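
    # NOTE: ingestion settings are resolved in three steps: (1) start from the
    # defaults for the requested IngestionMode (skipped for `custom`), (2) layer
    # on any fields explicitly set in the caller-supplied ingestion_config, and
    # (3) run validate_config() on the merged result. This mirrors the
    # merge_search_settings() precedence rules above.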

    def _setup_routes(self):
        @self.router.post(
            "/documents",
            dependencies=[Depends(self.rate_limit_dependency)],
            status_code=202,
            summary="Create a new document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.create(
                                file_path="pg_essay_1.html",
                                metadata={"metadata_1": "some random metadata"},
                                id=None,
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.create({
                                    file: { path: "examples/data/marmeladov.txt", name: "marmeladov.txt" },
                                    metadata: { title: "marmeladov.txt" },
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X POST "https://api.example.com/v3/documents" \\
                                -H "Content-Type: multipart/form-data" \\
                                -H "Authorization: Bearer YOUR_API_KEY" \\
                                -F "file=@pg_essay_1.html;type=text/html" \\
                                -F 'metadata={}' \\
                                -F 'id=null'
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def create_document(
            file: Optional[UploadFile] = File(
                None,
                description="The file to ingest. Exactly one of file, raw_text, or chunks must be provided.",
            ),
            raw_text: Optional[str] = Form(
                None,
                description="Raw text content to ingest. Exactly one of file, raw_text, or chunks must be provided.",
            ),
            chunks: Optional[Json[list[str]]] = Form(
                None,
                description="Pre-processed text chunks to ingest. Exactly one of file, raw_text, or chunks must be provided.",
            ),
            id: Optional[UUID] = Form(
                None,
                description="The ID of the document. If not provided, a new ID will be generated.",
            ),
            collection_ids: Optional[Json[list[UUID]]] = Form(
                None,
                description="Collection IDs to associate with the document. If none are provided, the document will be assigned to the user's default collection.",
            ),
            metadata: Optional[Json[dict]] = Form(
                None,
                description="Metadata to associate with the document, such as title, description, or custom fields.",
            ),
            ingestion_mode: IngestionMode = Form(
                default=IngestionMode.custom,
                description=(
                    "Ingestion modes:\n"
                    "- `hi-res`: Thorough ingestion with full summaries and enrichment.\n"
                    "- `ocr`: OCR via Mistral and full summaries.\n"
                    "- `fast`: Quick ingestion with minimal enrichment and no summaries.\n"
                    "- `custom`: Full control via `ingestion_config`.\n\n"
                    "If `filters` or `limit` (in `ingestion_config`) are provided alongside `hi-res` or `fast`, "
                    "they will override the default settings for that mode."
                ),
            ),
            ingestion_config: Optional[Json[IngestionConfig]] = Form(
                None,
                description="An optional dictionary to override the default chunking configuration for the ingestion process. If not provided, the system will use the default server-side chunking configuration.",
            ),
            run_with_orchestration: Optional[bool] = Form(
                True,
                description="Whether or not ingestion runs with orchestration; default is `True`. When set to `False`, the ingestion process runs synchronously and returns the result directly.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedIngestionResponse:
            """Creates a new Document object from an input file, text content,
            or chunks. The chosen `ingestion_mode` determines how the ingestion
            process is configured:

            **Ingestion Modes:**
            - `hi-res`: Comprehensive parsing and enrichment, including summaries and possibly more thorough parsing.
            - `ocr`: OCR-based parsing via Mistral, with full summaries.
            - `fast`: Speed-focused ingestion that skips certain enrichment steps like summaries.
            - `custom`: Provide a full `ingestion_config` to customize the entire ingestion process.

            Exactly one of `file`, `raw_text`, or `chunks` must be provided. Documents are shared through
            `Collections`, which allow for tightly specified cross-user interactions.

            When run with orchestration, the ingestion process runs asynchronously and its progress can be
            tracked using the returned task_id.
            """
            if not auth_user.is_superuser:
                user_document_count = (
                    await self.services.management.documents_overview(
                        user_ids=[auth_user.id],
                        offset=0,
                        limit=1,
                    )
                )["total_entries"]
                user_max_documents = (
                    await self.services.management.get_user_max_documents(
                        auth_user.id
                    )
                )
                if user_document_count >= user_max_documents:
                    raise R2RException(
                        status_code=403,
                        message=f"User has reached the maximum number of documents allowed ({user_max_documents}).",
                    )

                # Get chunks using the vector handler's list_chunks method
                user_chunk_count = (
                    await self.services.ingestion.list_chunks(
                        filters={"owner_id": {"$eq": str(auth_user.id)}},
                        offset=0,
                        limit=1,
                    )
                )["total_entries"]
                user_max_chunks = (
                    await self.services.management.get_user_max_chunks(
                        auth_user.id
                    )
                )
                if user_chunk_count >= user_max_chunks:
                    raise R2RException(
                        status_code=403,
                        message=f"User has reached the maximum number of chunks allowed ({user_max_chunks}).",
                    )

                user_collections_count = (
                    await self.services.management.collections_overview(
                        user_ids=[auth_user.id],
                        offset=0,
                        limit=1,
                    )
                )["total_entries"]
                user_max_collections = (
                    await self.services.management.get_user_max_collections(
                        auth_user.id
                    )
                )
                if user_collections_count >= user_max_collections:  # type: ignore
                    raise R2RException(
                        status_code=403,
                        message=f"User has reached the maximum number of collections allowed ({user_max_collections}).",
                    )

            effective_ingestion_config = self._prepare_ingestion_config(
                ingestion_mode=ingestion_mode,
                ingestion_config=ingestion_config,
            )

            if not file and not raw_text and not chunks:
                raise R2RException(
                    status_code=422,
                    message="Either a `file`, `raw_text`, or `chunks` must be provided.",
                )
            if (
                (file and raw_text)
                or (file and chunks)
                or (raw_text and chunks)
            ):
                raise R2RException(
                    status_code=422,
                    message="Only one of `file`, `raw_text`, or `chunks` may be provided.",
                )

            metadata = metadata or {}

            if chunks:
                if len(chunks) == 0:
                    raise R2RException("Empty list of chunks provided", 400)
                if len(chunks) > MAX_CHUNKS_PER_REQUEST:
                    raise R2RException(
                        f"Maximum of {MAX_CHUNKS_PER_REQUEST} chunks per request",
                        400,
                    )

                document_id = id or generate_document_id(
                    "".join(chunks), auth_user.id
                )

                # FIXME: Metadata doesn't seem to be getting passed through
                raw_chunks_for_doc = [
                    UnprocessedChunk(
                        text=chunk,
                        metadata=metadata,
                        id=generate_id(),
                    )
                    for chunk in chunks
                ]

                # Prepare workflow input
                workflow_input = {
                    "document_id": str(document_id),
                    "chunks": [
                        chunk.model_dump(mode="json")
                        for chunk in raw_chunks_for_doc
                    ],
                    "metadata": metadata,  # Base metadata for the document
                    "user": auth_user.model_dump_json(),
                    "ingestion_config": effective_ingestion_config.model_dump(
                        mode="json"
                    ),
                }

                if run_with_orchestration:
                    try:
                        # Run ingestion with orchestration
                        raw_message = (
                            await self.providers.orchestration.run_workflow(
                                "ingest-chunks",
                                {"request": workflow_input},
                                options={
                                    "additional_metadata": {
                                        "document_id": str(document_id),
                                    }
                                },
                            )
                        )
                        raw_message["document_id"] = str(document_id)
                        return raw_message  # type: ignore
                    except Exception as e:  # TODO: Need to find specific errors that we should be excepting (gRPC most likely?)
                        logger.error(
                            f"Error running orchestrated ingestion: {e} \n\nAttempting to run without orchestration."
                        )

                logger.info("Running chunk ingestion without orchestration.")
                from core.main.orchestration import simple_ingestion_factory

                simple_ingestor = simple_ingestion_factory(
                    self.services.ingestion
                )
                await simple_ingestor["ingest-chunks"](workflow_input)

                return {  # type: ignore
                    "message": "Document created and ingested successfully.",
                    "document_id": str(document_id),
                    "task_id": None,
                }
            else:
                if file:
                    file_data = await self._process_file(file)

                    if not file.filename:
                        raise R2RException(
                            status_code=422,
                            message="Uploaded file must have a filename.",
                        )

                    file_ext = file.filename.split(".")[
                        -1
                    ]  # e.g. "pdf", "txt"
                    max_allowed_size = await self.services.management.get_max_upload_size_by_type(
                        user_id=auth_user.id, file_type_or_ext=file_ext
                    )

                    content_length = file_data["content_length"]
                    if content_length > max_allowed_size:
                        raise R2RException(
                            status_code=413,  # HTTP 413: Payload Too Large
                            message=(
                                f"File size exceeds maximum of {max_allowed_size} bytes "
                                f"for extension '{file_ext}'."
                            ),
                        )

                    file_content = BytesIO(
                        base64.b64decode(file_data["content"])
                    )
                    file_data.pop("content", None)

                    document_id = id or generate_document_id(
                        file_data["filename"], auth_user.id
                    )
                elif raw_text:
                    content_length = len(raw_text)
                    file_content = BytesIO(raw_text.encode("utf-8"))

                    document_id = id or generate_document_id(
                        raw_text, auth_user.id
                    )

                    title = metadata.get("title", None)
                    title = title + ".txt" if title else None
                    file_data = {
                        "filename": title or "N/A",
                        "content_type": "text/plain",
                    }
                else:
                    raise R2RException(
                        status_code=422,
                        message="Either a file or content must be provided.",
                    )

                workflow_input = {
                    "file_data": file_data,
                    "document_id": str(document_id),
                    "collection_ids": (
                        [str(cid) for cid in collection_ids]
                        if collection_ids
                        else None
                    ),
                    "metadata": metadata,
                    "ingestion_config": effective_ingestion_config.model_dump(
                        mode="json"
                    ),
                    "user": auth_user.model_dump_json(),
                    "size_in_bytes": content_length,
                    "version": "v0",
                }

                file_name = file_data["filename"]
                await self.providers.database.files_handler.store_file(
                    document_id,
                    file_name,
                    file_content,
                    file_data["content_type"],
                )

                await self.services.ingestion.ingest_file_ingress(
                    file_data=workflow_input["file_data"],
                    user=auth_user,
                    document_id=workflow_input["document_id"],
                    size_in_bytes=workflow_input["size_in_bytes"],
                    metadata=workflow_input["metadata"],
                    version=workflow_input["version"],
                )

                if run_with_orchestration:
                    try:
                        # TODO - Modify create_chunks so that we can add chunks to existing document
                        workflow_result: dict[
                            str, str | None
                        ] = await self.providers.orchestration.run_workflow(  # type: ignore
                            "ingest-files",
                            {"request": workflow_input},
                            options={
                                "additional_metadata": {
                                    "document_id": str(document_id),
                                }
                            },
                        )
                        workflow_result["document_id"] = str(document_id)
                        return workflow_result  # type: ignore
                    except Exception as e:  # TODO: Need to find specific error (gRPC most likely?)
                        logger.error(
                            f"Error running orchestrated ingestion: {e} \n\nAttempting to run without orchestration."
                        )

                logger.info(
                    f"Running ingestion without orchestration for file {file_name} and document_id {document_id}."
                )
                # TODO - Clean up implementation logic here to be more explicitly `synchronous`
                from core.main.orchestration import simple_ingestion_factory

                simple_ingestor = simple_ingestion_factory(
                    self.services.ingestion
                )
                await simple_ingestor["ingest-files"](workflow_input)

                return {  # type: ignore
                    "message": "Document created and ingested successfully.",
                    "document_id": str(document_id),
                    "task_id": None,
                }

        @self.router.patch(
            "/documents/{id}/metadata",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Append metadata to a document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.append_metadata(
                                id="9fbe403b-c11c-5aae-8ade-ef22980c3ad1",
                                metadata=[{"key": "new_key", "value": "new_value"}],
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.appendMetadata({
                                    id: "9fbe403b-c11c-5aae-8ade-ef22980c3ad1",
                                    metadata: [{ key: "new_key", value: "new_value" }],
                                });
                            }

                            main();
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def patch_metadata(
            id: UUID = Path(
                ...,
                description="The ID of the document to append metadata to.",
            ),
            metadata: list[dict] = Body(
                ...,
                description="Metadata to append to the document.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedDocumentResponse:
            """Appends metadata to a document. This endpoint allows adding new
            metadata fields or updating existing ones."""
            request_user_ids = (
                None if auth_user.is_superuser else [auth_user.id]
            )

            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=request_user_ids,
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )
            results = documents_overview_response["results"]
            if len(results) == 0:
                raise R2RException("Document not found.", 404)

            return await self.services.management.update_document_metadata(
                document_id=id,
                metadata=metadata,
                overwrite=False,
            )

        @self.router.put(
            "/documents/{id}/metadata",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Replace metadata of a document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.replace_metadata(
                                id="9fbe403b-c11c-5aae-8ade-ef22980c3ad1",
                                metadata=[{"key": "new_key", "value": "new_value"}],
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.replaceMetadata({
                                    id: "9fbe403b-c11c-5aae-8ade-ef22980c3ad1",
                                    metadata: [{ key: "new_key", value: "new_value" }],
                                });
                            }

                            main();
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def put_metadata(
            id: UUID = Path(
                ...,
                description="The ID of the document whose metadata should be replaced.",
            ),
            metadata: list[dict] = Body(
                ...,
                description="The metadata that will replace the document's existing metadata.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedDocumentResponse:
            """Replaces metadata in a document. This endpoint allows
            overwriting existing metadata fields."""
            request_user_ids = (
                None if auth_user.is_superuser else [auth_user.id]
            )

            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=request_user_ids,
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )
            results = documents_overview_response["results"]
            if len(results) == 0:
                raise R2RException("Document not found.", 404)

            return await self.services.management.update_document_metadata(
                document_id=id,
                metadata=metadata,
                overwrite=True,
            )

        @self.router.post(
            "/documents/export",
            summary="Export documents to CSV",
            dependencies=[Depends(self.rate_limit_dependency)],
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient("http://localhost:7272")
                            # when using auth, do client.login(...)

                            response = client.documents.export(
                                output_path="export.csv",
                                columns=["id", "title", "created_at"],
                                include_header=True,
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient("http://localhost:7272");

                            async function main() {
                                await client.documents.export({
                                    outputPath: "export.csv",
                                    columns: ["id", "title", "created_at"],
                                    includeHeader: true,
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X POST "http://127.0.0.1:7272/v3/documents/export" \\
                                -H "Authorization: Bearer YOUR_API_KEY" \\
                                -H "Content-Type: application/json" \\
                                -H "Accept: text/csv" \\
                                -d '{ "columns": ["id", "title", "created_at"], "include_header": true }' \\
                                --output export.csv
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def export_documents(
            background_tasks: BackgroundTasks,
            columns: Optional[list[str]] = Body(
                None, description="Specific columns to export"
            ),
            filters: Optional[dict] = Body(
                None, description="Filters to apply to the export"
            ),
            include_header: Optional[bool] = Body(
                True, description="Whether to include column headers"
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> FileResponse:
            """Export documents as a downloadable CSV file."""
            if not auth_user.is_superuser:
                raise R2RException(
                    "Only a superuser can export data.",
                    403,
                )

            (
                csv_file_path,
                temp_file,
            ) = await self.services.management.export_documents(
                columns=columns,
                filters=filters,
                include_header=include_header
                if include_header is not None
                else True,
            )

            background_tasks.add_task(temp_file.close)

            return FileResponse(
                path=csv_file_path,
                media_type="text/csv",
                filename="documents_export.csv",
            )

        @self.router.get(
            "/documents/download_zip",
            dependencies=[Depends(self.rate_limit_dependency)],
            response_class=StreamingResponse,
            summary="Export multiple documents as zip",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            client.documents.download_zip(
                                document_ids=["uuid1", "uuid2"],
                                start_date="2024-01-01",
                                end_date="2024-12-31",
                            )
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents/download_zip?document_ids=uuid1,uuid2&start_date=2024-01-01&end_date=2024-12-31" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def export_files(
            document_ids: Optional[list[UUID]] = Query(
                None,
                description="List of document IDs to include in the export. If not provided, all accessible documents will be included.",
            ),
            start_date: Optional[datetime] = Query(
                None,
                description="Filter documents created on or after this date.",
            ),
            end_date: Optional[datetime] = Query(
                None,
                description="Filter documents created before this date.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> StreamingResponse:
            """Export multiple documents as a zip file. Documents can be
            filtered by IDs and/or date range.

            The endpoint allows downloading:
            - Specific documents by providing their IDs
            - Documents within a date range
            - All accessible documents if no filters are provided

            Files are streamed as a zip archive to handle potentially large downloads efficiently.
            """
            if not auth_user.is_superuser:
                # For non-superusers, verify access to requested documents
                if document_ids:
                    documents_overview = (
                        await self.services.management.documents_overview(
                            user_ids=[auth_user.id],
                            document_ids=document_ids,
                            offset=0,
                            limit=len(document_ids),
                        )
                    )
                    if len(documents_overview["results"]) != len(document_ids):
                        raise R2RException(
                            status_code=403,
                            message="You don't have access to one or more requested documents.",
                        )
                if not document_ids:
                    raise R2RException(
                        status_code=403,
                        message="Non-superusers must provide document IDs to export.",
                    )

            (
                zip_name,
                zip_content,
                zip_size,
            ) = await self.services.management.export_files(
                document_ids=document_ids,
                start_date=start_date,
                end_date=end_date,
            )
            encoded_filename = quote(zip_name)

            async def stream_file():
                yield zip_content.getvalue()

            return StreamingResponse(
                stream_file(),
                media_type="application/zip",
                headers={
                    "Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}",
                    "Content-Length": str(zip_size),
                },
            )
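
        # NOTE: the zip download above (and the single-file download further
        # below) advertises the filename through the RFC 6266/5987
        # `filename*=UTF-8''...` form of Content-Disposition, with
        # urllib.parse.quote() percent-encoding any non-ASCII characters so
        # the header stays ASCII-safe.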

        @self.router.get(
            "/documents",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="List documents",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.list(
                                limit=10,
                                offset=0,
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.list({
                                    limit: 10,
                                    offset: 0,
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def get_documents(
            ids: list[str] = Query(
                [],
                description="A list of document IDs to retrieve. If not provided, all documents will be returned.",
            ),
            offset: int = Query(
                0,
                ge=0,
                description="Specifies the number of objects to skip. Defaults to 0.",
            ),
            limit: int = Query(
                100,
                ge=1,
                le=1000,
                description="Specifies a limit on the number of objects to return, ranging between 1 and 1000. Defaults to 100.",
            ),
            include_summary_embeddings: bool = Query(
                False,
                description="Specifies whether or not to include embeddings of each document summary.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedDocumentsResponse:
            """Returns a paginated list of documents the authenticated user
            has access to.

            Results can be filtered by providing specific document IDs. Regular
            users will only see documents they own or have access to through
            collections. Superusers can see all documents.

            The documents are returned in order of last modification, with the
            most recent first.
            """
            requesting_user_id = (
                None if auth_user.is_superuser else [auth_user.id]
            )
            filter_collection_ids = (
                None if auth_user.is_superuser else auth_user.collection_ids
            )

            document_uuids = [UUID(document_id) for document_id in ids]
            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=requesting_user_id,
                    collection_ids=filter_collection_ids,
                    document_ids=document_uuids,
                    offset=offset,
                    limit=limit,
                )
            )
            if not include_summary_embeddings:
                for document in documents_overview_response["results"]:
                    document.summary_embedding = None

            return (  # type: ignore
                documents_overview_response["results"],
                {
                    "total_entries": documents_overview_response[
                        "total_entries"
                    ]
                },
            )

        @self.router.get(
            "/documents/{id}",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Retrieve a document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.retrieve(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.retrieve({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def get_document(
            id: UUID = Path(
                ...,
                description="The ID of the document to retrieve.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedDocumentResponse:
            """Retrieves detailed information about a specific document by its
            ID.

            This endpoint returns the document's metadata, status, and system information. It does not
            return the document's content - use the `/documents/{id}/download` endpoint for that.

            Users can only retrieve documents they own or have access to through collections.
            Superusers can retrieve any document.
            """
            request_user_ids = (
                None if auth_user.is_superuser else [auth_user.id]
            )
            filter_collection_ids = (
                None if auth_user.is_superuser else auth_user.collection_ids
            )

            documents_overview_response = await self.services.management.documents_overview(  # FIXME: This was using the pagination defaults from before... We need to review if this is as intended.
                user_ids=request_user_ids,
                collection_ids=filter_collection_ids,
                document_ids=[id],
                offset=0,
                limit=100,
            )
            results = documents_overview_response["results"]
            if len(results) == 0:
                raise R2RException("Document not found.", 404)

            return results[0]

        @self.router.get(
            "/documents/{id}/chunks",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="List document chunks",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.list_chunks(
                                id="32b6a70f-a995-5c51-85d2-834f06283a1e"
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.listChunks({
                                    id: "32b6a70f-a995-5c51-85d2-834f06283a1e",
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/chunks" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def list_chunks(
            id: UUID = Path(
                ...,
                description="The ID of the document to retrieve chunks for.",
            ),
            offset: int = Query(
                0,
                ge=0,
                description="Specifies the number of objects to skip. Defaults to 0.",
            ),
            limit: int = Query(
                100,
                ge=1,
                le=1000,
                description="Specifies a limit on the number of objects to return, ranging between 1 and 1000. Defaults to 100.",
            ),
            include_vectors: Optional[bool] = Query(
                False,
                description="Whether to include vector embeddings in the response.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedChunksResponse:
            """Retrieves the text chunks that were generated from a document
            during ingestion. Chunks represent semantic sections of the
            document and are used for retrieval and analysis.

            Users can only access chunks from documents they own or have access
            to through collections. Vector embeddings are only included if
            specifically requested.

            Results are returned in chunk sequence order, representing their
            position in the original document.
            """
            list_document_chunks = (
                await self.services.management.list_document_chunks(
                    document_id=id,
                    offset=offset,
                    limit=limit,
                    include_vectors=include_vectors or False,
                )
            )

            if not list_document_chunks["results"]:
                raise R2RException(
                    "No chunks found for the given document ID.", 404
                )

            is_owner = str(
                list_document_chunks["results"][0].get("owner_id")
            ) == str(auth_user.id)
            document_collections = (
                await self.services.management.collections_overview(
                    offset=0,
                    limit=-1,
                    document_ids=[id],
                )
            )
            user_has_access = (
                is_owner
                or set(auth_user.collection_ids).intersection(
                    {ele.id for ele in document_collections["results"]}  # type: ignore
                )
                != set()
            )

            if not user_has_access and not auth_user.is_superuser:
                raise R2RException(
                    "Not authorized to access this document's chunks.", 403
                )

            return (  # type: ignore
                list_document_chunks["results"],
                {"total_entries": list_document_chunks["total_entries"]},
            )

        @self.router.get(
            "/documents/{id}/download",
            dependencies=[Depends(self.rate_limit_dependency)],
            response_class=StreamingResponse,
            summary="Download document content",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.download(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.download({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/download" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def get_document_file(
            id: str = Path(..., description="Document ID"),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> StreamingResponse:
            """Downloads the original file content of a document.

            For uploaded files, returns the original file with its proper MIME
            type. For text-only documents, returns the content as plain text.

            Users can only download documents they own or have access to
            through collections.
            """
            try:
                document_uuid = UUID(id)
            except ValueError:
                raise R2RException(
                    status_code=422, message="Invalid document ID format."
                ) from None

            # Retrieve the document's information
            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=None,
                    collection_ids=None,
                    document_ids=[document_uuid],
                    offset=0,
                    limit=1,
                )
            )
            if not documents_overview_response["results"]:
                raise R2RException("Document not found.", 404)

            document = documents_overview_response["results"][0]
            is_owner = str(document.owner_id) == str(auth_user.id)

            if not auth_user.is_superuser and not is_owner:
                document_collections = (
                    await self.services.management.collections_overview(
                        offset=0,
                        limit=-1,
                        document_ids=[document_uuid],
                    )
                )
                document_collection_ids = {
                    str(ele.id)
                    for ele in document_collections["results"]  # type: ignore
                }
                user_collection_ids = {
                    str(cid) for cid in auth_user.collection_ids
                }
                has_collection_access = user_collection_ids.intersection(
                    document_collection_ids
                )
                if not has_collection_access:
                    raise R2RException(
                        "Not authorized to access this document.", 403
                    )

            file_tuple = await self.services.management.download_file(
                document_uuid
            )
            if not file_tuple:
                raise R2RException(status_code=404, message="File not found.")

            file_name, file_content, file_size = file_tuple
            encoded_filename = quote(file_name)

            mime_type, _ = mimetypes.guess_type(file_name)
            if not mime_type:
                mime_type = "application/octet-stream"

            async def file_stream():
                chunk_size = 1024 * 1024  # 1 MB
                while True:
                    data = file_content.read(chunk_size)
                    if not data:
                        break
                    yield data

            return StreamingResponse(
                file_stream(),
                media_type=mime_type,
                headers={
                    "Content-Disposition": f"inline; filename*=UTF-8''{encoded_filename}",
                    "Content-Length": str(file_size),
                },
            )

        @self.router.delete(
            "/documents/by-filter",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Delete documents by filter",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.delete_by_filter(
                                filters={"document_type": {"$eq": "txt"}}
                            )
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X DELETE "https://api.example.com/v3/documents/by-filter?filters=%7B%22document_type%22%3A%7B%22%24eq%22%3A%22text%22%7D%2C%22created_at%22%3A%7B%22%24lt%22%3A%222023-01-01T00%3A00%3A00Z%22%7D%7D" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def delete_document_by_filter(
            filters: Json[dict] = Body(
                ..., description="JSON-encoded filters"
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedBooleanResponse:
            """Delete documents based on provided filters.

            Allowed operators include: `eq`, `neq`, `gt`, `gte`, `lt`, `lte`,
            `like`, `ilike`, `in`, and `nin`. Deletion requests are limited to
            a user's own documents.
            """
            filters_dict = {
                "$and": [{"owner_id": {"$eq": str(auth_user.id)}}, filters]
            }
            await (
                self.services.management.delete_documents_and_chunks_by_filter(
                    filters=filters_dict
                )
            )

            return GenericBooleanResponse(success=True)  # type: ignore

        @self.router.delete(
            "/documents/{id}",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Delete a document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient

                            client = R2RClient()
                            # when using auth, do client.login(...)

                            response = client.documents.delete(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                        """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");

                            const client = new r2rClient();

                            async function main() {
                                const response = await client.documents.delete({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                });
                            }

                            main();
                        """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X DELETE "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa" \\
                                -H "Authorization: Bearer YOUR_API_KEY"
                        """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def delete_document_by_id(
            id: UUID = Path(..., description="Document ID"),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedBooleanResponse:
            """Delete a specific document. All chunks corresponding to the
            document are deleted, and all other references to the document are
            removed.

            NOTE - Deletions do not yet impact the knowledge graph or other
            derived data. This feature is planned for a future release.
            """
            filters: dict[str, Any] = {"document_id": {"$eq": str(id)}}
            if not auth_user.is_superuser:
                filters = {
                    "$and": [
                        {"owner_id": {"$eq": str(auth_user.id)}},
                        {"document_id": {"$eq": str(id)}},
                    ]
                }

            await (
                self.services.management.delete_documents_and_chunks_by_filter(
                    filters=filters
                )
            )

            return GenericBooleanResponse(success=True)  # type: ignore
  1350. @self.router.get(
  1351. "/documents/{id}/collections",
  1352. dependencies=[Depends(self.rate_limit_dependency)],
  1353. summary="List document collections",
  1354. openapi_extra={
  1355. "x-codeSamples": [
  1356. {
  1357. "lang": "Python",
  1358. "source": textwrap.dedent("""
  1359. from r2r import R2RClient
  1360. client = R2RClient()
  1361. # when using auth, do client.login(...)
  1362. response = client.documents.list_collections(
  1363. id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa", offset=0, limit=10
  1364. )
  1365. """),
  1366. },
  1367. {
  1368. "lang": "JavaScript",
  1369. "source": textwrap.dedent("""
  1370. const { r2rClient } = require("r2r-js");
  1371. const client = new r2rClient();
  1372. function main() {
  1373. const response = await client.documents.listCollections({
  1374. id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
  1375. });
  1376. }
  1377. main();
  1378. """),
  1379. },
  1380. {
  1381. "lang": "cURL",
  1382. "source": textwrap.dedent("""
  1383. curl -X GET "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/collections" \\
  1384. -H "Authorization: Bearer YOUR_API_KEY"
  1385. """),
  1386. },
  1387. ]
  1388. },
  1389. )
        @self.base_endpoint
        async def get_document_collections(
            id: str = Path(..., description="Document ID"),
            offset: int = Query(
                0,
                ge=0,
                description="Specifies the number of objects to skip. Defaults to 0.",
            ),
            limit: int = Query(
                100,
                ge=1,
                le=1000,
                description="Specifies a limit on the number of objects to return, ranging between 1 and 1000. Defaults to 100.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedCollectionsResponse:
            """Retrieves all collections that contain the specified document.
            This endpoint is restricted to superusers only and provides a
            system-wide view of document organization.

            Collections are used to organize documents and manage access
            control. A document can belong to multiple collections, and users
            can access documents through collection membership.

            The results are paginated and ordered by collection creation date,
            with the most recently created collections appearing first.

            NOTE - This endpoint is only available to superusers; it will be
            extended to regular users in a future release.
            """
            if not auth_user.is_superuser:
                raise R2RException(
                    "Only a superuser can get the collections belonging to a document.",
                    403,
                )

            collections_response = (
                await self.services.management.collections_overview(
                    offset=offset,
                    limit=limit,
                    document_ids=[UUID(id)],  # Convert string ID to UUID
                )
            )

            return collections_response["results"], {  # type: ignore
                "total_entries": collections_response["total_entries"]
            }
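
        # Hedged usage sketch (comment only, not executed): paging through all
        # collections for a document with the SDK call shown in the
        # x-codeSamples above. Assumes the wrapped response exposes `results`
        # and `total_entries`, mirroring the tuple returned by this handler.
        #
        #     from r2r import R2RClient
        #
        #     client = R2RClient()
        #     offset, limit, collections = 0, 100, []
        #     while True:
        #         page = client.documents.list_collections(
        #             id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
        #             offset=offset,
        #             limit=limit,
        #         )
        #         collections.extend(page.results)
        #         if offset + limit >= page.total_entries:
        #             break
        #         offset += limit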

        @self.router.post(
            "/documents/{id}/extract",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Extract entities and relationships",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient()
                            # when using auth, do client.login(...)
                            response = client.documents.extract(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                            """),
                    },
                ],
            },
        )
        @self.base_endpoint
        async def extract(
            id: UUID = Path(
                ...,
                description="The ID of the document to extract entities and relationships from.",
            ),
            settings: Optional[GraphCreationSettings] = Body(
                default=None,
                description="Settings for the entities and relationships extraction process.",
            ),
            run_with_orchestration: Optional[bool] = Body(
                default=True,
                description="Whether to run the entities and relationships extraction process with orchestration.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedGenericMessageResponse:
            """Extracts entities and relationships from a document.

            The entities and relationships extraction process involves:
            1. Parsing documents into semantic chunks
            2. Extracting entities and relationships using LLMs
            3. Storing the created entities and relationships in the knowledge graph
            4. Preserving the document's metadata and content, and associating
               the elements with collections the document belongs to
            """
            settings = settings.dict() if settings else None  # type: ignore

            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=(
                        None if auth_user.is_superuser else [auth_user.id]
                    ),
                    collection_ids=(
                        None
                        if auth_user.is_superuser
                        else auth_user.collection_ids
                    ),
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )["results"]
            if len(documents_overview_response) == 0:
                raise R2RException("Document not found.", 404)

            if (
                not auth_user.is_superuser
                and auth_user.id != documents_overview_response[0].owner_id
            ):
                raise R2RException(
                    "Only a superuser can extract entities and relationships from a document they do not own.",
                    403,
                )

            # Apply runtime settings overrides
            server_graph_creation_settings = (
                self.providers.database.config.graph_creation_settings
            )
            if settings:
                server_graph_creation_settings = update_settings_from_dict(
                    server_settings=server_graph_creation_settings,
                    settings_dict=settings,  # type: ignore
                )

            workflow_input = {
                "document_id": str(id),
                "graph_creation_settings": server_graph_creation_settings.model_dump_json(),
                "user": auth_user.json(),
            }

            if run_with_orchestration:
                try:
                    return await self.providers.orchestration.run_workflow(  # type: ignore
                        "graph-extraction", {"request": workflow_input}, {}
                    )
                except Exception as e:  # TODO: Need to find specific errors that we should be excepting (gRPC most likely?)
                    logger.error(
                        f"Error running orchestrated extraction: {e} \n\nAttempting to run without orchestration."
                    )

            from core.main.orchestration import (
                simple_graph_search_results_factory,
            )

            logger.info("Running extract-triples without orchestration.")
            simple_graph_search_results = simple_graph_search_results_factory(
                self.services.graph
            )
            await simple_graph_search_results["graph-extraction"](
                workflow_input
            )
            return {  # type: ignore
                "message": "Graph created successfully.",
                "task_id": None,
            }
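
        # Hedged usage sketch (comment only, not executed): invoking this route
        # over plain HTTP with `requests`. The path and the embedded body field
        # `run_with_orchestration` come from the route definition above; the
        # base URL and API key are placeholders.
        #
        #     import requests
        #
        #     resp = requests.post(
        #         "https://api.example.com/v3/documents/"
        #         "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/extract",
        #         headers={"Authorization": "Bearer YOUR_API_KEY"},
        #         json={"run_with_orchestration": False},
        #     )
        #     resp.raise_for_status()
        #     print(resp.json())
        #
        # When orchestration is disabled (or the orchestrated workflow raises),
        # the handler falls back to the in-process graph-extraction path above
        # and responds with a success message and a null task_id.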

        @self.router.post(
            "/documents/{id}/deduplicate",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Deduplicate entities",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient()
                            response = client.documents.deduplicate(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                            """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");
                            const client = new r2rClient();
                            async function main() {
                                const response = await client.documents.deduplicate({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                });
                            }
                            main();
                            """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X POST "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/deduplicate" \\
                                 -H "Authorization: Bearer YOUR_API_KEY"
                            """),
                    },
                ],
            },
        )
        @self.base_endpoint
        async def deduplicate(
            id: UUID = Path(
                ...,
                description="The ID of the document to deduplicate entities for.",
            ),
            settings: Optional[GraphCreationSettings] = Body(
                default=None,
                description="Settings for the entity deduplication process.",
            ),
            run_with_orchestration: Optional[bool] = Body(
                default=True,
                description="Whether to run the entity deduplication process with orchestration.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedGenericMessageResponse:
            """Deduplicates entities from a document."""
            settings = settings.model_dump() if settings else None  # type: ignore

            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=(
                        None if auth_user.is_superuser else [auth_user.id]
                    ),
                    collection_ids=(
                        None
                        if auth_user.is_superuser
                        else auth_user.collection_ids
                    ),
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )["results"]
            if len(documents_overview_response) == 0:
                raise R2RException("Document not found.", 404)

            if (
                not auth_user.is_superuser
                and auth_user.id != documents_overview_response[0].owner_id
            ):
                raise R2RException(
                    "Only a superuser can run deduplication on a document they do not own.",
                    403,
                )

            # Apply runtime settings overrides
            server_graph_creation_settings = (
                self.providers.database.config.graph_creation_settings
            )
            if settings:
                server_graph_creation_settings = update_settings_from_dict(
                    server_settings=server_graph_creation_settings,
                    settings_dict=settings,  # type: ignore
                )

            # Build the workflow input up front so the non-orchestrated
            # fallback below can reference it as well.
            workflow_input = {
                "document_id": str(id),
            }

            if run_with_orchestration:
                try:
                    return await self.providers.orchestration.run_workflow(  # type: ignore
                        "graph-deduplication",
                        {"request": workflow_input},
                        {},
                    )
                except Exception as e:  # TODO: Need to find specific errors that we should be excepting (gRPC most likely?)
                    logger.error(
                        f"Error running orchestrated deduplication: {e} \n\nAttempting to run without orchestration."
                    )

            from core.main.orchestration import (
                simple_graph_search_results_factory,
            )

            logger.info(
                "Running deduplicate-document-entities without orchestration."
            )
            simple_graph_search_results = simple_graph_search_results_factory(
                self.services.graph
            )
            await simple_graph_search_results["graph-deduplication"](
                workflow_input
            )
            return {  # type: ignore
                "message": "Entity deduplication completed successfully.",
                "task_id": None,
            }

        @self.router.get(
            "/documents/{id}/entities",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Lists the entities from the document",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient()
                            # when using auth, do client.login(...)
                            response = client.documents.list_entities(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
                            )
                            """),
                    },
                ],
            },
        )
        @self.base_endpoint
        async def get_entities(
            id: UUID = Path(
                ...,
                description="The ID of the document to retrieve entities from.",
            ),
            offset: int = Query(
                0,
                ge=0,
                description="Specifies the number of objects to skip. Defaults to 0.",
            ),
            limit: int = Query(
                100,
                ge=1,
                le=1000,
                description="Specifies a limit on the number of objects to return, ranging between 1 and 1000. Defaults to 100.",
            ),
            include_embeddings: Optional[bool] = Query(
                False,
                description="Whether to include vector embeddings in the response.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedEntitiesResponse:
            """Retrieves the entities that were extracted from a document.
            These represent important semantic elements like people, places,
            organizations, concepts, etc.

            Users can only access entities from documents they own or have
            access to through collections. Entity embeddings are only included
            if specifically requested.

            Results are returned in the order they were extracted from the
            document.
            """
            # if (
            #     not auth_user.is_superuser
            #     and id not in auth_user.collection_ids
            # ):
            #     raise R2RException(
            #         "The currently authenticated user does not have access to the specified collection.",
            #         403,
            #     )

            # First check if the document exists and user has access
            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=(
                        None if auth_user.is_superuser else [auth_user.id]
                    ),
                    collection_ids=(
                        None
                        if auth_user.is_superuser
                        else auth_user.collection_ids
                    ),
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )

            if not documents_overview_response["results"]:
                raise R2RException("Document not found.", 404)

            # Get all entities for this document from the document_entity table
            (
                entities,
                count,
            ) = await self.providers.database.graphs_handler.entities.get(
                parent_id=id,
                store_type=StoreType.DOCUMENTS,
                offset=offset,
                limit=limit,
                include_embeddings=include_embeddings or False,
            )

            return entities, {"total_entries": count}  # type: ignore
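
        # Hedged usage sketch (comment only, not executed): listing entities
        # over plain HTTP. The query parameters mirror the Query arguments
        # declared above; the base URL and API key are placeholders.
        #
        #     import requests
        #
        #     resp = requests.get(
        #         "https://api.example.com/v3/documents/"
        #         "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/entities",
        #         headers={"Authorization": "Bearer YOUR_API_KEY"},
        #         params={"offset": 0, "limit": 100, "include_embeddings": False},
        #     )
        #     resp.raise_for_status()
        #     payload = resp.json()  # expected to carry the results plus total_entries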

        @self.router.post(
            "/documents/{id}/entities/export",
            summary="Export document entities to CSV",
            dependencies=[Depends(self.rate_limit_dependency)],
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient("http://localhost:7272")
                            # when using auth, do client.login(...)
                            response = client.documents.export_entities(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                output_path="export.csv",
                                columns=["id", "title", "created_at"],
                                include_header=True,
                            )
                            """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");
                            const client = new r2rClient("http://localhost:7272");
                            async function main() {
                                await client.documents.exportEntities({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                    outputPath: "export.csv",
                                    columns: ["id", "title", "created_at"],
                                    includeHeader: true,
                                });
                            }
                            main();
                            """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X POST "http://127.0.0.1:7272/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/entities/export" \\
                                 -H "Authorization: Bearer YOUR_API_KEY" \\
                                 -H "Content-Type: application/json" \\
                                 -H "Accept: text/csv" \\
                                 -d '{ "columns": ["id", "title", "created_at"], "include_header": true }' \\
                                 --output export.csv
                            """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def export_entities(
            background_tasks: BackgroundTasks,
            id: UUID = Path(
                ...,
                description="The ID of the document to export entities from.",
            ),
            columns: Optional[list[str]] = Body(
                None, description="Specific columns to export"
            ),
            filters: Optional[dict] = Body(
                None, description="Filters to apply to the export"
            ),
            include_header: Optional[bool] = Body(
                True, description="Whether to include column headers"
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> FileResponse:
            """Export the entities of a document as a downloadable CSV file."""
            if not auth_user.is_superuser:
                raise R2RException(
                    "Only a superuser can export data.",
                    403,
                )

            (
                csv_file_path,
                temp_file,
            ) = await self.services.management.export_document_entities(
                id=id,
                columns=columns,
                filters=filters,
                include_header=include_header
                if include_header is not None
                else True,
            )

            background_tasks.add_task(temp_file.close)

            return FileResponse(
                path=csv_file_path,
                media_type="text/csv",
                filename="documents_export.csv",
            )
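
        # Hedged usage sketch (comment only, not executed): downloading the CSV
        # produced by this route and reading it with the standard library. The
        # body fields match the Body parameters above; the base URL, API key,
        # and column names are placeholders taken from the samples above.
        #
        #     import csv
        #     import requests
        #
        #     resp = requests.post(
        #         "https://api.example.com/v3/documents/"
        #         "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/entities/export",
        #         headers={"Authorization": "Bearer YOUR_API_KEY"},
        #         json={"columns": ["id", "title", "created_at"], "include_header": True},
        #     )
        #     resp.raise_for_status()
        #     with open("entities_export.csv", "wb") as f:
        #         f.write(resp.content)
        #     with open("entities_export.csv", newline="") as f:
        #         rows = list(csv.DictReader(f))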

        @self.router.get(
            "/documents/{id}/relationships",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="List document relationships",
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient()
                            # when using auth, do client.login(...)
                            response = client.documents.list_relationships(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                offset=0,
                                limit=100
                            )
                            """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");
                            const client = new r2rClient();
                            async function main() {
                                const response = await client.documents.listRelationships({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                    offset: 0,
                                    limit: 100,
                                });
                            }
                            main();
                            """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X GET "https://api.example.com/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/relationships" \\
                                 -H "Authorization: Bearer YOUR_API_KEY"
                            """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def get_relationships(
            id: UUID = Path(
                ...,
                description="The ID of the document to retrieve relationships for.",
            ),
            offset: int = Query(
                0,
                ge=0,
                description="Specifies the number of objects to skip. Defaults to 0.",
            ),
            limit: int = Query(
                100,
                ge=1,
                le=1000,
                description="Specifies a limit on the number of objects to return, ranging between 1 and 1000. Defaults to 100.",
            ),
            entity_names: Optional[list[str]] = Query(
                None,
                description="Filter relationships by specific entity names.",
            ),
            relationship_types: Optional[list[str]] = Query(
                None,
                description="Filter relationships by specific relationship types.",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedRelationshipsResponse:
            """Retrieves the relationships between entities that were extracted
            from a document. These represent connections and interactions
            between entities found in the text.

            Users can only access relationships from documents they own or have
            access to through collections. Results can be filtered by entity
            names and relationship types.

            Results are returned in the order they were extracted from the
            document.
            """
            # if (
            #     not auth_user.is_superuser
            #     and id not in auth_user.collection_ids
            # ):
            #     raise R2RException(
            #         "The currently authenticated user does not have access to the specified collection.",
            #         403,
            #     )

            # First check if the document exists and user has access
            documents_overview_response = (
                await self.services.management.documents_overview(
                    user_ids=(
                        None if auth_user.is_superuser else [auth_user.id]
                    ),
                    collection_ids=(
                        None
                        if auth_user.is_superuser
                        else auth_user.collection_ids
                    ),
                    document_ids=[id],
                    offset=0,
                    limit=1,
                )
            )

            if not documents_overview_response["results"]:
                raise R2RException("Document not found.", 404)

            # Get relationships for this document
            (
                relationships,
                count,
            ) = await self.providers.database.graphs_handler.relationships.get(
                parent_id=id,
                store_type=StoreType.DOCUMENTS,
                entity_names=entity_names,
                relationship_types=relationship_types,
                offset=offset,
                limit=limit,
            )

            return relationships, {"total_entries": count}  # type: ignore
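
        # Hedged usage sketch (comment only, not executed): filtering the
        # relationship listing by entity name and relationship type. The
        # list-typed Query parameters above are sent as repeated query keys,
        # which `requests` produces when given list values. The base URL, API
        # key, and filter values are placeholders.
        #
        #     import requests
        #
        #     resp = requests.get(
        #         "https://api.example.com/v3/documents/"
        #         "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/relationships",
        #         headers={"Authorization": "Bearer YOUR_API_KEY"},
        #         params={
        #             "entity_names": ["Acme Corp", "Jane Doe"],
        #             "relationship_types": ["EMPLOYS"],
        #             "offset": 0,
        #             "limit": 100,
        #         },
        #     )
        #     resp.raise_for_status()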

        @self.router.post(
            "/documents/{id}/relationships/export",
            summary="Export document relationships to CSV",
            dependencies=[Depends(self.rate_limit_dependency)],
            openapi_extra={
                "x-codeSamples": [
                    {
                        "lang": "Python",
                        "source": textwrap.dedent("""
                            from r2r import R2RClient
                            client = R2RClient("http://localhost:7272")
                            # when using auth, do client.login(...)
                            response = client.documents.export_relationships(
                                id="b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                output_path="export.csv",
                                columns=["id", "title", "created_at"],
                                include_header=True,
                            )
                            """),
                    },
                    {
                        "lang": "JavaScript",
                        "source": textwrap.dedent("""
                            const { r2rClient } = require("r2r-js");
                            const client = new r2rClient("http://localhost:7272");
                            async function main() {
                                await client.documents.exportRelationships({
                                    id: "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa",
                                    outputPath: "export.csv",
                                    columns: ["id", "title", "created_at"],
                                    includeHeader: true,
                                });
                            }
                            main();
                            """),
                    },
                    {
                        "lang": "cURL",
                        "source": textwrap.dedent("""
                            curl -X POST "http://127.0.0.1:7272/v3/documents/b4ac4dd6-5f27-596e-a55b-7cf242ca30aa/relationships/export" \\
                                 -H "Authorization: Bearer YOUR_API_KEY" \\
                                 -H "Content-Type: application/json" \\
                                 -H "Accept: text/csv" \\
                                 -d '{ "columns": ["id", "title", "created_at"], "include_header": true }' \\
                                 --output export.csv
                            """),
                    },
                ]
            },
        )
        @self.base_endpoint
        async def export_relationships(
            background_tasks: BackgroundTasks,
            id: UUID = Path(
                ...,
                description="The ID of the document to export relationships from.",
            ),
            columns: Optional[list[str]] = Body(
                None, description="Specific columns to export"
            ),
            filters: Optional[dict] = Body(
                None, description="Filters to apply to the export"
            ),
            include_header: Optional[bool] = Body(
                True, description="Whether to include column headers"
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> FileResponse:
            """Export the relationships of a document as a downloadable CSV file."""
            if not auth_user.is_superuser:
                raise R2RException(
                    "Only a superuser can export data.",
                    403,
                )

            (
                csv_file_path,
                temp_file,
            ) = await self.services.management.export_document_relationships(
                id=id,
                columns=columns,
                filters=filters,
                include_header=include_header
                if include_header is not None
                else True,
            )

            background_tasks.add_task(temp_file.close)

            return FileResponse(
                path=csv_file_path,
                media_type="text/csv",
                filename="documents_export.csv",
            )

        @self.router.post(
            "/documents/search",
            dependencies=[Depends(self.rate_limit_dependency)],
            summary="Search document summaries",
        )
        @self.base_endpoint
        async def search_documents(
            query: str = Body(
                ...,
                description="The search query to perform.",
            ),
            search_mode: SearchMode = Body(
                default=SearchMode.custom,
                description=(
                    "Default value of `custom` allows full control over search settings.\n\n"
                    "Pre-configured search modes:\n"
                    "`basic`: A simple semantic-based search.\n"
                    "`advanced`: A more powerful hybrid search combining semantic and full-text.\n"
                    "`custom`: Full control via `search_settings`.\n\n"
                    "If `filters` or `limit` are provided alongside `basic` or `advanced`, "
                    "they will override the default settings for that mode."
                ),
            ),
            search_settings: SearchSettings = Body(
                default_factory=SearchSettings,
                description="Settings for document search",
            ),
            auth_user=Depends(self.providers.auth.auth_wrapper()),
        ) -> WrappedDocumentSearchResponse:
            """Perform a search query on the automatically generated document
            summaries in the system.

            This endpoint allows for complex filtering of search results using
            PostgreSQL-based queries. Filters can be applied to various fields
            such as document_id, and internal metadata values.

            Allowed operators include `eq`, `neq`, `gt`, `gte`, `lt`, `lte`,
            `like`, `ilike`, `in`, and `nin`.
            """
            effective_settings = self._prepare_search_settings(
                auth_user, search_mode, search_settings
            )

            query_embedding = (
                await self.providers.embedding.async_get_embedding(query)
            )
            results = await self.services.retrieval.search_documents(
                query=query,
                query_embedding=query_embedding,
                settings=effective_settings,
            )
            return results  # type: ignore
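
        # Hedged usage sketch (comment only, not executed): a custom-mode
        # summary search with a filter and limit, matching the Body parameters
        # above. The filter operator uses the `$`-prefixed form seen elsewhere
        # in this router (e.g. `$eq`); the base URL, API key, and values are
        # placeholders.
        #
        #     import requests
        #
        #     resp = requests.post(
        #         "https://api.example.com/v3/documents/search",
        #         headers={"Authorization": "Bearer YOUR_API_KEY"},
        #         json={
        #             "query": "quarterly revenue summary",
        #             "search_mode": "custom",
        #             "search_settings": {
        #                 "filters": {
        #                     "document_id": {
        #                         "$eq": "b4ac4dd6-5f27-596e-a55b-7cf242ca30aa"
        #                     }
        #                 },
        #                 "limit": 10,
        #             },
        #         },
        #     )
        #     resp.raise_for_status()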

    @staticmethod
    async def _process_file(file):
        import base64

        content = await file.read()

        return {
            "filename": file.filename,
            "content": base64.b64encode(content).decode("utf-8"),
            "content_type": file.content_type,
            "content_length": len(content),
        }
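
    # Hedged usage sketch (comment only, not executed): a consumer of the
    # payload built by _process_file can recover the original bytes with the
    # standard library's base64 module.
    #
    #     import base64
    #
    #     raw = base64.b64decode(processed["content"])
    #     assert len(raw) == processed["content_length"]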