r2r_file.py 6.7 KB

import tempfile
import uuid
from pathlib import Path
from typing import List

import aiofiles
import aiofiles.os
from fastapi import UploadFile
from sqlalchemy.ext.asyncio import AsyncSession

from app.models import File
from app.providers.r2r import r2r
from app.providers.storage import storage
from app.services.file.impl.oss_file import OSSFileService

# import asyncio


class R2RFileService(OSSFileService):
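    """File service backed by R2R: uploaded files are ingested into R2R for
    retrieval, while plain object-storage handling is inherited from
    OSSFileService."""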
    @staticmethod
    async def create_file(
        *, session: AsyncSession, purpose: str, file: UploadFile
    ) -> File:
        # Check whether the file already exists (deduplication is currently disabled)
        # statement = (
        #     select(File)
        #     .where(File.purpose == purpose)
        #     .where(File.filename == file.filename)
        #     .where(File.bytes == file.size)
        # )
        # result = await session.execute(statement)
        # ext_file = result.scalars().first()
        # if ext_file is not None:
        #     # TODO: file deduplication strategy
        #     return ext_file

        file_extension = Path(file.filename).suffix
        file_key = f"{uuid.uuid4()}{file_extension}"
        # file_key = f"{uuid.uuid4()}-{file.filename}"

        # Spool the upload to a temporary file, then ingest it into R2R.
        with tempfile.NamedTemporaryFile(
            suffix="_" + file.filename, delete=True
        ) as temp_file:
            tmp_file_path = temp_file.name
            async with aiofiles.open(tmp_file_path, "wb") as f:
                while content := await file.read(1024):
                    await f.write(content)
            # storage.save_from_path(filename=file_key, local_file_path=tmp_file_path)
            await r2r.init()
            fileinfo = await r2r.ingest_file(
                file_path=tmp_file_path,
                metadata={"file_key": file_key, "title": file.filename},
            )
            fileinfo = fileinfo.results

        # Persist the file record
        db_file = File(
            purpose=purpose,
            filename=file.filename,
            bytes=file.size,
            key=fileinfo["document_id"],
        )
        session.add(db_file)
        await session.commit()
        await session.refresh(db_file)
        return db_file
    @staticmethod
    def search_in_files(
        query: str, file_keys: List[str], folder_keys: List[str] = None
    ) -> dict:
        files = {}
        # Keys that look like UUIDs (36 characters) are treated as R2R document
        # ids; anything else is matched against the `file_key` metadata field.
        file_key = {"$in": []}
        document_id = {"$in": []}
        filters = {"$or": []}
        for key in file_keys:
            if len(key) == 36:
                document_id["$in"].append(key)
            else:
                file_key["$in"].append(key)
        if len(document_id["$in"]) > 0:
            filters["$or"].append({"document_id": document_id})
        if len(file_key["$in"]) > 0:
            filters["$or"].append({"file_key": file_key})
        if folder_keys:
            filters["$or"].append({"collection_ids": {"$overlap": folder_keys}})
            # Alternative shapes considered:
            #   {"$or": [filters, {"collection_ids": {"$in": folder_keys}}]}
            #   filters["collection_ids"] = {"$overlap": folder_keys}
            #   {"$and": {"$document_id": ..., "collection_ids": ...}}
        # Example of the resulting filter shape:
        # {
        #     "$or": [
        #         {"document_id": {"$eq": "9fbe403b-..."}},
        #         {"collection_ids": {"$in": ["122fdf6a-...", "..."]}}
        #     ]
        # }
        # A single-clause "$or" is unwrapped into a plain filter.
        if len(filters["$or"]) == 1:
            filters = filters["$or"][0]

        # Earlier asyncio-based variants, kept for reference:
        # loop = asyncio.get_event_loop()  # get the current event loop
        # loop.run_until_complete(r2r.init())  # make sure r2r is initialized
        # search_results = loop.run_until_complete(r2r.search(query, filters=filters))
        # asyncio.run(r2r.init())
        # search_results = asyncio.run(r2r.search(query, filters=filters))
        # search_results = loop.run_until_complete(
        #     r2r.search(query, filters={"file_key": {"$in": file_keys}})
        # )
        r2r.init_sync()
        search_results = r2r.search(query, filters=filters)
        if not search_results:
            return files
        # Group the returned chunks by source file, concatenating their text.
        for doc in search_results:
            file_key = doc.get("metadata").get("file_key")
            file_key = (
                doc.get("metadata").get("title") if file_key is None else file_key
            )
            text = doc.get("text")
            if file_key in files and files[file_key]:
                files[file_key] += f"\n\n{text}"
            else:
                files[file_key] = text
        return files
    @staticmethod
    def list_in_files(
        ids: list[str] = None,
        offset: int = 0,
        limit: int = 100,
    ) -> dict:
        # Earlier asyncio-based variants, kept for reference:
        # loop = asyncio.get_event_loop()  # get the current event loop
        # loop.run_until_complete(r2r.init())  # make sure r2r is initialized
        # list_results = loop.run_until_complete(
        #     r2r.list(ids=ids, offset=offset, limit=limit)
        # )
        # asyncio.run(r2r.init())
        # list_results = asyncio.run(r2r.list(ids=ids, offset=offset, limit=limit))
        r2r.init_sync()
        list_results = r2r.list(ids=ids, offset=offset, limit=limit)
        return list_results
    @staticmethod
    def list_documents(
        id: str = "",
        offset: int = 0,
        limit: int = 100,
    ) -> dict:
        # Earlier asyncio-based variants, kept for reference:
        # loop = asyncio.get_event_loop()  # get the current event loop
        # loop.run_until_complete(r2r.init())  # make sure r2r is initialized
        # list_results = loop.run_until_complete(
        #     r2r.list_documents(id=id, offset=offset, limit=limit)
        # )
        # asyncio.run(r2r.init())
        # list_results = asyncio.run(
        #     r2r.list_documents(id=id, offset=offset, limit=limit)
        # )
        r2r.init_sync()
        list_results = r2r.list_documents(id=id, offset=offset, limit=limit)
        return list_results
    @staticmethod
    def list_chunks(ids: list[str]) -> dict:
        if len(ids) > 0:
            r2r.init_sync()
            list_results = r2r.list_chunks(ids=ids)
            # NOTE: the per-file aggregation below is currently unused;
            # the raw chunk list is returned as-is.
            files = {}
            for doc in list_results:
                file_key = doc.get("metadata").get("file_key")
                file_key = (
                    doc.get("metadata").get("title") if file_key is None else file_key
                )
                text = doc.get("text")
                if file_key in files and files[file_key]:
                    files[file_key] += f"\n\n{text}"
                else:
                    files[file_key] = text
            return list_results
        return {}
    # TODO: delete the S3 & R2R files when a file record is removed
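    # A minimal sketch of what that deletion path could look like. This is an
    # assumption, not the project's implementation: `r2r.delete(...)` and
    # `storage.delete_file(...)` are hypothetical provider methods and the real
    # APIs may differ.
    #
    # @staticmethod
    # async def delete_file(*, session: AsyncSession, file_id: str) -> None:
    #     db_file = await session.get(File, file_id)
    #     if db_file is None:
    #         return
    #     await r2r.init()
    #     await r2r.delete(document_id=db_file.key)   # drop the R2R document
    #     storage.delete_file(file_key=db_file.key)   # drop the S3/OSS object
    #     await session.delete(db_file)
    #     await session.commit()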