file_search_tool.py 6.2 KB

from typing import Type, List
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from app.core.tools.base_tool import BaseTool
from app.models.run import Run
from app.services.file.file import FileService
from app.services.assistant.assistant import AssistantService
import asyncio
import nest_asyncio

# Allow async code to be nested inside an already running event loop.
nest_asyncio.apply()
# asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())


class FileSearchToolInput(BaseModel):
    indexes: List[int] = Field(
        ..., description="file index list to look up in retrieval"
    )
    query: str = Field(..., description="query to look up in retrieval")
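
# Illustrative example (an assumption about how the model calls this tool, not
# taken from this file): the tool arguments produced by the LLM are validated
# against FileSearchToolInput, e.g.
#
#     FileSearchToolInput(indexes=[0, 2], query="termination clause")
#
# where the indexes refer to the "(index)filename" lines emitted by
# FileSearchTool.instruction_supplement() below.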


class FileSearchTool(BaseTool):
    name: str = "file_search"
    description: str = (
        "Can be used to look up information that was uploaded to this assistant. "
        "If the user is referencing particular files, that is often a good hint that information may be here."
    )

    args_schema: Type[BaseModel] = FileSearchToolInput

    def __init__(self) -> None:
        super().__init__()
        self.__filenames = []
        self.__keys = []
        self.loop = None

    def configure(self, session: Session, run: Run, **kwargs):
        """
        Set up the file information involved in the current retrieval.
        """
        # Reference note: to submit a coroutine to an already running event loop
        # from another thread and block for the result:
        #     future = asyncio.run_coroutine_threadsafe(async_task(), loop)
        #     result = future.result()
        document_id = []
        file_key = []
        filesinfo = []
        # Files may also be picked from the knowledge base later on, so they may
        # not exist in the openassistant database.
        for key in run.file_ids:
            if len(key) == 36:
                document_id.append(key)
            else:
                file_key.append(key)
        print("document_id:", document_id)
        print("file_key:", file_key)
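        # Illustrative (assumed) examples of the two key formats:
        #     "4f6e2a1c-9b3d-4c8e-8a52-0d7f1b2c3d4e"   -> 36 chars, treated as a document id
        #     "4f6e2a1c9b3d4c8e8a520d7f1b2c3d4e.pdf"   -> anything else, treated as a legacy file key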
        files = []
        # Keys in the old "uuid.ext" format only existed in the earliest versions
        # and should be removed eventually.
        if len(file_key) > 0:
            # Fetch the file records from the database.
            files = FileService.get_file_list_by_ids(session=session, file_ids=file_key)
            print(files)
        # The r2r API does not support querying by multiple criteria at once;
        # otherwise the branch above would be unnecessary.
        if len(document_id) > 0:
            filesinfo += FileService.list_in_files(ids=document_id, offset=0, limit=100)
            # asyncio.run(
            #     FileService.list_in_files(ids=document_id, offset=0, limit=100)
            # )
            for file in filesinfo:
                self.__filenames.append(file.get("title"))
                self.__keys.append(file.get("id"))
            print(filesinfo)
        # files = FileService.list_in_files(ids=run.file_ids, offset=0, limit=100)
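        # Each entry returned by list_in_files / list_documents is expected (based
        # on the .get() calls in this method, not on documented API types) to look
        # roughly like:
        #     {"id": "<document id>", "title": "<original filename>", ...}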
        db_asst = AssistantService.get_assistant_sync(
            session=session, assistant_id=run.assistant_id
        )
        if db_asst.tool_resources and "file_search" in db_asst.tool_resources:
            # Expected shape: {"file_search": {"vector_stores": [{"folder_ids": []}]}}
            asst_folder_ids = (
                db_asst.tool_resources.get("file_search")
                .get("vector_stores")[0]
                .get("folder_ids")
            )
            print(asst_folder_ids)
            folder_fileinfo = []
            if asst_folder_ids:
                for fid in asst_folder_ids:
                    folder_fileinfo += FileService.list_documents(
                        id=fid, offset=0, limit=100
                    )
                    # folder_fileinfo += asyncio.run(
                    #     FileService.list_documents(id=fid, offset=0, limit=100)
                    # )
                print(folder_fileinfo)
                for file in folder_fileinfo:
                    self.__filenames.append(file.get("title"))
                    self.__keys.append(file.get("id"))
        # Pre-cache data to prevent thread conflicts that may occur later on.
        print("files:", files)
        for file in files:
            self.__filenames.append(file.filename)
            self.__keys.append(file.key)
        print(self.__keys)

    def run(self, indexes: List[int], query: str) -> dict:
        file_keys = []
        for index in indexes:
            if index is not None:
                file_key = self.__keys[index]
                file_keys.append(file_key)
        print("file_keys:", file_keys)
        files = []
        if len(file_keys) > 0:
            # self.loop = asyncio.get_event_loop()
            files = FileService.search_in_files(query=query, file_keys=file_keys)
            # files = asyncio.run(
            #     FileService.search_in_files(query=query, file_keys=file_keys)
            # )
            print(files)
        return files
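
    # Illustrative call (an assumption, not part of this module): once configure()
    # has populated the cached file list, the tool might be invoked as
    #     tool.run(indexes=[0, 1], query="payment terms")
    # which searches only the files at positions 0 and 1 of the cached key list.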

    def instruction_supplement(self) -> str:
        """
        Provide file-selection information for retrieval so that the LLM can
        decide which files to query.
        """
        if len(self.__filenames) == 0:
            return ""
        else:
            filenames_info = [
                f"({index}){filename}"
                for index, filename in enumerate(self.__filenames)
            ]
            return (
                'You can use the "retrieval" tool to retrieve relevant context from the following attached files. '
                + 'Each line represents a file in the format "(index)filename":\n'
                + "\n".join(filenames_info)
                + "\nMake sure to be extremely concise when using attached files. "
            )
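
# Minimal usage sketch (assumptions: a SQLAlchemy `session` and a `Run` row are
# available from the surrounding application; neither is defined in this file):
#
#     tool = FileSearchTool()
#     tool.configure(session=session, run=run)
#     supplement = tool.instruction_supplement()  # appended to the LLM instructions
#     # ...the LLM chooses indexes based on the "(index)filename" lines...
#     results = tool.run(indexes=[0], query="What does the contract say about renewal?")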