extraction.py

import asyncio
import json
import logging
import re
import time
from typing import Any, AsyncGenerator, Optional, Union

from core.base import (
    AsyncState,
    CompletionProvider,
    DocumentChunk,
    Entity,
    GenerationConfig,
    KGExtraction,
    R2RDocumentProcessingError,
    R2RException,
    Relationship,
)
from core.base.pipes.base_pipe import AsyncPipe

from ...database.postgres import PostgresDatabaseProvider

logger = logging.getLogger()
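
# Responses longer than this that still contain no parseable entities are
# treated as LLM formatting failures; shorter responses may simply be empty.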
MIN_VALID_KG_EXTRACTION_RESPONSE_LENGTH = 128


class ClientError(Exception):
    """Base class for client connection errors."""

    pass


class KGExtractionPipe(AsyncPipe[dict]):
    """
    Extracts knowledge graph information from document extractions.
    """

    # TODO - Apply correct type hints to storage messages
    class Input(AsyncPipe.Input):
        message: dict

    def __init__(
        self,
        database_provider: PostgresDatabaseProvider,
        llm_provider: CompletionProvider,
        config: AsyncPipe.PipeConfig,
        kg_batch_size: int = 1,
        graph_rag: bool = True,
        id_prefix: str = "demo",
        *args,
        **kwargs,
    ):
        super().__init__(
            config=config
            or AsyncPipe.PipeConfig(
                name="default_kg_relationships_extraction_pipe"
            ),
        )
        self.database_provider = database_provider
        self.llm_provider = llm_provider
        self.kg_batch_size = kg_batch_size
        self.id_prefix = id_prefix
        self.pipe_run_info = None
        self.graph_rag = graph_rag

    async def extract_kg(
        self,
        extractions: list[DocumentChunk],
        generation_config: GenerationConfig,
        max_knowledge_relationships: int,
        entity_types: list[str],
        relation_types: list[str],
        retries: int = 5,
        delay: int = 2,
        task_id: Optional[int] = None,
        total_tasks: Optional[int] = None,
    ) -> KGExtraction:
        """
        Extracts NER relationships from an extraction with retries.
        """
        # combine all extractions into a single string
        combined_extraction: str = " ".join([extraction.data for extraction in extractions])  # type: ignore

        messages = await self.database_provider.prompts_handler.get_message_payload(
            task_prompt_name=self.database_provider.config.graph_creation_settings.graphrag_relationships_extraction_few_shot,
            task_inputs={
                "input": combined_extraction,
                "max_knowledge_relationships": max_knowledge_relationships,
                "entity_types": "\n".join(entity_types),
                "relation_types": "\n".join(relation_types),
            },
        )
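        # The few-shot prompt template is loaded from the database's prompt
        # store and filled with the combined chunk text and allowed type lists.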

        for attempt in range(retries):
            try:
                response = await self.llm_provider.aget_completion(
                    messages,
                    generation_config=generation_config,
                )

                kg_extraction = response.choices[0].message.content

                if not kg_extraction:
                    raise R2RException(
                        "No knowledge graph extraction found in the response string, the selected LLM likely failed to format its response correctly.",
                        400,
                    )

                entity_pattern = (
                    r'\("entity"\${4}([^$]+)\${4}([^$]+)\${4}([^$]+)\)'
                )
                relationship_pattern = r'\("relationship"\${4}([^$]+)\${4}([^$]+)\${4}([^$]+)\${4}([^$]+)\${4}(\d+(?:\.\d+)?)\)'
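                # The patterns above expect four-dollar-sign-delimited records,
                # e.g. (illustrative values, not taken from the actual prompt):
                #   ("entity"$$$$ACME$$$$ORGANIZATION$$$$A fictional company)
                #   ("relationship"$$$$ACME$$$$WIDGET$$$$PRODUCES$$$$ACME makes widgets$$$$8)
                # Entity groups: name, category, description; relationship groups:
                # subject, object, predicate, description, numeric weight.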

                def parse_fn(response_str: str) -> Any:
                    entities = re.findall(entity_pattern, response_str)

                    if (
                        len(kg_extraction)
                        > MIN_VALID_KG_EXTRACTION_RESPONSE_LENGTH
                        and len(entities) == 0
                    ):
                        raise R2RException(
                            f"No entities found in the response string, the selected LLM likely failed to format its response correctly. {response_str}",
                            400,
                        )

                    relationships = re.findall(
                        relationship_pattern, response_str
                    )

                    entities_arr = []
                    for entity in entities:
                        entity_value = entity[0]
                        entity_category = entity[1]
                        entity_description = entity[2]
                        entities_arr.append(
                            Entity(
                                category=entity_category,
                                description=entity_description,
                                name=entity_value,
                                parent_id=extractions[0].document_id,
                                chunk_ids=[
                                    extraction.id
                                    for extraction in extractions
                                ],
                                attributes={},
                            )
                        )

                    relations_arr = []
                    for relationship in relationships:
                        subject = relationship[0]
                        object = relationship[1]
                        predicate = relationship[2]
                        description = relationship[3]
                        weight = float(relationship[4])
                        # check if subject and object are in entities_dict
                        relations_arr.append(
                            Relationship(
                                subject=subject,
                                predicate=predicate,
                                object=object,
                                description=description,
                                weight=weight,
                                parent_id=extractions[0].document_id,
                                chunk_ids=[
                                    extraction.id
                                    for extraction in extractions
                                ],
                                attributes={},
                            )
                        )

                    return entities_arr, relations_arr

                entities, relationships = parse_fn(kg_extraction)
                return KGExtraction(
                    entities=entities,
                    relationships=relationships,
                )
            except (
                ClientError,
                json.JSONDecodeError,
                KeyError,
                IndexError,
                R2RException,
            ) as e:
                if attempt < retries - 1:
                    await asyncio.sleep(delay)
                else:
                    logger.error(
                        f"Failed after retries for chunk {extractions[0].id} of document {extractions[0].document_id}: {e}"
                    )
                    # raise e  # TODO: this error should be raised rather than silently swallowed.

        # add metadata to entities and relationships

        logger.info(
            f"KGExtractionPipe: Completed task number {task_id} of {total_tasks} for document {extractions[0].document_id}",
        )

        return KGExtraction(
            entities=[],
            relationships=[],
        )

    async def _run_logic(  # type: ignore
        self,
        input: Input,
        state: AsyncState,
        run_id: Any,
        *args: Any,
        **kwargs: Any,
    ) -> AsyncGenerator[Union[KGExtraction, R2RDocumentProcessingError], None]:
        start_time = time.time()

        document_id = input.message["document_id"]
        generation_config = input.message["generation_config"]
        chunk_merge_count = input.message["chunk_merge_count"]
        max_knowledge_relationships = input.message[
            "max_knowledge_relationships"
        ]
        entity_types = input.message["entity_types"]
        relation_types = input.message["relation_types"]
        filter_out_existing_chunks = input.message.get(
            "filter_out_existing_chunks", True
        )

        logger = input.message.get("logger", logging.getLogger())

        logger.info(
            f"KGExtractionPipe: Processing document {document_id} for KG extraction",
        )

        # Then create the extractions from the results
        extractions = [
            DocumentChunk(
                id=extraction["id"],
                document_id=extraction["document_id"],
                owner_id=extraction["owner_id"],
                collection_ids=extraction["collection_ids"],
                data=extraction["text"],
                metadata=extraction["metadata"],
            )
            for extraction in (
                await self.database_provider.documents_handler.list_document_chunks(  # FIXME: This was using the pagination defaults from before... We need to review if this is as intended.
                    document_id=document_id,
                    offset=0,
                    limit=100,
                )
            )["results"]
        ]

        logger.info(
            f"Found {len(extractions)} extractions for document {document_id}"
        )

        if filter_out_existing_chunks:
            existing_chunk_ids = await self.database_provider.graphs_handler.get_existing_document_entity_chunk_ids(
                document_id=document_id
            )
            extractions = [
                extraction
                for extraction in extractions
                if extraction.id not in existing_chunk_ids
            ]
            logger.info(
                f"Filtered out {len(existing_chunk_ids)} existing extractions, remaining {len(extractions)} extractions for document {document_id}"
            )

        if len(extractions) == 0:
            logger.info(f"No extractions left for document {document_id}")
            return

        logger.info(
            f"KGExtractionPipe: Obtained {len(extractions)} extractions to process, time from start: {time.time() - start_time:.2f} seconds",
        )

        # sort the extractions according to the chunk_order field in metadata, in ascending order
        extractions = sorted(
            extractions,
            key=lambda x: x.metadata.get("chunk_order", float("inf")),
        )

        # group these extractions into groups of chunk_merge_count
        extractions_groups = [
            extractions[i : i + chunk_merge_count]
            for i in range(0, len(extractions), chunk_merge_count)
        ]
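        # e.g. with 5 extractions and chunk_merge_count=2 this yields three
        # groups ([e0, e1], [e2, e3], [e4]); each group becomes one LLM task.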

        logger.info(
            f"KGExtractionPipe: Extracting KG Relationships for document and created {len(extractions_groups)} tasks, time from start: {time.time() - start_time:.2f} seconds",
        )

        tasks = [
            asyncio.create_task(
                self.extract_kg(
                    extractions=extractions_group,
                    generation_config=generation_config,
                    max_knowledge_relationships=max_knowledge_relationships,
                    entity_types=entity_types,
                    relation_types=relation_types,
                    task_id=task_id,
                    total_tasks=len(extractions_groups),
                )
            )
            for task_id, extractions_group in enumerate(extractions_groups)
        ]
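        # Tasks run concurrently; results are yielded below in completion
        # order, not in document order.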

        completed_tasks = 0
        total_tasks = len(tasks)

        logger.info(
            f"KGExtractionPipe: Waiting for {total_tasks} KG extraction tasks to complete",
        )

        for completed_task in asyncio.as_completed(tasks):
            try:
                yield await completed_task
                completed_tasks += 1
                if completed_tasks % 100 == 0:
                    logger.info(
                        f"KGExtractionPipe: Completed {completed_tasks}/{total_tasks} KG extraction tasks",
                    )
            except Exception as e:
                logger.error(f"Error in Extracting KG Relationships: {e}")
                yield R2RDocumentProcessingError(
                    document_id=document_id,
                    error_message=str(e),
                )

        logger.info(
            f"KGExtractionPipe: Completed {completed_tasks}/{total_tasks} KG extraction tasks, time from start: {time.time() - start_time:.2f} seconds",
        )
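
# A minimal wiring sketch (illustrative only; `db`, `llm`, and the message
# payload below are assumptions, not exports of this module):
#
#   pipe = KGExtractionPipe(
#       database_provider=db,
#       llm_provider=llm,
#       config=AsyncPipe.PipeConfig(name="kg_extraction"),
#   )
#   async for result in pipe._run_logic(
#       input=KGExtractionPipe.Input(message={"document_id": doc_id, ...}),
#       state=state,
#       run_id=run_id,
#   ):
#       ...  # KGExtraction on success, R2RDocumentProcessingError on failure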