llm_callback_handler.py

import logging

from openai import Stream
from openai.types.chat import ChatCompletionChunk, ChatCompletionMessage

from app.core.runner.pub_handler import StreamEventHandler
from app.core.runner.utils import message_util


class LLMCallbackHandler:
    """
    LLM chat callback handler, handling message sending and message merging
    """

    def __init__(
        self,
        run_id: str,
        on_step_create_func,
        on_message_create_func,
        event_handler: StreamEventHandler,
    ) -> None:
        super().__init__()
        self.run_id = run_id
        self.final_message_started = False
        self.on_step_create_func = on_step_create_func
        self.step = None
        self.on_message_create_func = on_message_create_func
        self.message = None
        self.event_handler: StreamEventHandler = event_handler
    def handle_llm_response(
        self,
        response_stream: Stream[ChatCompletionChunk],
    ) -> ChatCompletionMessage:
        """
        Handle LLM response stream
        :param response_stream: ChatCompletionChunk stream
        :return: ChatCompletionMessage
        """
        message = ChatCompletionMessage(content="", role="assistant", tool_calls=[])
        index = 0  # content part index passed along with each delta event
        try:
            for chunk in response_stream:
                logging.debug(chunk)
                # chunks without choices (or without a delta) may still carry usage stats
                if not chunk.choices:
                    if chunk.usage:
                        self.event_handler.pub_message_usage(chunk)
                    continue
                choice = chunk.choices[0]
                logging.debug(choice)
                delta = choice.delta
                logging.debug(delta)
                if not delta:
                    if chunk.usage:
                        self.event_handler.pub_message_usage(chunk)
                    continue
                logging.debug("delta.tool_calls: %s", delta.tool_calls)
                # merge tool call delta
                if delta.tool_calls:
                    for tool_call_delta in delta.tool_calls:
                        message_util.merge_tool_call_delta(message.tool_calls, tool_call_delta)
                elif delta.content is not None:
                    # on the first content delta, lazily create the message and run step
                    if not self.final_message_started:
                        self.final_message_started = True
                        self.message = self.on_message_create_func(content="")
                        self.step = self.on_step_create_func(self.message.id)
                        logging.debug("create message and step (%s), (%s)", self.message, self.step)
                        self.event_handler.pub_run_step_created(self.step)
                        self.event_handler.pub_run_step_in_progress(self.step)
                        self.event_handler.pub_message_created(self.message)
                        self.event_handler.pub_message_in_progress(self.message)
                    # append message content delta
                    message.content += delta.content
                    self.event_handler.pub_message_delta(self.message.id, index, delta.content, delta.role)
                if chunk.usage:
                    self.event_handler.pub_message_usage(chunk)
        except Exception as e:
            logging.error("handle_llm_response error: %s", e)
            raise
        logging.debug("handle_llm_response finished, message: %s", message)
        return message