llm_backend.py

import logging
from typing import List, Optional

from openai import OpenAI, Stream
from openai.types.chat import ChatCompletion, ChatCompletionChunk


class LLMBackend:
    """Thin wrapper around the OpenAI chat completions interface."""

    def __init__(self, base_url: str, api_key) -> None:
        # Ensure the base URL ends with a trailing slash; fall back to the
        # default OpenAI endpoint when no base URL is given.
        self.base_url = base_url + "/" if base_url else None
        self.api_key = api_key
        self.client = OpenAI(base_url=self.base_url, api_key=self.api_key)

    def run(
        self,
        messages: List,
        model: str,
        tools: Optional[List] = None,
        tool_choice="auto",
        stream=False,
        stream_options=None,
        extra_body=None,
        temperature=None,
        top_p=None,
        response_format=None,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        chat_params = {
            "messages": messages,
            "model": model,
            "stream": stream,
        }
        # Merge caller-supplied model parameters, but reject "n" so the
        # backend never requests multiple completions per call.
        if extra_body:
            model_params = extra_body.get("model_params")
            if model_params:
                if "n" in model_params:
                    raise ValueError("n is not allowed in model_params")
                chat_params.update(model_params)
        # Only forward the include_usage flag from stream_options.
        if stream_options and isinstance(stream_options, dict):
            if "include_usage" in stream_options:
                chat_params["stream_options"] = {
                    "include_usage": bool(stream_options["include_usage"])
                }
        # Explicit None checks so 0 / 0.0 are passed through rather than dropped.
        if temperature is not None:
            chat_params["temperature"] = temperature
        if top_p is not None:
            chat_params["top_p"] = top_p
        if tools:
            chat_params["tools"] = tools
            chat_params["tool_choice"] = tool_choice if tool_choice else "auto"
        # Only the json_object response format is supported.
        if isinstance(response_format, dict) and response_format.get("type") == "json_object":
            chat_params["response_format"] = {"type": "json_object"}
        # Ensure every message carries a content field; fill missing ones
        # with an empty string.
        for message in chat_params["messages"]:
            if "content" not in message:
                message["content"] = ""
        logging.info("chat_params: %s", chat_params)
        response = self.client.chat.completions.create(**chat_params)
        logging.info("chat_response: %s", response)
        return response
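

# A minimal usage sketch (assumed, not part of the original module): the base
# URL, API key, and model name below are placeholders, and the streaming loop
# assumes the standard openai-python v1 chunk shape (choices[0].delta.content).
if __name__ == "__main__":
    backend = LLMBackend(base_url="https://api.openai.com/v1", api_key="YOUR_API_KEY")

    # Non-streaming call: run() returns a ChatCompletion object.
    completion = backend.run(
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        model="gpt-4o-mini",
        temperature=0.2,
    )
    print(completion.choices[0].message.content)

    # Streaming call: run() returns a Stream of ChatCompletionChunk objects.
    # With include_usage enabled, the final chunk has an empty choices list,
    # hence the guard before reading delta.content.
    for chunk in backend.run(
        messages=[{"role": "user", "content": "Count to three."}],
        model="gpt-4o-mini",
        stream=True,
        stream_options={"include_usage": True},
    ):
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)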