jack 3 kuukautta sitten
vanhempi
commit
389d4cc78b

+ 10 - 0
app/core/runner/llm_backend.py

@@ -27,11 +27,15 @@ class LLMBackend:
         temperature=None,
         top_p=None,
         response_format=None,
+        parallel_tool_calls=False,
+        audio=None,
+        modalities=None,
     ) -> ChatCompletion | Stream[ChatCompletionChunk]:
         chat_params = {
             "messages": messages,
             "model": model,
             "stream": stream,
+            "parallel_tool_calls": parallel_tool_calls,
         }
         if extra_body:
             model_params = extra_body.get("model_params")
@@ -53,6 +57,12 @@ class LLMBackend:
                     chat_params["stream_options"] = {
                         "include_usage": bool(stream_options["include_usage"])
                     }
+        if parallel_tool_calls:
+            chat_params["parallel_tool_calls"] = parallel_tool_calls
+        if audio:
+            chat_params["audio"] = audio
+        if modalities:
+            chat_params["modalities"] = modalities
         if temperature:
             chat_params["temperature"] = temperature
         if top_p:

+ 3 - 0
app/core/runner/thread_runner.py

@@ -159,6 +159,9 @@ class ThreadRunner:
             temperature=run.temperature,
             top_p=run.top_p,
             response_format=run.response_format,
+            parallel_tool_calls=run.parallel_tool_calls,
+            audio=run.audio,
+            modalities=run.modalities,
         )
 
         # create message callback

+ 7 - 4
app/core/tools/file_search_tool.py

@@ -104,10 +104,13 @@ class FileSearchTool(BaseTool):
         files = []
         ## 必须有总结的内容query和才能触发
         if self.index == 0 and query:
-            files = FileService.search_in_files(
-                query=query, file_keys=self.__keys, folder_keys=self.__dirkeys
-            )
-            self.index = 1
+            try:
+                files = FileService.search_in_files(
+                    query=query, file_keys=self.__keys, folder_keys=self.__dirkeys
+                )
+                self.index = 1
+            except Exception as e:
+                print(e)
         # print(files)
         return files
 

+ 29 - 1
app/models/run.py

@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import Optional, Any, Union
+from typing import Optional, Any, Iterable, Dict, List, Union
 
 from pydantic import Field as PDField
 
@@ -12,6 +12,28 @@ from pydantic import model_validator
 from app.models.base_model import BaseModel, TimeStampMixin, PrimaryKeyMixin
 from app.models.message import MessageCreate
 from app.schemas.tool.authentication import Authentication
+from typing_extensions import Literal, Required, TypedDict, TypeAlias
+
+
+class ChatCompletionAudioParam(TypedDict, total=False):
+    format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
+    """Specifies the output audio format.
+
+    Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
+    """
+
+    voice: Required[
+        Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    ]
+    """The voice the model uses to respond.
+
+    Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also
+    supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices
+    are less expressive).
+    """
+
+
+ChatCompletionModality: TypeAlias = Literal["text", "audio"]
 
 
 class RunBase(BaseModel):
@@ -71,6 +93,9 @@ class RunBase(BaseModel):
     usage: Optional[dict] = Field(default=None, sa_column=Column(JSON))  # 调用使用情况
     temperature: Optional[float] = Field(default=None)  # 温度
     top_p: Optional[float] = Field(default=None)  # top_p
+    parallel_tool_calls: bool = Field(default=False)  # parallel_tool_calls
+    audio: Optional[ChatCompletionAudioParam] = Field(default=None)  # audio
+    modalities: Optional[List[ChatCompletionModality]] = Field(default=None)
 
 
 class Run(RunBase, PrimaryKeyMixin, TimeStampMixin, table=True):
@@ -107,6 +132,9 @@ class RunCreate(BaseModel):
     tool_choice: Optional[str] = Field(default=None)  # 工具选择
     temperature: Optional[float] = Field(default=None)  # 温度
     top_p: Optional[float] = Field(default=None)  # top_p
+    parallel_tool_calls: bool = Field(default=False)  # parallel_tool_calls
+    audio: Optional[ChatCompletionAudioParam] = Field(default=None)  # audio
+    modalities: Optional[List[ChatCompletionModality]] = Field(default=None)
 
     @model_validator(mode="before")
     def model_validator(cls, data: Any):