@@ -31,6 +31,10 @@ class LLMBackend:
         audio=None,
         modalities=None,
     ) -> ChatCompletion | Stream[ChatCompletionChunk]:
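+        # Reasoning models (o1/o3/gpt-5 families) reject temperature/top_p, so clear them.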
+        if model.startswith(("o1", "o3", "gpt-5")):
+            temperature = None
+            top_p = None
         chat_params = {
             "messages": messages,
             "model": model,