diff --git a/pyproject.toml b/pyproject.toml index 271dfae..c067b53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ allow-direct-references = true [project] name = "matrix-gptbot" -version = "0.3.3" +version = "0.3.4" authors = [ { name="Kumi Mitterer", email="gptbot@kumi.email" }, diff --git a/src/gptbot/classes/openai.py b/src/gptbot/classes/openai.py index 612a124..16b22ac 100644 --- a/src/gptbot/classes/openai.py +++ b/src/gptbot/classes/openai.py @@ -164,7 +164,7 @@ class OpenAI: if count > 5: self.logger.log(f"Recursion depth exceeded, aborting.") return await self.generate_chat_response( - messages, + messages=messages, user=user, room=room, allow_override=False, # TODO: Could this be a problem? @@ -298,7 +298,7 @@ class OpenAI: return (e.args[0] if e.args else False), 0 except Handover: return await self.generate_chat_response( - original_messages, + messages=original_messages, user=user, room=room, allow_override=False, @@ -318,7 +318,7 @@ class OpenAI: + original_messages[-1:] ) result_text, additional_tokens = await self.generate_chat_response( - messages, user=user, room=room, model=original_messages + messages=messages, user=user, room=room, model=original_model ) except openai.APIError as e: if e.code == "max_tokens": @@ -345,7 +345,7 @@ class OpenAI: result_text, additional_tokens, ) = await self.generate_chat_response( - new_messages, + messages=new_messages, user=user, room=room, allow_override=False, @@ -359,7 +359,7 @@ class OpenAI: result_text, additional_tokens, ) = await self.generate_chat_response( - original_messages, + messages=original_messages, user=user, room=room, allow_override=False, @@ -407,7 +407,7 @@ class OpenAI: return (e.args[0] if e.args else False), 0 except Handover: return await self.generate_chat_response( - original_messages, + messages=original_messages, user=user, room=room, allow_override=False, @@ -430,7 +430,10 @@ class OpenAI: result_text, additional_tokens, ) = await self.generate_chat_response( - 
messages, user=user, room=room + messages=messages, + user=user, + room=room, + model=original_model, ) except openai.APIError as e: if e.code == "max_tokens": @@ -438,7 +441,7 @@ class OpenAI: result_text, additional_tokens, ) = await self.generate_chat_response( - original_messages, + messages=original_messages, user=user, room=room, allow_override=False, @@ -449,7 +452,7 @@ class OpenAI: raise e else: result_text, additional_tokens = await self.generate_chat_response( - original_messages, + messages=original_messages, user=user, room=room, allow_override=False, @@ -474,7 +477,7 @@ class OpenAI: new_messages.append(new_message) result_text, additional_tokens = await self.generate_chat_response( - new_messages, + messages=new_messages, user=user, room=room, allow_override=False,