From 2d564afd979e7bc9eee8204450254c9f86b663b5 Mon Sep 17 00:00:00 2001
From: Kumi <gptbot@kumi.email>
Date: Thu, 15 Feb 2024 18:11:19 +0100
Subject: [PATCH] Fix parameter passing in chat response calls

Standardize the passing of the 'messages' argument as an explicit
keyword across all calls to the generate_chat_response method, keeping
the call sites consistent and preventing potential bugs in the GPT
bot's response generation. The 'model' parameter in one instance has
been corrected from 'original_messages' to 'original_model' so that the
originally requested model is used when a response is regenerated.
These changes improve code clarity and maintain the intended message
flow within the bot's conversation handling.
---
 pyproject.toml               |  2 +-
 src/gptbot/classes/openai.py | 23 +++++++++++++----------
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 271dfae..c067b53 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ allow-direct-references = true
 
 [project]
 name = "matrix-gptbot"
-version = "0.3.3"
+version = "0.3.4"
 authors = [
   { name="Kumi Mitterer", email="gptbot@kumi.email" },
 ]
diff --git a/src/gptbot/classes/openai.py b/src/gptbot/classes/openai.py
index 612a124..16b22ac 100644
--- a/src/gptbot/classes/openai.py
+++ b/src/gptbot/classes/openai.py
@@ -164,7 +164,7 @@ class OpenAI:
         if count > 5:
             self.logger.log(f"Recursion depth exceeded, aborting.")
             return await self.generate_chat_response(
-                messages,
+                messages=messages,
                 user=user,
                 room=room,
                 allow_override=False,  # TODO: Could this be a problem?
@@ -298,7 +298,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -318,7 +318,7 @@ class OpenAI:
                     + original_messages[-1:]
                 )
                 result_text, additional_tokens = await self.generate_chat_response(
-                    messages, user=user, room=room, model=original_messages
+                    messages=messages, user=user, room=room, model=original_model
                 )
             except openai.APIError as e:
                 if e.code == "max_tokens":
@@ -345,7 +345,7 @@ class OpenAI:
                         result_text,
                         additional_tokens,
                     ) = await self.generate_chat_response(
-                        new_messages,
+                        messages=new_messages,
                         user=user,
                         room=room,
                         allow_override=False,
@@ -359,7 +359,7 @@ class OpenAI:
                         result_text,
                         additional_tokens,
                     ) = await self.generate_chat_response(
-                        original_messages,
+                        messages=original_messages,
                         user=user,
                         room=room,
                         allow_override=False,
@@ -407,7 +407,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -430,7 +430,10 @@ class OpenAI:
                     result_text,
                     additional_tokens,
                 ) = await self.generate_chat_response(
-                    messages, user=user, room=room
+                    messages=messages,
+                    user=user,
+                    room=room,
+                    model=original_model,
                 )
             except openai.APIError as e:
                 if e.code == "max_tokens":
@@ -438,7 +441,7 @@ class OpenAI:
                         result_text,
                         additional_tokens,
                     ) = await self.generate_chat_response(
-                        original_messages,
+                        messages=original_messages,
                         user=user,
                         room=room,
                         allow_override=False,
@@ -449,7 +452,7 @@ class OpenAI:
                     raise e
         else:
             result_text, additional_tokens = await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -474,7 +477,7 @@ class OpenAI:
             new_messages.append(new_message)
 
         result_text, additional_tokens = await self.generate_chat_response(
-            new_messages,
+            messages=new_messages,
            user=user,
             room=room,
             allow_override=False,
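
Note on the pattern: with a signature that takes several optional
parameters, a positional argument can bind to the wrong parameter
without any immediate error, which is the class of bug fixed above
(the old 'model=original_messages' call site). The sketch below is
hypothetical: the coroutine is a simplified stand-in, not the real
OpenAI.generate_chat_response, and only the parameter names messages,
user, room, allow_override, and model are taken from the diff.

import asyncio

# Hypothetical stand-in for OpenAI.generate_chat_response; only the
# parameter names mirror the diff above.
async def generate_chat_response(messages, user=None, room=None,
                                 allow_override=True, model="default-model"):
    # Echo what was bound so misbindings are visible.
    return f"model={model!r}, messages={len(messages)} item(s)"

async def main():
    original_messages = [{"role": "user", "content": "hi"}]

    # Before the patch: a message list could end up bound to 'model'
    # with no error, only a wrong response downstream.
    print(await generate_chat_response(original_messages,
                                       model=original_messages))

    # After the patch: every argument is bound by keyword, so the
    # intended binding is explicit at each call site.
    print(await generate_chat_response(messages=original_messages,
                                       model="default-model"))

asyncio.run(main())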