Fix parameter passing in chat response calls

Standardize the passing of the 'messages' argument across all calls to
the generate_chat_response method to ensure consistency and prevent
potential bugs in the GPT bot's response generation. The 'model'
parameter in one call has also been corrected from 'original_messages'
to 'original_model' so that the proper model is loaded. These changes
improve code clarity and preserve the intended message flow within the
bot's conversation handling.
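
For illustration, a minimal sketch of the calling convention this commit
standardizes. The signature below is an assumption inferred from the
call sites in the diff (the real method lives on the OpenAI class and
takes more parameters); the user, room, and model values are
placeholders:

import asyncio

# Hypothetical stand-in for generate_chat_response; the signature is
# assumed from the call sites in this diff, not taken from the source.
async def generate_chat_response(messages, user=None, room=None,
                                 allow_override=True, model=None):
    # Echo what was received so the binding of each argument is visible.
    return f"model={model!r}, {len(messages)} message(s)", 0

async def main():
    original_messages = [{"role": "user", "content": "Hello"}]
    original_model = "gpt-3.5-turbo"  # placeholder model name

    # Before: 'messages' passed positionally, and one call site handed
    # the message list to 'model' (model=original_messages).
    text, _tokens = await generate_chat_response(
        original_messages, user="@alice:example.org", room="!room:example.org"
    )
    print(text)

    # After: every argument is named, so a misrouted value is obvious at
    # the call site and the intended model is passed explicitly.
    text, _tokens = await generate_chat_response(
        messages=original_messages,
        user="@alice:example.org",
        room="!room:example.org",
        model=original_model,
    )
    print(text)

asyncio.run(main())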
Kumi 2024-02-15 18:11:19 +01:00
parent 10b74187eb
commit 2d564afd97
Signed by: kumi
GPG key ID: ECBCC9082395383F
2 changed files with 14 additions and 11 deletions

pyproject.toml

@@ -7,7 +7,7 @@ allow-direct-references = true
 [project]
 name = "matrix-gptbot"
-version = "0.3.3"
+version = "0.3.4"
 authors = [
     { name="Kumi Mitterer", email="gptbot@kumi.email" },

@@ -164,7 +164,7 @@ class OpenAI:
         if count > 5:
             self.logger.log(f"Recursion depth exceeded, aborting.")
             return await self.generate_chat_response(
-                messages,
+                messages=messages,
                 user=user,
                 room=room,
                 allow_override=False,  # TODO: Could this be a problem?
@@ -298,7 +298,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -318,7 +318,7 @@ class OpenAI:
                 + original_messages[-1:]
             )
             result_text, additional_tokens = await self.generate_chat_response(
-                messages, user=user, room=room, model=original_messages
+                messages=messages, user=user, room=room, model=original_model
             )
         except openai.APIError as e:
             if e.code == "max_tokens":
@@ -345,7 +345,7 @@ class OpenAI:
                 result_text,
                 additional_tokens,
             ) = await self.generate_chat_response(
-                new_messages,
+                messages=new_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -359,7 +359,7 @@ class OpenAI:
                 result_text,
                 additional_tokens,
             ) = await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -407,7 +407,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -430,7 +430,10 @@ class OpenAI:
                 result_text,
                 additional_tokens,
             ) = await self.generate_chat_response(
-                messages, user=user, room=room
+                messages=messages,
+                user=user,
+                room=room,
+                model=original_model,
             )
         except openai.APIError as e:
             if e.code == "max_tokens":
@@ -438,7 +441,7 @@ class OpenAI:
                 result_text,
                 additional_tokens,
             ) = await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -449,7 +452,7 @@ class OpenAI:
                 raise e
         else:
             result_text, additional_tokens = await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -474,7 +477,7 @@ class OpenAI:
             new_messages.append(new_message)
             result_text, additional_tokens = await self.generate_chat_response(
-                new_messages,
+                messages=new_messages,
                 user=user,
                 room=room,
                 allow_override=False,