Fix parameter passing in chat response calls
Standardize the passing of the 'messages' argument across all calls to the generate_chat_response method, to ensure consistency and prevent potential bugs in the GPT bot's response generation. In one call, the 'model' parameter mistakenly received 'original_messages'; it has been corrected to 'original_model' so that responses are generated with the intended model. These changes improve code clarity and preserve the intended message flow in the bot's conversation handling.
Parent: 10b74187eb
Commit: 2d564afd97
2 changed files with 14 additions and 11 deletions
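For context on why the keyword form matters, here is a minimal, self-contained sketch; the class and signature below are simplified assumptions for illustration, not the project's actual definitions:

import asyncio


class OpenAI:
    # Stand-in for the real class: the actual method takes more parameters
    # and calls the chat-completion API instead of echoing the last message.
    async def generate_chat_response(
        self, messages, user=None, room=None, allow_override=True, model=None
    ):
        return f"echo: {messages[-1]['content']}", 0


async def main():
    bot = OpenAI()
    messages = [{"role": "user", "content": "hello"}]

    # Positional form (pre-commit): 'messages' binds by position, so a
    # later reordering of the signature silently re-binds the argument.
    await bot.generate_chat_response(messages, user="@user:example.org")

    # Keyword form (post-commit): the binding is explicit and survives
    # refactoring; this is what the commit standardizes on.
    await bot.generate_chat_response(messages=messages, user="@user:example.org")


asyncio.run(main())

Keyword arguments also make the recursive error-handling call sites below self-documenting, which is the clarity the commit message refers to.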
@@ -7,7 +7,7 @@ allow-direct-references = true
 
 [project]
 name = "matrix-gptbot"
-version = "0.3.3"
+version = "0.3.4"
 
 authors = [
   { name="Kumi Mitterer", email="gptbot@kumi.email" },
@@ -164,7 +164,7 @@ class OpenAI:
         if count > 5:
             self.logger.log(f"Recursion depth exceeded, aborting.")
             return await self.generate_chat_response(
-                messages,
+                messages=messages,
                 user=user,
                 room=room,
                 allow_override=False,  # TODO: Could this be a problem?
@@ -298,7 +298,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -318,7 +318,7 @@ class OpenAI:
                     + original_messages[-1:]
                 )
                 result_text, additional_tokens = await self.generate_chat_response(
-                    messages, user=user, room=room, model=original_messages
+                    messages=messages, user=user, room=room, model=original_model
                 )
             except openai.APIError as e:
                 if e.code == "max_tokens":
@@ -345,7 +345,7 @@ class OpenAI:
                         result_text,
                         additional_tokens,
                     ) = await self.generate_chat_response(
-                        new_messages,
+                        messages=new_messages,
                         user=user,
                         room=room,
                         allow_override=False,
@@ -359,7 +359,7 @@ class OpenAI:
                         result_text,
                         additional_tokens,
                     ) = await self.generate_chat_response(
-                        original_messages,
+                        messages=original_messages,
                         user=user,
                         room=room,
                         allow_override=False,
@@ -407,7 +407,7 @@ class OpenAI:
             return (e.args[0] if e.args else False), 0
         except Handover:
             return await self.generate_chat_response(
-                original_messages,
+                messages=original_messages,
                 user=user,
                 room=room,
                 allow_override=False,
@@ -430,7 +430,10 @@ class OpenAI:
                 result_text,
                 additional_tokens,
             ) = await self.generate_chat_response(
-                messages, user=user, room=room
+                messages=messages,
+                user=user,
+                room=room,
+                model=original_model,
             )
         except openai.APIError as e:
             if e.code == "max_tokens":
@@ -438,7 +441,7 @@ class OpenAI:
                     result_text,
                     additional_tokens,
                 ) = await self.generate_chat_response(
-                    original_messages,
+                    messages=original_messages,
                     user=user,
                     room=room,
                     allow_override=False,
@@ -449,7 +452,7 @@ class OpenAI:
                     raise e
             else:
                 result_text, additional_tokens = await self.generate_chat_response(
-                    original_messages,
+                    messages=original_messages,
                     user=user,
                     room=room,
                     allow_override=False,
@@ -474,7 +477,7 @@ class OpenAI:
             new_messages.append(new_message)
 
             result_text, additional_tokens = await self.generate_chat_response(
-                new_messages,
+                messages=new_messages,
                 user=user,
                 room=room,
                 allow_override=False,
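The '@@ -318' hunk fixes more than naming style: the old call passed original_messages, a message list, as the model argument. Python binds any object to a keyword parameter, so the call itself succeeds and the mistake only surfaces downstream. A minimal sketch of that failure mode, with types assumed from the diff rather than verified against the full source:

# Assumed shapes: 'model' is a model-name string, 'messages' a list of dicts.
original_model = "gpt-4"
original_messages = [{"role": "user", "content": "hello"}]


def call_api(messages, model):
    # Stand-in for a chat-completion client; a real client fails in a
    # similar way when handed a message list instead of a model name.
    if not isinstance(model, str):
        raise TypeError(f"model must be str, got {type(model).__name__}")
    print(f"calling {model} with {len(messages)} message(s)")


call_api(original_messages, model=original_model)  # post-fix shape: works
# call_api(original_messages, model=original_messages)  # pre-fix shape is
# accepted at the call site but raises TypeError inside the client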