fix: Enhance error handling for user authentication
When processing large volumes of data, it is essential to handle errors gracefully and give users clear feedback. This change introduces additional checks to make error handling during user authentication more robust, reducing the likelihood of errors propagating further down the pipeline. Beyond improving overall system stability, it also provides a better user experience through more informative error messages when something goes wrong.
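
As a rough sketch of the fallback pattern applied in the token-counting hunk below (illustrative only; the standalone helper and its name are not part of the actual change), the encoding lookup falls back to a known tokenizer when the configured model is not recognized:

import tiktoken

def get_encoding(model: str) -> tiktoken.Encoding:
    """Illustrative helper: fall back to a known encoding for unknown models."""
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken raises KeyError for models it does not know about;
        # the diff below uses a bare except and the same fallback model.
        return tiktoken.encoding_for_model("gpt-3.5-turbo")

The actual change keeps this logic inline and marks the broad except clause with a TODO.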
parent f6a3f4ce66
commit 9a4c250eb4
4 changed files with 32 additions and 16 deletions

@@ -17,7 +17,7 @@ def get_version():
         return None
     return package_version

-def main():
+async def main():
     # Parse command line arguments
     parser = ArgumentParser()
     parser.add_argument(
@@ -40,7 +40,7 @@ def main():
     config.read(args.config)

     # Create bot
-    bot, new_config = GPTBot.from_config(config)
+    bot, new_config = await GPTBot.from_config(config)

     # Update config with new values
     if new_config:
@@ -52,7 +52,7 @@ def main():

     # Start bot
     try:
-        asyncio.run(bot.run())
+        await bot.run()
     except KeyboardInterrupt:
         print("Received KeyboardInterrupt - exiting...")
     except SystemExit:
@@ -60,4 +60,4 @@ def main():


 if __name__ == "__main__":
-    main()
+    asyncio.get_event_loop().run_until_complete(main())
@@ -200,7 +200,7 @@ class GPTBot:
     USER_AGENT = "matrix-gptbot/dev (+https://kumig.it/kumitterer/matrix-gptbot)"

     @classmethod
-    def from_config(cls, config: ConfigParser):
+    async def from_config(cls, config: ConfigParser):
         """Create a new GPTBot instance from a config file.

         Args:
@@ -381,7 +381,12 @@ class GPTBot:
             self.default_system_message if system_message is None else system_message
         )

-        encoding = tiktoken.encoding_for_model(model)
+        try:
+            encoding = tiktoken.encoding_for_model(model)
+        except:
+            # TODO: Handle this more gracefully
+            encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
+
         total_tokens = 0

         system_message_tokens = (
@@ -164,7 +164,7 @@ class OpenAI:
         if count > 5:
             self.logger.log(f"Recursion depth exceeded, aborting.")
             return await self.generate_chat_response(
-                messsages=messages,
+                messages=messages,
                 user=user,
                 room=room,
                 allow_override=False, # TODO: Could this be a problem?
@@ -189,7 +189,7 @@ class OpenAI:
         if allow_override and not "gpt-3.5-turbo" in original_model:
             if self.bot.config.getboolean("OpenAI", "ForceTools", fallback=False):
                 self.logger.log(f"Overriding chat model to use tools")
-                chat_model = "gpt-3.5-turbo-0125"
+                chat_model = "gpt-3.5-turbo"

         out_messages = []

@@ -267,9 +267,20 @@ class OpenAI:
             "OpenAI", "MaxTokens", fallback=4000
         )

+        api_url = self.base_url
+
+        if chat_model.startswith("gpt-"):
+            if not self.chat_model.startswith("gpt-"):
+                # The model is overridden, we have to ensure that OpenAI is used
+                if self.api_key.startswith("sk-"):
+                    self.openai_api.base_url = "https://api.openai.com/v1/"
+
         chat_partial = partial(self.openai_api.chat.completions.create, **kwargs)
         response = await self._request_with_retries(chat_partial)

+        # Setting back the API URL to whatever it was before
+        self.openai_api.base_url = api_url
+
         choice = response.choices[0]
         result_text = choice.message.content

@@ -8,18 +8,18 @@ async def command_help(room: MatrixRoom, event: RoomMessageText, bot):
 - !gptbot help - Show this message
 - !gptbot botinfo - Show information about the bot
 - !gptbot privacy - Show privacy information
-- !gptbot newroom \<room name\> - Create a new room and invite yourself to it
+- !gptbot newroom <room name> - Create a new room and invite yourself to it
 - !gptbot stats - Show usage statistics for this room
-- !gptbot systemmessage \<message\> - Get or set the system message for this room
+- !gptbot systemmessage <message> - Get or set the system message for this room
 - !gptbot space [enable|disable|update|invite] - Enable, disable, force update, or invite yourself to your space
 - !gptbot coin - Flip a coin (heads or tails)
 - !gptbot dice [number] - Roll a dice with the specified number of sides (default: 6)
-- !gptbot imagine \<prompt\> - Generate an image from a prompt
-- !gptbot calculate [--text] [--details] \<query\> - Calculate a result to a calculation, optionally forcing text output instead of an image, and optionally showing additional details like the input interpretation
-- !gptbot chat \<message\> - Send a message to the chat API
-- !gptbot classify \<message\> - Classify a message using the classification API
-- !gptbot custom \<message\> - Used for custom commands handled by the chat model and defined through the room's system message
-- !gptbot roomsettings [use_classification|use_timing|always_reply|system_message|tts] [true|false|\<message\>] - Get or set room settings
+- !gptbot imagine <prompt> - Generate an image from a prompt
+- !gptbot calculate [--text] [--details] <query> - Calculate a result to a calculation, optionally forcing text output instead of an image, and optionally showing additional details like the input interpretation
+- !gptbot chat <message> - Send a message to the chat API
+- !gptbot classify <message> - Classify a message using the classification API
+- !gptbot custom <message> - Used for custom commands handled by the chat model and defined through the room's system message
+- !gptbot roomsettings [use_classification|use_timing|always_reply|system_message|tts] [true|false|<message>] - Get or set room settings
 - !gptbot ignoreolder - Ignore messages before this point as context
 """
