Compare commits: main...room-name- (1 commit)

Commit f2d376501e

44 changed files with 615 additions and 1122 deletions

@ -1,33 +0,0 @@
-name: Docker CI/CD
-
-on:
-  push:
-    tags:
-      - "*"
-
-jobs:
-  docker:
-    name: Docker Build and Push to Docker Hub
-    container:
-      image: node:20-bookworm
-    steps:
-      - name: Install dependencies
-        run: |
-          apt update
-          apt install -y docker.io
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push to Docker Hub
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          tags: |
-            kumitterer/matrix-gptbot:latest
-            kumitterer/matrix-gptbot:${{ env.GITHUB_REF_NAME }}

.github/dependabot.yml (6 changes, vendored)

@ -1,6 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "pip"
-    directory: "/"
-    schedule:
-      interval: "daily"

.gitignore (3 changes, vendored)

@ -7,5 +7,4 @@ venv/
 __pycache__/
 *.bak
 dist/
 pantalaimon.conf
-.ruff_cache/

CHANGELOG.md (69 changes)

@ -1,79 +1,42 @@
 # Changelog
 
-### 0.3.14 (2024-05-21)
+### 0.3.9 (unreleased)
 
-- Fixed issue in handling of login credentials, added error handling for login failures
-
-### 0.3.13 (2024-05-20)
-
-- **Breaking Change**: The `ForceTools` configuration option behavior has changed. Instead of using a separate model for tools, the bot will now try to use the default chat model for tool requests, even if that model is not known to support tools.
-- Added `ToolModel` to OpenAI configuration to allow specifying a separate model for tool requests
-- Automatically resize context images to a default maximum of 2000x768 pixels before sending them to the AI model
-
-### 0.3.12 (2024-05-17)
-
-- Added `ForceVision` to OpenAI configuration to allow third-party models to be used for image recognition
-- Added some missing properties to `OpenAI` class
-
-### 0.3.11 (2024-05-17)
-
-- Refactoring of AI provider handling in preparation for multiple AI providers: Introduced a `BaseAI` class that all AI providers must inherit from
-- Added support for temperature, top_p, frequency_penalty, and presence_penalty in `AllowedUsers`
-- Introduced ruff as a development dependency for linting and applied some linting fixes
-- Fixed `gptbot` command line tool
-- Changed default chat model to `gpt-4o`
-- Changed default image generation model to `dall-e-3`
-- Removed currently unused sections from `config.dist.ini`
-- Changed provided Pantalaimon config file to not use a key ring by default
-- Prevent bot from crashing when an unneeded dependency is missing
-
-### 0.3.10 (2024-05-16)
-
-- Add support for specifying room IDs in `AllowedUsers`
-- Minor fixes
-
-### 0.3.9 (2024-04-23)
-
-- Add Docker support for running the bot in a container
-- Add TrackingMore dependency to pyproject.toml
-- Replace deprecated `pkg_resources` with `importlib.metadata`
-- Allow password-based login on first login
 
 ### 0.3.7 / 0.3.8 (2024-04-15)
 
-- Changes to URLs in pyproject.toml
-- Migrated build pipeline to Forgejo Actions
+* Changes to URLs in pyproject.toml
+* Migrated build pipeline to Forgejo Actions
 
 ### 0.3.6 (2024-04-11)
 
-- Fix issue where message type detection would fail for some messages (cece8cfb24e6f2e98d80d233b688c3e2c0ff05ae)
+* Fix issue where message type detection would fail for some messages (cece8cfb24e6f2e98d80d233b688c3e2c0ff05ae)
 
 ### 0.3.5
 
-- Only set room avatar if it is not already set (a9c23ee9c42d0a741a7eb485315e3e2d0a526725)
+* Only set room avatar if it is not already set (a9c23ee9c42d0a741a7eb485315e3e2d0a526725)
 
 ### 0.3.4 (2024-02-18)
 
-- Optimize chat model and message handling (10b74187eb43bca516e2a469b69be1dbc9496408)
-- Fix parameter passing in chat response calls (2d564afd979e7bc9eee8204450254c9f86b663b5)
-- Refine message filtering in bot event processing (c47f947f80f79a443bbd622833662e3122b121ef)
+* Optimize chat model and message handling (10b74187eb43bca516e2a469b69be1dbc9496408)
+* Fix parameter passing in chat response calls (2d564afd979e7bc9eee8204450254c9f86b663b5)
+* Refine message filtering in bot event processing (c47f947f80f79a443bbd622833662e3122b121ef)
 
 ### 0.3.3 (2024-01-26)
 
-- Implement recursion check in response generation (e6bc23e564e51aa149432fc67ce381a9260ee5f5)
-- Implement tool emulation for models without tool support (0acc1456f9e4efa09e799f6ce2ec9a31f439fe4a)
-- Allow selection of chat model by room (87173ae284957f66594e66166508e4e3bd60c26b)
+* Implement recursion check in response generation (e6bc23e564e51aa149432fc67ce381a9260ee5f5)
+* Implement tool emulation for models without tool support (0acc1456f9e4efa09e799f6ce2ec9a31f439fe4a)
+* Allow selection of chat model by room (87173ae284957f66594e66166508e4e3bd60c26b)
 
 ### 0.3.2 (2023-12-14)
 
-- Removed key upload from room event handler
-- Fixed output of `python -m gptbot -v` to display currently installed version
-- Workaround for bug preventing bot from responding when files are uploaded to an encrypted room
+* Removed key upload from room event handler
+* Fixed output of `python -m gptbot -v` to display currently installed version
+* Workaround for bug preventing bot from responding when files are uploaded to an encrypted room
 
 #### Known Issues
 
-- When using Pantalaimon: Bot is unable to download/use files uploaded to unencrypted rooms
+* When using Pantalaimon: Bot is unable to download/use files uploaded to unencrypted rooms
 
 ### 0.3.1 (2023-12-07)
 
-- Fixed issue in newroom task causing it to be called over and over again
+* Fixed issue in newroom task causing it to be called over and over again

Dockerfile (14 changes)

@ -1,14 +0,0 @@
-FROM python:3.12-slim
-
-WORKDIR /app
-COPY src/ /app/src
-COPY pyproject.toml /app
-COPY README.md /app
-COPY LICENSE /app
-
-RUN apt update
-RUN apt install -y build-essential libpython3-dev ffmpeg
-RUN pip install .[all]
-RUN pip install 'future==1.0.0'
-
-CMD ["python", "-m", "gptbot"]

README.md (72 changes)

@ -1,11 +1,6 @@
 # GPTbot
 
 [![Support Private.coffee!](https://shields.private.coffee/badge/private.coffee-support%20us!-pink?logo=coffeescript)](https://private.coffee)
-[![Matrix](https://shields.private.coffee/badge/Matrix-join%20us!-blue?logo=matrix)](https://matrix.to/#/#matrix-gptbot:private.coffee)
-[![PyPI](https://shields.private.coffee/pypi/v/matrix-gptbot)](https://pypi.org/project/matrix-gptbot/)
-[![PyPI - Python Version](https://shields.private.coffee/pypi/pyversions/matrix-gptbot)](https://pypi.org/project/matrix-gptbot/)
-[![PyPI - License](https://shields.private.coffee/pypi/l/matrix-gptbot)](https://pypi.org/project/matrix-gptbot/)
-[![Latest Git Commit](https://shields.private.coffee/gitea/last-commit/privatecoffee/matrix-gptbot?gitea_url=https://git.private.coffee)](https://git.private.coffee/privatecoffee/matrix-gptbot)
 
 GPTbot is a simple bot that uses different APIs to generate responses to
 messages in a Matrix room.

@ -14,8 +9,8 @@ messages in a Matrix room.
 
 - AI-generated responses to text, image and voice messages in a Matrix room
   (chatbot)
-- Currently supports OpenAI (`gpt-3.5-turbo` and `gpt-4`, `gpt-4o`, `whisper`
-  and `tts`) and compatible APIs (e.g. `ollama`)
+- Currently supports OpenAI (`gpt-3.5-turbo` and `gpt-4`, including vision
+  preview, `whisper` and `tts`)
 - Able to generate pictures using OpenAI `dall-e-2`/`dall-e-3` models
 - Able to browse the web to find information
 - Able to use OpenWeatherMap to get weather information (requires separate

@ -30,18 +25,16 @@ messages in a Matrix room.
 
 To run the bot, you will need Python 3.10 or newer.
 
-The bot has been tested with Python 3.12 on Arch, but should work with any
+The bot has been tested with Python 3.11 on Arch, but should work with any
 current version, and should not require any special dependencies or operating
 system features.
 
 ### Production
 
-#### PyPI
-
-The recommended way to install the bot is to use pip to install it from PyPI.
+The easiest way to install the bot is to use pip to install it from pypi.
 
 ```shell
-# Recommended: activate a venv first
+# If desired, activate a venv first
 
 python -m venv venv
 . venv/bin/activate

@ -57,33 +50,10 @@ for all available features.
 You can also use `pip install git+https://git.private.coffee/privatecoffee/matrix-gptbot.git`
 to install the latest version from the Git repository.
 
-#### Docker
+#### Configuration
 
-A `docker-compose.yml` file is provided that you can use to run the bot with
-Docker Compose. You will need to create a `config.ini` file as described in the
-`Running` section.
-
-```shell
-# Clone the repository
-git clone https://git.private.coffee/privatecoffee/matrix-gptbot.git
-cd matrix-gptbot
-
-# Create a config file
-cp config.dist.ini config.ini
-# Edit the config file to your needs
-
-# Initialize the database file
-sqlite3 database.db "SELECT 1"
-
-# Optionally, create Pantalaimon config
-cp contrib/pantalaimon.example.conf pantalaimon.conf
-# Edit the Pantalaimon config file to your needs
-# Update your homeserver URL in the bot's config.ini to point to Pantalaimon (probably http://pantalaimon:8009 if you used the provided example config)
-# You can use `fetch_access_token.py` to get an access token for the bot
-
-# Start the bot
-docker-compose up -d
-```
+The bot requires a configuration file to be present in the working directory.
+Copy the provided `config.dist.ini` to `config.ini` and edit it to your needs.
 
 #### End-to-end encryption

@ -92,9 +62,14 @@ file attachments, especially in rooms that are not encrypted, if the same
 user also uses the bot in encrypted rooms.
 
 The bot itself does not implement end-to-end encryption. However, it can be
-used in conjunction with [pantalaimon](https://github.com/matrix-org/pantalaimon).
+used in conjunction with [pantalaimon](https://github.com/matrix-org/pantalaimon),
+which is actually installed as a dependency of the bot.
 
-You first have to log in to your homeserver using `python fetch_access_token.py`,
+To use pantalaimon, create a `pantalaimon.conf` following the example in
+`pantalaimon.example.conf`, making sure to change the homeserver URL to match
+your homeserver. Then, start pantalaimon with `pantalaimon -c pantalaimon.conf`.
+
+You first have to log in to your homeserver using `python pantalaimon_first_login.py`,
 and can then use the returned access token in your bot's `config.ini` file.
 
 Make sure to also point the bot to your pantalaimon instance by setting

@ -140,12 +115,7 @@ before merging.
 
 ## Running
 
-The bot requires a configuration file to be present in the working directory.
-
-Copy the provided `config.dist.ini` to `config.ini` and edit it to your needs.
-
-The bot can then be run with `python -m gptbot`. If required, activate a venv
-first.
+The bot can be run with `python -m gptbot`. If required, activate a venv first.
 
 You may want to run the bot in a screen or tmux session, or use a process
 manager like systemd. The repository contains a sample systemd service file

@ -222,12 +192,10 @@ Note that this currently only works for audio messages and .mp3 file uploads.
 
 First of all, make sure that the bot is actually running. (Okay, that's not
 really troubleshooting, but it's a good start.)
 
-If the bot is running, check the logs, these should tell you what is going on.
-For example, if the bot is showing an error message like "Timed out, retrying",
-it is unable to reach your homeserver. In this case, check your homeserver URL
-and make sure that the bot can reach it. If you are using Pantalaimon, make
-sure that the bot is pointed to Pantalaimon and not directly to your
-homeserver, and that Pantalaimon is running and reachable.
+If the bot is running, check the logs. The first few lines should contain
+"Starting bot...", "Syncing..." and "Bot started". If you don't see these
+lines, something went wrong during startup. Fortunately, the logs should
+contain more information about what went wrong.
 
 If you need help figuring out what went wrong, feel free to open an issue.

config.dist.ini (117 changes)

@ -45,11 +45,10 @@ Operator = Contact details not set
 # DisplayName = GPTBot
 
 # A list of allowed users
-# If not defined, everyone is allowed to use the bot (so you should really define this)
+# If not defined, everyone is allowed to use the bot
 # Use the "*:homeserver.matrix" syntax to allow everyone on a given homeserver
-# Alternatively, you can also specify a room ID to allow everyone in the room to use the bot within that room
 #
-# AllowedUsers = ["*:matrix.local", "!roomid:matrix.local"]
+# AllowedUsers = ["*:matrix.local"]
 
 # Minimum level of log messages that should be printed
 # Available log levels in ascending order: trace, debug, info, warning, error, critical

@ -63,20 +62,20 @@ LogLevel = info
 
 # The Chat Completion model you want to use.
 #
-# Model = gpt-4o
+# Unless you are in the GPT-4 beta (if you don't know - you aren't),
+# leave this as the default value (gpt-3.5-turbo)
+#
+# Model = gpt-3.5-turbo
 
 # The Image Generation model you want to use.
 #
-# ImageModel = dall-e-3
+# ImageModel = dall-e-2
 
 # Your OpenAI API key
 #
 # Find this in your OpenAI account:
 # https://platform.openai.com/account/api-keys
 #
-# This may not be required for self-hosted models – in that case, just leave it
-# as it is.
-#
 APIKey = sk-yoursecretkey
 
 # The maximum amount of input sent to the API

@ -101,26 +100,17 @@ APIKey = sk-yoursecretkey
 # The base URL of the OpenAI API
 #
 # Setting this allows you to use a self-hosted AI model for chat completions
-# using something like llama-cpp-python or ollama
+# using something like https://github.com/abetlen/llama-cpp-python
 #
-# BaseURL = https://api.openai.com/v1/
+# BaseURL = https://openai.local/v1
 
 # Whether to force the use of tools in the chat completion model
 #
-# This will make the bot allow the use of tools in the chat completion model,
-# even if the model you are using isn't known to support tools. This is useful
-# if you are using a self-hosted model that supports tools, but the bot doesn't
-# know about it.
+# Currently, only gpt-3.5-turbo supports tools. If you set this to 1, the bot
+# will use that model for tools even if you have a different model set as the
+# default. It will only generate the final result using the default model.
 #
-# ForceTools = 1
+# ForceTools = 0
 
-# Whether a dedicated model should be used for tools
-#
-# This will make the bot use a dedicated model for tools. This is useful if you
-# want to use a model that doesn't support tools, but still want to be able to
-# use tools.
-#
-# ToolModel = gpt-4o
-
 # Whether to emulate tools in the chat completion model
 #

@ -130,50 +120,6 @@ APIKey = sk-yoursecretkey
 #
 # EmulateTools = 0
 
-# Force vision in the chat completion model
-#
-# By default, the bot only supports image recognition in known vision models.
-# If you set this to 1, the bot will assume that the model you're using supports
-# vision, and will send images to the model as well. This may be required for
-# some self-hosted models.
-#
-# ForceVision = 0
-
-# Maximum width and height of images sent to the API if vision is enabled
-#
-# The OpenAI API has a limit of 2000 pixels for the long side of an image, and
-# 768 pixels for the short side. You may have to adjust these values if you're
-# using a self-hosted model that has different limits. You can also set these
-# to 0 to disable image resizing.
-#
-# MaxImageLongSide = 2000
-# MaxImageShortSide = 768
-
-# Whether the used model supports video files as input
-#
-# If you are using a model that supports video files as input, set this to 1.
-# This will make the bot send video files to the model as well as images.
-# This may be possible with some self-hosted models, but is not supported by
-# the OpenAI API at this time.
-#
-# ForceVideoInput = 0
-
-# Advanced settings for the OpenAI API
-#
-# These settings are not required for normal operation, but can be used to
-# tweak the behavior of the bot.
-#
-# Note: These settings are not validated by the bot, so make sure they are
-# correct before setting them, or the bot may not work as expected.
-#
-# For more information, see the OpenAI documentation:
-# https://platform.openai.com/docs/api-reference/chat/create
-#
-# Temperature = 1
-# TopP = 1
-# FrequencyPenalty = 0
-# PresencePenalty = 0
-
 ###############################################################################
 
 [WolframAlpha]

@ -197,23 +143,17 @@ APIKey = sk-yoursecretkey
 Homeserver = https://matrix.local
 
 # An Access Token for the user your bot runs as
+# Can be obtained using a request like this:
 #
 # See https://www.matrix.org/docs/guides/client-server-api#login
 # for information on how to obtain this value
 #
 AccessToken = syt_yoursynapsetoken
 
-# Instead of an Access Token, you can also use a User ID and password
-# to log in. Upon first run, the bot will automatically turn this into
-# an Access Token and store it in the config file, and remove the
-# password from the config file.
-#
-# This is particularly useful if you are using Pantalaimon, where this
-# is the only (easy) way to generate an Access Token.
+# The Matrix user ID of the bot (@local:domain.tld)
+# Only specify this if the bot fails to figure it out by itself
 #
 # UserID = @gptbot:matrix.local
-# Password = yourpassword
 
 ###############################################################################

@ -224,6 +164,11 @@ AccessToken = syt_yoursynapsetoken
 #
 Path = database.db
 
+# Path of the Crypto Store - required to support encrypted rooms
+# (not tested/supported yet)
+#
+CryptoStore = store.db
+
 ###############################################################################
 
 [TrackingMore]

@ -235,6 +180,26 @@ Path = database.db
 
 ###############################################################################
 
+[Replicate]
+
+# API key for replicate.com
+# Can be used to run lots of different AI models
+# If not defined, the features that depend on it are not available
+#
+# APIKey = r8_alotoflettersandnumbershere
+
+###############################################################################
+
+[HuggingFace]
+
+# API key for Hugging Face
+# Can be used to run lots of different AI models
+# If not defined, the features that depend on it are not available
+#
+# APIKey = __________________________
+
+###############################################################################
+
 [OpenWeatherMap]
 
 # API key for OpenWeatherMap

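For orientation, here is a minimal sketch of how a configuration file in this format is typically read; the section and option names come from the file above, and the `getint` fallbacks match the `max_tokens` and `max_messages` properties that appear later in this diff. This is an illustration, not the bot's actual startup code.

```python
# Minimal sketch (assumed simplification): reading config.ini with the
# standard library, using the sections and options shown above.
from configparser import ConfigParser

config = ConfigParser()
config.read("config.ini")

api_key = config["OpenAI"]["APIKey"]                     # required
model = config["OpenAI"].get("Model", "gpt-3.5-turbo")   # optional, with default
max_tokens = config["OpenAI"].getint("MaxTokens", 3000)  # fallback as in the bot
homeserver = config["Matrix"]["Homeserver"]
```
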
@ -1,7 +0,0 @@
-[Homeserver]
-Homeserver = https://example.com
-ListenAddress = localhost
-ListenPort = 8009
-IgnoreVerification = True
-LogLevel = debug
-UseKeyring = no

@ -1,15 +0,0 @@
-version: '3.8'
-
-services:
-  gptbot:
-    image: kumitterer/matrix-gptbot
-    volumes:
-      - ./config.ini:/app/config.ini
-      - ./database.db:/app/database.db
-
-  pantalaimon:
-    image: matrixdotorg/pantalaimon
-    volumes:
-      - ./pantalaimon.conf:/etc/pantalaimon/pantalaimon.conf
-    ports:
-      - "8009:8009"

pantalaimon.example.conf (5 changes, new file)

@ -0,0 +1,5 @@
+[Homeserver]
+Homeserver = https://example.com
+ListenAddress = localhost
+ListenPort = 8010
+IgnoreVerification = True

@ -7,51 +7,63 @@ allow-direct-references = true
 
 [project]
 name = "matrix-gptbot"
-version = "0.3.21"
+version = "0.3.9.dev0"
 
 authors = [
-    { name = "Kumi", email = "gptbot@kumi.email" },
-    { name = "Private.coffee Team", email = "support@private.coffee" },
+    { name="Kumi Mitterer", email="gptbot@kumi.email" },
+    { name="Private.coffee Team", email="support@private.coffee" },
 ]
 
 description = "Multifunctional Chatbot for Matrix"
 readme = "README.md"
-license = { file = "LICENSE" }
+license = { file="LICENSE" }
 requires-python = ">=3.10"
 
-packages = ["src/gptbot"]
+packages = [
+    "src/gptbot"
+]
 
 classifiers = [
     "Programming Language :: Python :: 3",
     "License :: OSI Approved :: MIT License",
     "Operating System :: OS Independent",
 ]
 
 dependencies = [
-    "matrix-nio[e2e]>=0.24.0",
+    "matrix-nio[e2e]",
     "markdown2[all]",
     "tiktoken",
     "python-magic",
     "pillow",
-    "future>=1.0.0",
 ]
 
 [project.optional-dependencies]
-openai = ["openai>=1.2", "pydub"]
+openai = [
+    "openai>=1.2",
+    "pydub",
+]
 
-google = ["google-generativeai"]
-
-wolframalpha = ["wolframalpha"]
-trackingmore = ["trackingmore-api-tool"]
+wolframalpha = [
+    "wolframalpha",
+]
+
+e2ee = [
+    "pantalaimon>=0.10.5",
+]
 
 all = [
-    "matrix-gptbot[openai,wolframalpha,trackingmore,google]",
+    "matrix-gptbot[openai,wolframalpha,e2ee]",
     "geopy",
     "beautifulsoup4",
 ]
 
-dev = ["matrix-gptbot[all]", "black", "hatchling", "twine", "build", "ruff"]
+dev = [
+    "matrix-gptbot[all]",
+    "black",
+    "hatchling",
+    "twine",
+    "build",
+]

@ -59,7 +71,7 @@ dev = ["matrix-gptbot[all]", "black", "hatchling", "twine", "build", "ruff"]
 
 "Source Code" = "https://git.private.coffee/privatecoffee/matrix-gptbot"
 
 [project.scripts]
-gptbot = "gptbot.__main__:main_sync"
+gptbot = "gptbot.__main__:main"
 
 [tool.hatch.build.targets.wheel]
 packages = ["src/gptbot"]

@ -5,22 +5,19 @@ from configparser import ConfigParser
 
 import signal
 import asyncio
-import importlib.metadata
+import pkg_resources
 
 
 def sigterm_handler(_signo, _stack_frame):
     exit()
 
 
 def get_version():
     try:
-        package_version = importlib.metadata.version("matrix_gptbot")
-    except Exception:
+        package_version = pkg_resources.get_distribution("matrix_gptbot").version
+    except pkg_resources.DistributionNotFound:
         return None
     return package_version
 
 
-async def main():
+def main():
     # Parse command line arguments
     parser = ArgumentParser()
     parser.add_argument(

@ -43,28 +40,19 @@ async def main():
     config.read(args.config)
 
     # Create bot
-    bot, new_config = await GPTBot.from_config(config)
-
-    # Update config with new values
-    if new_config:
-        with open(args.config, "w") as configfile:
-            new_config.write(configfile)
+    bot = GPTBot.from_config(config)
 
     # Listen for SIGTERM
     signal.signal(signal.SIGTERM, sigterm_handler)
 
     # Start bot
     try:
-        await bot.run()
+        asyncio.run(bot.run())
     except KeyboardInterrupt:
         print("Received KeyboardInterrupt - exiting...")
     except SystemExit:
         print("Received SIGTERM - exiting...")
 
 
-def main_sync():
-    asyncio.run(main())
-
-
 if __name__ == "__main__":
-    main_sync()
+    main()

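The hunks above revert an entry-point refactor: on the main side, `main()` is a coroutine and the `gptbot` console script in pyproject.toml points at a synchronous wrapper, `gptbot.__main__:main_sync`. A condensed sketch of that pattern, with the body elided:

```python
# Condensed sketch of the async entry-point pattern on the main side.
import asyncio

async def main():
    ...  # parse arguments, create the bot, await bot.run()

def main_sync():
    # Console-script entry points must be plain callables, so a small
    # synchronous wrapper bridges into asyncio.
    asyncio.run(main())

if __name__ == "__main__":
    main_sync()
```
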
@ -1,24 +1,32 @@
 from nio import (
     RoomMessageText,
     InviteEvent,
+    Event,
     SyncResponse,
     JoinResponse,
     RoomMemberEvent,
+    Response,
+    MegolmEvent,
+    KeysQueryResponse
 )
 
+from .test import test_callback
 from .sync import sync_callback
 from .invite import room_invite_callback
 from .join import join_callback
 from .message import message_callback
 from .roommember import roommember_callback
+from .test_response import test_response_callback
 
 RESPONSE_CALLBACKS = {
+    #Response: test_response_callback,
     SyncResponse: sync_callback,
     JoinResponse: join_callback,
 }
 
 EVENT_CALLBACKS = {
+    #Event: test_callback,
     InviteEvent: room_invite_callback,
     RoomMessageText: message_callback,
     RoomMemberEvent: roommember_callback,
 }

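The two dictionaries map nio response and event classes to handler coroutines. The code that consumes them is not part of this hunk; one plausible dispatch pattern, with hypothetical names, would be:

```python
# Hypothetical dispatcher for a registry like EVENT_CALLBACKS above; the
# (room, event, bot) handler signature follows the callbacks in this diff.
from typing import Any

async def dispatch_event(room: Any, event: Any, bot: Any, callbacks: dict) -> None:
    for event_type, callback in callbacks.items():
        # isinstance() also matches subclasses of the registered class.
        if isinstance(event, event_type):
            await callback(room, event, bot)
            return
```
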
@ -2,9 +2,9 @@ from nio import InviteEvent, MatrixRoom
 
 async def room_invite_callback(room: MatrixRoom, event: InviteEvent, bot):
     if room.room_id in bot.matrix_client.rooms:
-        bot.logger.log(f"Already in room {room.room_id} - ignoring invite")
+        logging(f"Already in room {room.room_id} - ignoring invite")
         return
 
     bot.logger.log(f"Received invite to room {room.room_id} - joining...")
 
-    await bot.matrix_client.join(room.room_id)
+    response = await bot.matrix_client.join(room.room_id)

@ -8,12 +8,12 @@ async def join_callback(response, bot):
 
     with closing(bot.database.cursor()) as cursor:
         cursor.execute(
-            "SELECT space_id FROM user_spaces WHERE user_id = ? AND active = TRUE", (response.sender,))
+            "SELECT space_id FROM user_spaces WHERE user_id = ? AND active = TRUE", (event.sender,))
         space = cursor.fetchone()
 
     if space:
         bot.logger.log(f"Adding new room to space {space[0]}...")
-        await bot.add_rooms_to_space(space[0], [response.room_id])
+        await bot.add_rooms_to_space(space[0], [new_room.room_id])
 
     bot.matrix_client.keys_upload()

@ -1,4 +1,4 @@
-from nio import RoomMemberEvent, MatrixRoom
+from nio import RoomMemberEvent, MatrixRoom, KeysUploadError
 
 async def roommember_callback(room: MatrixRoom, event: RoomMemberEvent, bot):
     if event.membership == "leave":

src/gptbot/callbacks/test.py (11 changes, new file)

@ -0,0 +1,11 @@
+from nio import MatrixRoom, Event
+
+async def test_callback(room: MatrixRoom, event: Event, bot):
+    """Test callback for debugging purposes.
+
+    Args:
+        room (MatrixRoom): The room the event was sent in.
+        event (Event): The event that was sent.
+    """
+
+    bot.logger.log(f"Test callback called: {room.room_id} {event.event_id} {event.sender} {event.__class__}")

src/gptbot/callbacks/test_response.py (11 changes, new file)

@ -0,0 +1,11 @@
+from nio import ErrorResponse
+
+
+async def test_response_callback(response, bot):
+    if isinstance(response, ErrorResponse):
+        bot.logger.log(
+            f"Error response received ({response.__class__.__name__}): {response.message}",
+            "warning",
+        )
+    else:
+        bot.logger.log(f"{response.__class__} response received", "debug")

@ -1,76 +0,0 @@
-from ...classes.logging import Logger
-
-import asyncio
-from functools import partial
-from typing import Any, AsyncGenerator, Dict, Optional, Mapping
-
-from nio import Event
-
-
-class AttributeDictionary(dict):
-    def __init__(self, *args, **kwargs):
-        super(AttributeDictionary, self).__init__(*args, **kwargs)
-        self.__dict__ = self
-
-
-class BaseAI:
-    bot: Any
-    logger: Logger
-
-    def __init__(self, bot, config: Mapping, logger: Optional[Logger] = None):
-        self.bot = bot
-        self.logger = logger or bot.logger or Logger()
-        self._config = config
-
-    @property
-    def chat_api(self) -> str:
-        return self.chat_model
-
-    async def prepare_messages(
-        self, event: Event, messages: list[Any], system_message: Optional[str] = None
-    ) -> list[Any]:
-        """A helper method to prepare messages for the AI.
-
-        This converts a list of Matrix messages into whatever format the AI requires.
-
-        Args:
-            event (Event): The event that triggered the message generation. Generally a text message from a user.
-            messages (list[Dict[str, str]]): The messages to prepare. Generally of type RoomMessage*.
-            system_message (Optional[str], optional): A system message to include. Defaults to None.
-
-        Returns:
-            list[Any]: The prepared messages in the format the AI requires.
-
-        Raises:
-            NotImplementedError: If the method is not implemented in the subclass.
-        """
-
-        raise NotImplementedError(
-            "Implementations of BaseAI must implement prepare_messages."
-        )
-
-    async def _request_with_retries(
-        self, request: partial, attempts: int = 5, retry_interval: int = 2
-    ) -> AsyncGenerator[Any | list | Dict, None]:
-        """Retry a request a set number of times if it fails.
-
-        Args:
-            request (partial): The request to make with retries.
-            attempts (int, optional): The number of attempts to make. Defaults to 5.
-            retry_interval (int, optional): The interval in seconds between attempts. Defaults to 2 seconds.
-
-        Returns:
-            AsyncGenerator[Any | list | Dict, None]: The response for the request.
-        """
-        current_attempt = 1
-        while current_attempt <= attempts:
-            try:
-                response = await request()
-                return response
-            except Exception as e:
-                self.logger.log(f"Request failed: {e}", "error")
-                self.logger.log(f"Retrying in {retry_interval} seconds...")
-                await asyncio.sleep(retry_interval)
-                current_attempt += 1
-
-        raise Exception("Request failed after all attempts.")

@ -1,73 +0,0 @@
-from .base import BaseAI
-from ..logging import Logger
-
-from typing import Optional, Mapping, List, Dict, Tuple
-
-import google.generativeai as genai
-
-
-class GeminiAI(BaseAI):
-    api_code: str = "google"
-
-    @property
-    def chat_api(self) -> str:
-        return self.chat_model
-
-    google_api: genai.GenerativeModel
-
-    operator: str = "Google (https://ai.google)"
-
-    def __init__(
-        self,
-        bot,
-        config: Mapping,
-        logger: Optional[Logger] = None,
-    ):
-        super().__init__(bot, config, logger)
-        genai.configure(api_key=self.api_key)
-        self.gemini_api = genai.GenerativeModel(self.chat_model)
-
-    @property
-    def api_key(self):
-        return self._config["APIKey"]
-
-    @property
-    def chat_model(self):
-        return self._config.get("Model", fallback="gemini-pro")
-
-    def prepare_messages(event, messages: List[Dict[str, str]], ) -> List[str]:
-        return [message["content"] for message in messages]
-
-    async def generate_chat_response(
-        self,
-        messages: List[Dict[str, str]],
-        user: Optional[str] = None,
-        room: Optional[str] = None,
-        use_tools: bool = True,
-        model: Optional[str] = None,
-    ) -> Tuple[str, int]:
-        """Generate a response to a chat message.
-
-        Args:
-            messages (List[Dict[str, str]]): A list of messages to use as context.
-            user (Optional[str], optional): The user to use the assistant for. Defaults to None.
-            room (Optional[str], optional): The room to use the assistant for. Defaults to None.
-            use_tools (bool, optional): Whether to use tools. Defaults to True.
-            model (Optional[str], optional): The model to use. Defaults to None, which uses the default chat model.
-
-        Returns:
-            Tuple[str, int]: The response text and the number of tokens used.
-        """
-        self.logger.log(
-            f"Generating response to {len(messages)} messages for user {user} in room {room}..."
-        )
-
-        messages = self.prepare_messages(messages)
-
-        return self.gemini_api.generate_content(
-            messages=messages,
-            user=user,
-            room=room,
-            use_tools=use_tools,
-            model=model,
-        )

@ -1,6 +1,7 @@
 import markdown2
 import tiktoken
 import asyncio
+import functools
 
 from PIL import Image
 

@ -14,6 +15,8 @@ from nio import (
     MatrixRoom,
     Api,
     RoomMessagesError,
+    GroupEncryptionError,
+    EncryptionError,
     RoomMessageText,
     RoomSendResponse,
     SyncResponse,

@ -24,34 +27,42 @@ from nio import (
     RoomVisibility,
     RoomCreateError,
     RoomMessageMedia,
+    RoomMessageImage,
+    RoomMessageFile,
+    RoomMessageAudio,
     DownloadError,
+    DownloadResponse,
+    ToDeviceEvent,
+    ToDeviceError,
     RoomGetStateError,
-    DiskDownloadResponse,
-    MemoryDownloadResponse,
-    LoginError,
 )
 from nio.store import SqliteStore
 
 
-from typing import Optional, List, Any, Union
+from typing import Optional, List
 from configparser import ConfigParser
 from datetime import datetime
 from io import BytesIO
 from pathlib import Path
 from contextlib import closing
 
+import base64
 import uuid
 import traceback
 import json
+import importlib.util
+import sys
 import sqlite3
+import traceback
 
 from .logging import Logger
 from ..migrations import migrate
 from ..callbacks import RESPONSE_CALLBACKS, EVENT_CALLBACKS
 from ..commands import COMMANDS
 from ..tools import TOOLS, Handover, StopProcessing
-from .ai.base import BaseAI
-from .exceptions import DownloadException
+from .openai import OpenAI
+from .wolframalpha import WolframAlpha
+from .trackingmore import TrackingMore
 
 
 class GPTBot:

@ -61,13 +72,12 @@ class GPTBot:
     matrix_client: Optional[AsyncClient] = None
     sync_token: Optional[str] = None
     logger: Optional[Logger] = Logger()
-    chat_api: Optional[BaseAI] = None
-    image_api: Optional[BaseAI] = None
-    classification_api: Optional[BaseAI] = None
-    tts_api: Optional[BaseAI] = None
-    stt_api: Optional[BaseAI] = None
-    parcel_api: Optional[Any] = None
-    calculation_api: Optional[Any] = None
+    chat_api: Optional[OpenAI] = None
+    image_api: Optional[OpenAI] = None
+    classification_api: Optional[OpenAI] = None
+    tts_api: Optional[OpenAI] = None
+    stt_api: Optional[OpenAI] = None
+    parcel_api: Optional[TrackingMore] = None
     room_ignore_list: List[str] = []  # List of rooms to ignore invites from
     logo: Optional[Image.Image] = None
     logo_uri: Optional[str] = None

@ -84,7 +94,7 @@ class GPTBot:
         """
         try:
             return json.loads(self.config["GPTBot"]["AllowedUsers"])
-        except Exception:
+        except:
             return []
 
     @property

@ -126,6 +136,26 @@ class GPTBot:
         """
         return self.config["GPTBot"].getboolean("ForceSystemMessage", False)
 
+    @property
+    def max_tokens(self) -> int:
+        """Maximum number of input tokens.
+
+        Returns:
+            int: The maximum number of input tokens. Defaults to 3000.
+        """
+        return self.config["OpenAI"].getint("MaxTokens", 3000)
+        # TODO: Move this to OpenAI class
+
+    @property
+    def max_messages(self) -> int:
+        """Maximum number of messages to consider as input.
+
+        Returns:
+            int: The maximum number of messages to consider as input. Defaults to 30.
+        """
+        return self.config["OpenAI"].getint("MaxMessages", 30)
+        # TODO: Move this to OpenAI class
+
     @property
     def operator(self) -> Optional[str]:
         """Operator of the bot.

@ -168,7 +198,7 @@ class GPTBot:
     USER_AGENT = "matrix-gptbot/dev (+https://kumig.it/kumitterer/matrix-gptbot)"
 
     @classmethod
-    async def from_config(cls, config: ConfigParser):
+    def from_config(cls, config: ConfigParser):
         """Create a new GPTBot instance from a config file.
 
         Args:

@ -200,70 +230,44 @@ class GPTBot:
         if Path(bot.logo_path).exists() and Path(bot.logo_path).is_file():
             bot.logo = Image.open(bot.logo_path)
 
-        # Set up OpenAI
-        assert (
-            "OpenAI" in config
-        ), "OpenAI config not found"  # TODO: Update this to support other providers
-
-        from .ai.openai import OpenAI
-
-        openai_api = OpenAI(bot=bot, config=config["OpenAI"])
-
-        if "Model" in config["OpenAI"]:
-            bot.chat_api = openai_api
-            bot.classification_api = openai_api
-
-        if "ImageModel" in config["OpenAI"]:
-            bot.image_api = openai_api
-
-        if "TTSModel" in config["OpenAI"]:
-            bot.tts_api = openai_api
-
-        if "STTModel" in config["OpenAI"]:
-            bot.stt_api = openai_api
+        bot.chat_api = bot.image_api = bot.classification_api = bot.tts_api = (
+            bot.stt_api
+        ) = OpenAI(
+            bot=bot,
+            api_key=config["OpenAI"]["APIKey"],
+            chat_model=config["OpenAI"].get("Model"),
+            image_model=config["OpenAI"].get("ImageModel"),
+            tts_model=config["OpenAI"].get("TTSModel"),
+            stt_model=config["OpenAI"].get("STTModel"),
+            base_url=config["OpenAI"].get("BaseURL"),
+        )
+
+        if "BaseURL" in config["OpenAI"]:
+            bot.chat_api.base_url = config["OpenAI"]["BaseURL"]
+            bot.image_api = None
 
         # Set up WolframAlpha
         if "WolframAlpha" in config:
-            from .wolframalpha import WolframAlpha
-
             bot.calculation_api = WolframAlpha(
                 config["WolframAlpha"]["APIKey"], bot.logger
             )
 
         # Set up TrackingMore
         if "TrackingMore" in config:
-            from .trackingmore import TrackingMore
-
             bot.parcel_api = TrackingMore(config["TrackingMore"]["APIKey"], bot.logger)
 
         # Set up the Matrix client
 
         assert "Matrix" in config, "Matrix config not found"
 
         homeserver = config["Matrix"]["Homeserver"]
+        bot.matrix_client = AsyncClient(homeserver)
+        bot.matrix_client.access_token = config["Matrix"]["AccessToken"]
+        bot.matrix_client.user_id = config["Matrix"].get("UserID")
+        bot.matrix_client.device_id = config["Matrix"].get("DeviceID")
 
-        if config.get("Matrix", "Password", fallback=""):
-            if not config.get("Matrix", "UserID", fallback=""):
-                raise Exception("Cannot log in: UserID not set in config")
-
-            bot.matrix_client = AsyncClient(homeserver, user=config["Matrix"]["UserID"])
-            login = await bot.matrix_client.login(password=config["Matrix"]["Password"])
-
-            if isinstance(login, LoginError):
-                raise Exception(f"Could not log in: {login.message}")
-
-            config["Matrix"]["AccessToken"] = bot.matrix_client.access_token
-            config["Matrix"]["DeviceID"] = bot.matrix_client.device_id
-            config["Matrix"]["Password"] = ""
-
-        else:
-            bot.matrix_client = AsyncClient(homeserver)
-
-            bot.matrix_client.access_token = config["Matrix"]["AccessToken"]
-            bot.matrix_client.user_id = config["Matrix"].get("UserID")
-            bot.matrix_client.device_id = config["Matrix"].get("DeviceID")
-
-        # Return the new GPTBot instance and the (potentially modified) config
-        return bot, config
+        # Return the new GPTBot instance
+        return bot
 
     async def _get_user_id(self) -> str:
         """Get the user ID of the bot from the whoami endpoint.

@ -296,7 +300,7 @@ class GPTBot:
         ignore_notices: bool = True,
     ):
         messages = []
-        n = n or self.chat_api.max_messages
+        n = n or self.max_messages
        room_id = room.room_id if isinstance(room, MatrixRoom) else room
 
         self.logger.log(

@ -323,13 +327,7 @@ class GPTBot:
             try:
                 event_type = event.source["content"]["msgtype"]
             except KeyError:
-                if event.__class__.__name__ in ("RoomMemberEvent",):
-                    self.logger.log(
-                        f"Ignoring event of type {event.__class__.__name__}",
-                        "debug",
-                    )
-                    continue
-                self.logger.log(f"Could not process event: {event}", "warning")
+                self.logger.log(f"Could not process event: {event}", "debug")
                 continue  # This is most likely not a message event
 
             if event_type.startswith("gptbot"):

@ -358,6 +356,56 @@ class GPTBot:
         # Reverse the list so that messages are in chronological order
         return messages[::-1]
 
+    def _truncate(
+        self,
+        messages: list,
+        max_tokens: Optional[int] = None,
+        model: Optional[str] = None,
+        system_message: Optional[str] = None,
+    ):
+        max_tokens = max_tokens or self.max_tokens
+        model = model or self.chat_api.chat_model
+        system_message = (
+            self.default_system_message if system_message is None else system_message
+        )
+
+        encoding = tiktoken.encoding_for_model(model)
+        total_tokens = 0
+
+        system_message_tokens = (
+            0 if not system_message else (len(encoding.encode(system_message)) + 1)
+        )
+
+        if system_message_tokens > max_tokens:
+            self.logger.log(
+                f"System message is too long to fit within token limit ({system_message_tokens} tokens) - cannot proceed",
+                "error",
+            )
+            return []
+
+        total_tokens += system_message_tokens
+
+        total_tokens = len(system_message) + 1
+        truncated_messages = []
+
+        for message in [messages[0]] + list(reversed(messages[1:])):
+            content = (
+                message["content"]
+                if isinstance(message["content"], str)
+                else (
+                    message["content"][0]["text"]
+                    if isinstance(message["content"][0].get("text"), str)
+                    else ""
+                )
+            )
+            tokens = len(encoding.encode(content)) + 1
+            if total_tokens + tokens > max_tokens:
+                break
+            total_tokens += tokens
+            truncated_messages.append(message)
+
+        return [truncated_messages[0]] + list(reversed(truncated_messages[1:]))
+
     async def _get_device_id(self) -> str:
         """Guess the device ID of the bot.
         Requires an access token to be set up.

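The `_truncate` helper added above budgets input tokens with tiktoken: it reserves space for the system message, pins the first message, then walks the remaining messages newest-first until the limit is reached. A condensed sketch of that strategy, assuming plain string contents and a model known to tiktoken:

```python
# Condensed sketch of the truncation strategy used by _truncate above.
import tiktoken

def truncate(messages: list[str], max_tokens: int, model: str = "gpt-3.5-turbo") -> list[str]:
    if not messages:
        return []
    encoding = tiktoken.encoding_for_model(model)
    total = 0
    kept: list[str] = []
    # Pin the first message, then consider the rest newest-first.
    for message in [messages[0]] + list(reversed(messages[1:])):
        tokens = len(encoding.encode(message)) + 1
        if total + tokens > max_tokens:
            break
        total += tokens
        kept.append(message)
    if not kept:
        return []
    # Restore chronological order: pinned first message, then survivors.
    return [kept[0]] + list(reversed(kept[1:]))
```
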
@ -410,7 +458,7 @@ class GPTBot:
        except (Handover, StopProcessing):
            raise

-       except KeyError:
+       except KeyError as e:
            self.logger.log(f"Tool {tool} not found", "error")
            return "Error: Tool not found"

@ -488,31 +536,13 @@ class GPTBot:
        return (
            (
                user_id in self.allowed_users
-               or (
-                   (
-                       f"*:{user_id.split(':')[1]}" in self.allowed_users
-                       or f"@*:{user_id.split(':')[1]}" in self.allowed_users
-                   )
-                   if not user_id.startswith("!") or user_id.startswith("#")
-                   else False
-               )
+               or f"*:{user_id.split(':')[1]}" in self.allowed_users
+               or f"@*:{user_id.split(':')[1]}" in self.allowed_users
            )
            if self.allowed_users
            else True
        )

-   def room_is_allowed(self, room_id: str) -> bool:
-       """Check if everyone in a room is allowed to use the bot.
-
-       Args:
-           room_id (str): The room ID to check.
-
-       Returns:
-           bool: Whether everyone in the room is allowed to use the bot.
-       """
-       # TODO: Handle published aliases
-       return self.user_is_allowed(room_id)
-
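The allow-list check above matches a user ID exactly or via the `*:homeserver` / `@*:homeserver` wildcard forms, and the newer side guards against room IDs and aliases slipping through. A hedged sketch of the same matching logic using `fnmatch` — the function name and list shape are illustrative, not the bot's API:

```python
# Illustrative sketch of allow-list matching with wildcards (not the bot's exact code).
# Assumes entries like "@alice:example.com", "*:example.com", or "@*:example.com".
from fnmatch import fnmatch

def user_allowed(user_id: str, allowed: list[str]) -> bool:
    if not allowed:  # an empty allow-list means everyone is allowed
        return True
    if user_id.startswith(("!", "#")):  # room IDs/aliases never match user patterns
        return False
    return any(fnmatch(user_id, pattern) for pattern in allowed)

# user_allowed("@bob:example.com", ["*:example.com"]) -> True
```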
    async def event_callback(self, room: MatrixRoom, event: Event):
        """Callback for events.

@ -524,9 +554,7 @@ class GPTBot:
        if event.sender == self.matrix_client.user_id:
            return

-       if not (
-           self.user_is_allowed(event.sender) or self.room_is_allowed(room.room_id)
-       ):
+       if not self.user_is_allowed(event.sender):
            if len(room.users) == 2:
                await self.matrix_client.room_send(
                    room.room_id,
@ -538,7 +566,7 @@ class GPTBot:
                )
            return

-       asyncio.create_task(self._event_callback(room, event))
+       task = asyncio.create_task(self._event_callback(room, event))

    def room_uses_timing(self, room: MatrixRoom):
        """Check if a room uses timing.
@ -566,7 +594,7 @@ class GPTBot:
            await callback(response, self)

    async def response_callback(self, response: Response):
-       asyncio.create_task(self._response_callback(response))
+       task = asyncio.create_task(self._response_callback(response))

    async def accept_pending_invites(self):
        """Accept all pending invites."""
@ -673,7 +701,7 @@ class GPTBot:
            "url": content_uri,
        }

-       await self.matrix_client.room_send(room, "m.room.message", content)
+       status = await self.matrix_client.room_send(room, "m.room.message", content)

        self.logger.log("Sent image", "debug")

@ -707,7 +735,7 @@ class GPTBot:
            "url": content_uri,
        }

-       await self.matrix_client.room_send(room, "m.room.message", content)
+       status = await self.matrix_client.room_send(room, "m.room.message", content)

        self.logger.log("Sent file", "debug")

@ -1100,10 +1128,7 @@ class GPTBot:
            return

        try:
-           last_messages = await self._last_n_messages(
-               room.room_id, self.chat_api.max_messages
-           )
-           self.logger.log(f"Last messages: {last_messages}", "debug")
+           last_messages = await self._last_n_messages(room.room_id, self.max_messages)
        except Exception as e:
            self.logger.log(f"Error getting last messages: {e}", "error")
            await self.send_message(
@ -1113,8 +1138,141 @@ class GPTBot:
        system_message = self.get_system_message(room)

-       chat_messages = await self.chat_api.prepare_messages(
-           event, last_messages, system_message
+       chat_messages = [{"role": "system", "content": system_message}]
+
+       last_messages = last_messages + [event]
+
+       for message in last_messages:
+           if isinstance(message, (RoomMessageNotice, RoomMessageText)):
+               role = (
+                   "assistant"
+                   if message.sender == self.matrix_client.user_id
+                   else "user"
+               )
+               if message == event or (not message.event_id == event.event_id):
+                   message_body = (
+                       message.body
+                       if not self.chat_api.supports_chat_images()
+                       else [{"type": "text", "text": message.body}]
+                   )
+                   chat_messages.append({"role": role, "content": message_body})
+
+           elif isinstance(message, RoomMessageAudio) or (
+               isinstance(message, RoomMessageFile) and message.body.endswith(".mp3")
+           ):
+               role = (
+                   "assistant"
+                   if message.sender == self.matrix_client.user_id
+                   else "user"
+               )
+               if message == event or (not message.event_id == event.event_id):
+                   if self.room_uses_stt(room):
+                       try:
+                           download = await self.download_file(message.url)
+                           message_text = await self.stt_api.speech_to_text(
+                               download.body
+                           )
+                       except Exception as e:
+                           self.logger.log(
+                               f"Error generating text from audio: {e}", "error"
+                           )
+                           message_text = message.body
+                   else:
+                       message_text = message.body
+
+                   message_body = (
+                       message_text
+                       if not self.chat_api.supports_chat_images()
+                       else [{"type": "text", "text": message_text}]
+                   )
+                   chat_messages.append({"role": role, "content": message_body})
+
+           elif isinstance(message, RoomMessageFile):
+               try:
+                   download = await self.download_file(message.url)
+                   if download:
+                       try:
+                           text = download.body.decode("utf-8")
+                       except UnicodeDecodeError:
+                           text = None
+
+                       if text:
+                           role = (
+                               "assistant"
+                               if message.sender == self.matrix_client.user_id
+                               else "user"
+                           )
+                           if message == event or (
+                               not message.event_id == event.event_id
+                           ):
+                               message_body = (
+                                   text
+                                   if not self.chat_api.supports_chat_images()
+                                   else [{"type": "text", "text": text}]
+                               )
+                               chat_messages.append(
+                                   {"role": role, "content": message_body}
+                               )
+
+               except Exception as e:
+                   self.logger.log(f"Error generating text from file: {e}", "error")
+                   message_body = (
+                       message.body
+                       if not self.chat_api.supports_chat_images()
+                       else [{"type": "text", "text": message.body}]
+                   )
+                   chat_messages.append({"role": "system", "content": message_body})
+
+           elif self.chat_api.supports_chat_images() and isinstance(
+               message, RoomMessageImage
+           ):
+               try:
+                   image_url = message.url
+                   download = await self.download_file(image_url)
+
+                   if download:
+                       encoded_url = f"data:{download.content_type};base64,{base64.b64encode(download.body).decode('utf-8')}"
+                       parent = (
+                           chat_messages[-1]
+                           if chat_messages
+                           and chat_messages[-1]["role"]
+                           == (
+                               "assistant"
+                               if message.sender == self.matrix_client.user_id
+                               else "user"
+                           )
+                           else None
+                       )
+
+                       if not parent:
+                           chat_messages.append(
+                               {
+                                   "role": (
+                                       "assistant"
+                                       if message.sender == self.matrix_client.user_id
+                                       else "user"
+                                   ),
+                                   "content": [],
+                               }
+                           )
+                           parent = chat_messages[-1]
+
+                       parent["content"].append(
+                           {"type": "image_url", "image_url": {"url": encoded_url}}
+                       )
+
+               except Exception as e:
+                   self.logger.log(f"Error generating image from file: {e}", "error")
+                   message_body = (
+                       message.body
+                       if not self.chat_api.supports_chat_images()
+                       else [{"type": "text", "text": message.body}]
+                   )
+                   chat_messages.append({"role": "system", "content": message_body})
+
+       # Truncate messages to fit within the token limit
+       truncated_messages = self._truncate(
+           chat_messages[1:], self.max_tokens - 1, system_message=system_message
        )

        # Check for a model override
@ -1159,19 +1317,23 @@ class GPTBot:
                await self.send_message(
                    room, "Something went wrong generating audio file.", True
                )

-           if self.debug:
-               await self.send_message(
-                   room, f"Error: {e}\n\n```\n{traceback.format_exc()}\n```", True
-               )
-
-       await self.send_message(room, response)
+       message = await self.send_message(room, response)
+
+       # Set room name
+
+       if self.generate_room_name and room.name == self.default_room_name:
+           try:
+               name = await self.generate_room_name(room)
+               await self.matrix_client.room_put_state(
+                   room.room_id, "m.room.name", {"name": name}, ""
+               )
+           except Exception as e:
+               self.logger.log(f"Error generating room name: {e}", "error")

        await self.matrix_client.room_typing(room.room_id, False)
-   async def download_file(
-       self, mxc: str, raise_error: bool = False
-   ) -> Union[DiskDownloadResponse, MemoryDownloadResponse]:
+   async def download_file(self, mxc) -> Optional[bytes]:
        """Download a file from the homeserver.

        Args:
@ -1185,12 +1347,52 @@ class GPTBot:

        if isinstance(download, DownloadError):
            self.logger.log(f"Error downloading file: {download.message}", "error")
-           if raise_error:
-               raise DownloadException(download.message)
            return

        return download

+   async def generate_room_name(self, room: MatrixRoom | str) -> str:
+       """Generate a name for a room.
+
+       Args:
+           room (MatrixRoom | str): The room to generate a name for.
+
+       Returns:
+           str: The generated name.
+       """
+
+       if isinstance(room, MatrixRoom):
+           room = room.room_id
+
+       prompt = f"Generate a short, descriptive name for this conversation. It should start with '{self.default_room_name}:' and be no more than 50 characters long. Return only the name, without any additional text."
+
+       messages = await self._last_n_messages(room, 2)
+
+       chat_messages = [[{"role": "system", "content": prompt}]]
+
+       for message in messages:
+           if isinstance(message, (RoomMessageNotice, RoomMessageText)):
+               role = (
+                   "assistant"
+                   if message.sender == self.matrix_client.user_id
+                   else "user"
+               )
+               message_body = (
+                   message.body
+                   if not self.chat_api.supports_chat_images()
+                   else [{"type": "text", "text": message.body}]
+               )
+               chat_messages.append({"role": role, "content": message_body})
+
+       response, tokens_used = await self.chat_api.generate_chat_response(
+           chat_messages,
+           room=room,
+           allow_override=False,
+           use_tools=False,
+       )
+
+       return response
+
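The room-naming feature above asks the chat model for a short title and then writes it into the room's `m.room.name` state event. A minimal sketch of just the renaming step with matrix-nio, assuming an already-authenticated client — `client` and `room_id` are illustrative names:

```python
# Sketch: renaming a Matrix room with matrix-nio once a name has been generated.
# Assumes an authenticated nio.AsyncClient with permission to change room state.
from nio import AsyncClient

async def set_room_name(client: AsyncClient, room_id: str, name: str) -> None:
    # m.room.name is a state event with an empty state key.
    await client.room_put_state(room_id, "m.room.name", {"name": name}, "")
```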
    def get_system_message(self, room: MatrixRoom | str) -> str:
        """Get the system message for a room.

@ -1,2 +0,0 @@
-class DownloadException(Exception):
-   pass
@ -2,30 +2,20 @@ import openai
import requests
import tiktoken

-import base64
+import asyncio
import json
+import base64
import inspect

from functools import partial
-from typing import Dict, List, Tuple, Generator, Optional, Mapping, Any
+from contextlib import closing
+from typing import Dict, List, Tuple, Generator, AsyncGenerator, Optional, Any
from io import BytesIO

from pydub import AudioSegment
-from PIL import Image
-from nio import (
-   RoomMessageNotice,
-   RoomMessageText,
-   RoomMessageAudio,
-   RoomMessageFile,
-   RoomMessageImage,
-   RoomMessageVideo,
-   Event,
-)

-from ..logging import Logger
-from ...tools import TOOLS, Handover, StopProcessing
-from ..exceptions import DownloadException
-from .base import BaseAI, AttributeDictionary
+from .logging import Logger
+from ..tools import TOOLS, Handover, StopProcessing

ASSISTANT_CODE_INTERPRETER = [
    {
@ -34,126 +24,58 @@ ASSISTANT_CODE_INTERPRETER = [
]


-class OpenAI(BaseAI):
+class AttributeDictionary(dict):
+   def __init__(self, *args, **kwargs):
+       super(AttributeDictionary, self).__init__(*args, **kwargs)
+       self.__dict__ = self
+
+
+class OpenAI:
+   api_key: str
+   chat_model: str = "gpt-3.5-turbo"
+   logger: Logger
+
    api_code: str = "openai"

    @property
    def chat_api(self) -> str:
        return self.chat_model

-   openai_api: openai.AsyncOpenAI
+   classification_api = chat_api
+   image_model: str = "dall-e-2"
+   tts_model: str = "tts-1-hd"
+   tts_voice: str = "alloy"
+   stt_model: str = "whisper-1"

    operator: str = "OpenAI ([https://openai.com](https://openai.com))"

    def __init__(
        self,
        bot,
-       config: Mapping,
-       logger: Optional[Logger] = None,
+       api_key,
+       chat_model=None,
+       image_model=None,
+       tts_model=None,
+       tts_voice=None,
+       stt_model=None,
+       base_url=None,
+       logger=None,
    ):
-       super().__init__(bot, config, logger)
+       self.bot = bot
+       self.api_key = api_key
+       self.chat_model = chat_model or self.chat_model
+       self.image_model = image_model or self.image_model
+       self.logger = logger or bot.logger or Logger()
+       self.base_url = base_url or openai.base_url
        self.openai_api = openai.AsyncOpenAI(
            api_key=self.api_key, base_url=self.base_url
        )
+       self.tts_model = tts_model or self.tts_model
+       self.tts_voice = tts_voice or self.tts_voice
+       self.stt_model = stt_model or self.stt_model

-   # TODO: Add descriptions for these properties
-
-   @property
-   def api_key(self):
-       return self._config["APIKey"]
-
-   @property
-   def chat_model(self):
-       return self._config.get("Model", fallback="gpt-4o")
-
-   @property
-   def image_model(self):
-       return self._config.get("ImageModel", fallback="dall-e-3")
-
-   @property
-   def tts_model(self):
-       return self._config.get("TTSModel", fallback="tts-1-hd")
-
-   @property
-   def tts_voice(self):
-       return self._config.get("TTSVoice", fallback="alloy")
-
-   @property
-   def stt_model(self):
-       return self._config.get("STTModel", fallback="whisper-1")
-
-   @property
-   def base_url(self):
-       return self._config.get("BaseURL", fallback="https://api.openai.com/v1/")
-
-   @property
-   def temperature(self):
-       return self._config.getfloat("Temperature", fallback=1.0)
-
-   @property
-   def top_p(self):
-       return self._config.getfloat("TopP", fallback=1.0)
-
-   @property
-   def frequency_penalty(self):
-       return self._config.getfloat("FrequencyPenalty", fallback=0.0)
-
-   @property
-   def presence_penalty(self):
-       return self._config.getfloat("PresencePenalty", fallback=0.0)
-
-   @property
-   def force_vision(self):
-       return self._config.getboolean("ForceVision", fallback=False)
-
-   @property
-   def force_video_input(self):
-       return self._config.getboolean("ForceVideoInput", fallback=False)
-
-   @property
-   def force_tools(self):
-       return self._config.getboolean("ForceTools", fallback=False)
-
-   @property
-   def tool_model(self):
-       return self._config.get("ToolModel")
-
-   @property
-   def vision_model(self):
-       return self._config.get("VisionModel")
-
-   @property
-   def emulate_tools(self):
-       return self._config.getboolean("EmulateTools", fallback=False)
-
-   @property
-   def max_tokens(self):
-       # TODO: This should be model-specific
-       return self._config.getint("MaxTokens", fallback=4000)
-
-   @property
-   def max_messages(self):
-       return self._config.getint("MaxMessages", fallback=30)
-
-   @property
-   def max_image_long_side(self):
-       return self._config.getint("MaxImageLongSide", fallback=2000)
-
-   @property
-   def max_image_short_side(self):
-       return self._config.getint("MaxImageShortSide", fallback=768)
-
-   def _is_tool_model(self, model: str) -> bool:
-       return model in ("gpt-3.5-turbo", "gpt-4-turbo", "gpt-4o")
-
-   def _is_vision_model(self, model: str) -> bool:
-       return model in ("gpt-4-turbo", "gpt-4o") or "vision" in model
-
    def supports_chat_images(self):
-       return self._is_vision_model(self.chat_model) or self.force_vision
+       return "vision" in self.chat_model

-   def supports_chat_videos(self):
-       return self.force_video_input
-
    def json_decode(self, data):
        if data.startswith("```json\n"):
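The `MaxImageLongSide`/`MaxImageShortSide` properties removed above (they exist only on the main side) cap the size of context images before they are base64-encoded for a vision model. A hedged sketch of that downscaling step, assuming Pillow is installed — the function name is illustrative, and the 2000x768 defaults mirror the config fallbacks shown in the hunk:

```python
# Sketch: aspect-preserving downscale before base64-encoding an image for a vision model.
# Assumes Pillow (pip install pillow). Illustrative only, not the bot's exact code.
from io import BytesIO
from PIL import Image

def shrink_image(data: bytes, long_side: int = 2000, short_side: int = 768) -> bytes:
    img = Image.open(BytesIO(data))
    fmt = img.format or "PNG"
    # thumbnail() never upscales and always keeps the aspect ratio.
    if img.width >= img.height:
        img.thumbnail((long_side, short_side))
    else:
        img.thumbnail((short_side, long_side))
    out = BytesIO()
    img.save(out, format=fmt)
    return out.getvalue()
```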
@ -166,337 +88,36 @@ class OpenAI(BaseAI):

        try:
            return json.loads(data)
-       except Exception:
+       except:
            return False

-   async def prepare_messages(
-       self,
-       event: Event,
-       messages: List[Dict[str, str]],
-       system_message=None,
-       room=None,
-   ) -> List[Any]:
-       chat_messages = []
-
-       self.logger.log(f"Incoming messages: {messages}", "debug")
-       self.logger.log(f"System message: {system_message}", "debug")
-
-       messages.append(event)
-
-       for message in messages:
-           if isinstance(message, (RoomMessageNotice, RoomMessageText)):
-               role = (
-                   "assistant"
-                   if message.sender == self.bot.matrix_client.user_id
-                   else "user"
-               )
-               if message == event or (not message.event_id == event.event_id):
-                   message_body = (
-                       message.body
-                       if not self.supports_chat_images()
-                       else [{"type": "text", "text": message.body}]
-                   )
-                   chat_messages.append({"role": role, "content": message_body})
-
-           elif isinstance(message, RoomMessageAudio) or (
-               isinstance(message, RoomMessageFile) and message.body.endswith(".mp3")
-           ):
-               role = (
-                   "assistant"
-                   if message.sender == self.bot.matrix_client.user_id
-                   else "user"
-               )
-               if message == event or (not message.event_id == event.event_id):
-                   if room and self.room_uses_stt(room):
-                       try:
-                           download = await self.bot.download_file(
-                               message.url, raise_error=True
-                           )
-                           message_text = await self.bot.stt_api.speech_to_text(
-                               download.body
-                           )
-                       except Exception as e:
-                           self.logger.log(
-                               f"Error generating text from audio: {e}", "error"
-                           )
-                           message_text = message.body
-                   else:
-                       message_text = message.body
-
-                   message_body = (
-                       message_text
-                       if not self.supports_chat_images()
-                       else [{"type": "text", "text": message_text}]
-                   )
-                   chat_messages.append({"role": role, "content": message_body})
-
-           elif isinstance(message, RoomMessageFile):
-               try:
-                   download = await self.bot.download_file(
-                       message.url, raise_error=True
-                   )
-                   if download:
-                       try:
-                           text = download.body.decode("utf-8")
-                       except UnicodeDecodeError:
-                           text = None
-
-                       if text:
-                           role = (
-                               "assistant"
-                               if message.sender == self.bot.matrix_client.user_id
-                               else "user"
-                           )
-                           if message == event or (
-                               not message.event_id == event.event_id
-                           ):
-                               message_body = (
-                                   text
-                                   if not self.supports_chat_images()
-                                   else [{"type": "text", "text": text}]
-                               )
-                               chat_messages.append(
-                                   {"role": role, "content": message_body}
-                               )
-
-               except Exception as e:
-                   self.logger.log(f"Error generating text from file: {e}", "error")
-                   message_body = (
-                       message.body
-                       if not self.supports_chat_images()
-                       else [{"type": "text", "text": message.body}]
-                   )
-                   chat_messages.append({"role": "system", "content": message_body})
-
-           elif self.supports_chat_images() and isinstance(message, RoomMessageImage):
-               try:
-                   image_url = message.url
-                   download = await self.bot.download_file(image_url, raise_error=True)
-
-                   if download:
-                       pil_image = Image.open(BytesIO(download.body))
-
-                       file_format = pil_image.format or "PNG"
-
-                       max_long_side = self.max_image_long_side
-                       max_short_side = self.max_image_short_side
-
-                       if max_long_side and max_short_side:
-                           if pil_image.width > pil_image.height:
-                               if pil_image.width > max_long_side:
-                                   pil_image.thumbnail((max_long_side, max_short_side))
-
-                           else:
-                               if pil_image.height > max_long_side:
-                                   pil_image.thumbnail((max_short_side, max_long_side))
-
-                       bio = BytesIO()
-
-                       pil_image.save(bio, format=file_format)
-
-                       encoded_url = f"data:{download.content_type};base64,{base64.b64encode(bio.getvalue()).decode('utf-8')}"
-                       parent = (
-                           chat_messages[-1]
-                           if chat_messages
-                           and chat_messages[-1]["role"]
-                           == (
-                               "assistant"
-                               if message.sender == self.bot.matrix_client.user_id
-                               else "user"
-                           )
-                           else None
-                       )
-
-                       if not parent:
-                           chat_messages.append(
-                               {
-                                   "role": (
-                                       "assistant"
-                                       if message.sender == self.matrix_client.user_id
-                                       else "user"
-                                   ),
-                                   "content": [],
-                               }
-                           )
-                           parent = chat_messages[-1]
-
-                       parent["content"].append(
-                           {"type": "image_url", "image_url": {"url": encoded_url}}
-                       )
-
-               except Exception as e:
-                   if room and isinstance(e, DownloadException):
-                       self.bot.send_message(
-                           room,
-                           f"Could not process image due to download error: {e.args[0]}",
-                           True,
-                       )
-
-                   self.logger.log(f"Error generating image from file: {e}", "error")
-                   message_body = (
-                       message.body
-                       if not self.supports_chat_images()
-                       else [{"type": "text", "text": message.body}]
-                   )
-                   chat_messages.append({"role": "system", "content": message_body})
-
-           elif self.supports_chat_videos() and (
-               isinstance(message, RoomMessageVideo)
-               or (
-                   isinstance(message, RoomMessageFile)
-                   and message.body.endswith(".mp4")
-               )
-           ):
-               try:
-                   video_url = message.url
-                   download = await self.bot.download_file(video_url, raise_error=True)
-
-                   if download:
-                       video = BytesIO(download.body)
-                       video_url = f"data:{download.content_type};base64,{base64.b64encode(video.getvalue()).decode('utf-8')}"
-
-                       parent = (
-                           chat_messages[-1]
-                           if chat_messages
-                           and chat_messages[-1]["role"]
-                           == (
-                               "assistant"
-                               if message.sender == self.bot.matrix_client.user_id
-                               else "user"
-                           )
-                           else None
-                       )
-
-                       if not parent:
-                           chat_messages.append(
-                               {
-                                   "role": (
-                                       "assistant"
-                                       if message.sender == self.matrix_client.user_id
-                                       else "user"
-                                   ),
-                                   "content": [],
-                               }
-                           )
-                           parent = chat_messages[-1]
-
-                       parent["content"].append(
-                           {"type": "image_url", "image_url": {"url": video_url}}
-                       )
-
-               except Exception as e:
-                   if room and isinstance(e, DownloadException):
-                       self.bot.send_message(
-                           room,
-                           f"Could not process video due to download error: {e.args[0]}",
-                           True,
-                       )
-
-                   self.logger.log(f"Error generating video from file: {e}", "error")
-                   message_body = (
-                       message.body
-                       if not self.supports_chat_images()
-                       else [{"type": "text", "text": message.body}]
-                   )
-                   chat_messages.append({"role": "system", "content": message_body})
-
-       self.logger.log(f"Prepared messages: {chat_messages}", "debug")
-
-       # Truncate messages to fit within the token limit
-       chat_messages = self._truncate(
-           messages=chat_messages,
-           max_tokens=self.max_tokens - 1,
-           system_message=system_message,
-       )
-
-       return chat_messages
-
-   def _truncate(
-       self,
-       messages: List[Any],
-       max_tokens: Optional[int] = None,
-       model: Optional[str] = None,
-       system_message: Optional[str] = None,
-   ) -> List[Any]:
-       """Truncate messages to fit within the token limit.
-
-       Args:
-           messages (List[Any]): The messages to truncate.
-           max_tokens (Optional[int], optional): The maximum number of tokens to use. Defaults to None, which uses the default token limit.
-           model (Optional[str], optional): The model to use. Defaults to None, which uses the default chat model.
-           system_message (Optional[str], optional): The system message to use. Defaults to None, which uses the default system message.
-
-       Returns:
-           List[Any]: The truncated messages.
-       """
-       max_tokens = max_tokens or self.max_tokens
-       model = model or self.chat_model
-       system_message = (
-           self.bot.default_system_message
-           if system_message is None
-           else system_message
-       )
-
-       try:
-           encoding = tiktoken.encoding_for_model(model)
-       except Exception:
-           # TODO: Handle this more gracefully
-           encoding = tiktoken.encoding_for_model("gpt-4o")
-
-       total_tokens = 0
-
-       system_message_tokens = (
-           0 if not system_message else (len(encoding.encode(system_message)) + 1)
-       )
-
-       if system_message_tokens > max_tokens:
-           self.logger.log(
-               f"System message is too long to fit within token limit ({system_message_tokens} tokens) - cannot proceed",
-               "error",
-           )
-           return []
-
-       total_tokens += system_message_tokens
-
-       truncated_messages = []
-
-       self.logger.log(f"Messages: {messages}", "debug")
-
-       for message in [messages[0]] + list(reversed(messages[1:])):
-           content = (
-               message["content"]
-               if isinstance(message["content"], str)
-               else (
-                   message["content"][0]["text"]
-                   if isinstance(message["content"][0].get("text"), str)
-                   else ""
-               )
-           )
-           tokens = len(encoding.encode(content)) + 1
-           if total_tokens + tokens > max_tokens:
-               break
-           total_tokens += tokens
-           truncated_messages.append(message)
-
-       system_message_dict = {
-           "role": "system",
-           "content": (
-               system_message
-               if isinstance(messages[0]["content"], str)
-               else [{"type": "text", "text": system_message}]
-           ),
-       }
-
-       final_messages = (
-           [system_message_dict]
-           + [truncated_messages[0]]
-           + list(reversed(truncated_messages[1:]))
-       )
-
-       self.logger.log(f"Truncated messages: {final_messages}", "debug")
-
-       return final_messages
-
+   async def _request_with_retries(
+       self, request: partial, attempts: int = 5, retry_interval: int = 2
+   ) -> AsyncGenerator[Any | list | Dict, None]:
+       """Retry a request a set number of times if it fails.
+
+       Args:
+           request (partial): The request to make with retries.
+           attempts (int, optional): The number of attempts to make. Defaults to 5.
+           retry_interval (int, optional): The interval in seconds between attempts. Defaults to 2 seconds.
+
+       Returns:
+           AsyncGenerator[Any | list | Dict, None]: The OpenAI response for the request.
+       """
+       # call the request function and return the response if it succeeds, else retry
+       current_attempt = 1
+       while current_attempt <= attempts:
+           try:
+               response = await request()
+               return response
+           except Exception as e:
+               self.logger.log(f"Request failed: {e}", "error")
+               self.logger.log(f"Retrying in {retry_interval} seconds...")
+               await asyncio.sleep(retry_interval)
+               current_attempt += 1
+
+       # if all attempts failed, raise an exception
+       raise Exception("Request failed after all attempts.")
+
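The `_request_with_retries` helper shown above retries a `functools.partial`-wrapped API call at a fixed interval. A hedged sketch of the same pattern with exponential backoff — the backoff schedule is a variant of mine, not the bot's behavior:

```python
# Sketch: the retry pattern above, with exponential backoff as an illustrative variant.
import asyncio
from functools import partial

async def request_with_retries(request: partial, attempts: int = 5, base_delay: float = 2.0):
    for attempt in range(1, attempts + 1):
        try:
            return await request()
        except Exception:
            if attempt == attempts:
                raise  # surface the last error once all attempts are exhausted
            # Back off 2s, 4s, 8s, ... instead of a fixed interval.
            await asyncio.sleep(base_delay * 2 ** (attempt - 1))

# Usage (hypothetical client object):
# response = await request_with_retries(partial(client.chat.completions.create, **kwargs))
```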
    async def generate_chat_response(
        self,
@ -541,9 +162,9 @@ class OpenAI(BaseAI):
        )

        if count > 5:
-           self.logger.log("Recursion depth exceeded, aborting.")
+           self.logger.log(f"Recursion depth exceeded, aborting.")
            return await self.generate_chat_response(
-               messages=messages,
+               messsages=messages,
                user=user,
                room=room,
                allow_override=False,  # TODO: Could this be a problem?
@ -565,15 +186,10 @@ class OpenAI(BaseAI):

        original_messages = messages

-       if (
-           allow_override
-           and use_tools
-           and self.tool_model
-           and not (self._is_tool_model(chat_model) or self.force_tools)
-       ):
-           if self.tool_model:
-               self.logger.log("Overriding chat model to use tools")
-               chat_model = self.tool_model
+       if allow_override and not "gpt-3.5-turbo" in original_model:
+           if self.bot.config.getboolean("OpenAI", "ForceTools", fallback=False):
+               self.logger.log(f"Overriding chat model to use tools")
+               chat_model = "gpt-3.5-turbo-0125"

        out_messages = []

@ -598,9 +214,9 @@ class OpenAI(BaseAI):

        if (
            use_tools
-           and self.emulate_tools
-           and not self.force_tools
-           and not self._is_tool_model(chat_model)
+           and self.bot.config.getboolean("OpenAI", "EmulateTools", fallback=False)
+           and not self.bot.config.getboolean("OpenAI", "ForceTools", fallback=False)
+           and not "gpt-3.5-turbo" in chat_model
        ):
            self.bot.logger.log("Using tool emulation mode.", "debug")

@ -641,33 +257,19 @@ class OpenAI(BaseAI):
            "model": chat_model,
            "messages": messages,
            "user": room,
-           "temperature": self.temperature,
-           "top_p": self.top_p,
-           "frequency_penalty": self.frequency_penalty,
-           "presence_penalty": self.presence_penalty,
        }

-       if (self._is_tool_model(chat_model) and use_tools) or self.force_tools:
+       if "gpt-3.5-turbo" in chat_model and use_tools:
            kwargs["tools"] = tools

-       # TODO: Look into this
        if "gpt-4" in chat_model:
-           kwargs["max_tokens"] = self.max_tokens
-
-       api_url = self.base_url
-
-       if chat_model.startswith("gpt-"):
-           if not self.chat_model.startswith("gpt-"):
-               # The model is overridden, we have to ensure that OpenAI is used
-               if self.api_key.startswith("sk-"):
-                   self.openai_api.base_url = "https://api.openai.com/v1/"
+           kwargs["max_tokens"] = self.bot.config.getint(
+               "OpenAI", "MaxTokens", fallback=4000
+           )

        chat_partial = partial(self.openai_api.chat.completions.create, **kwargs)
        response = await self._request_with_retries(chat_partial)

-       # Setting back the API URL to whatever it was before
-       self.openai_api.base_url = api_url
-
        choice = response.choices[0]
        result_text = choice.message.content

@ -682,7 +284,7 @@ class OpenAI(BaseAI):
                    tool_response = await self.bot.call_tool(
                        tool_call, room=room, user=user
                    )
-                   if tool_response is not False:
+                   if tool_response != False:
                        tool_responses.append(
                            {
                                "role": "tool",
@ -703,7 +305,7 @@ class OpenAI(BaseAI):
                )

            if not tool_responses:
-               self.logger.log("No more responses received, aborting.")
+               self.logger.log(f"No more responses received, aborting.")
                result_text = False
            else:
                try:
@ -719,7 +321,7 @@ class OpenAI(BaseAI):
        except openai.APIError as e:
            if e.code == "max_tokens":
                self.logger.log(
-                   "Max tokens exceeded, falling back to no-tools response."
+                   f"Max tokens exceeded, falling back to no-tools response."
                )
                try:
                    new_messages = []
@ -768,6 +370,7 @@ class OpenAI(BaseAI):
        elif isinstance((tool_object := self.json_decode(result_text)), dict):
            if "tool" in tool_object:
                tool_name = tool_object["tool"]
+               tool_class = TOOLS[tool_name]
                tool_parameters = (
                    tool_object["parameters"] if "parameters" in tool_object else {}
                )
@ -791,7 +394,7 @@ class OpenAI(BaseAI):
                tool_response = await self.bot.call_tool(
                    tool_call, room=room, user=user
                )
-               if tool_response is not False:
+               if tool_response != False:
                    tool_responses = [
                        {
                            "role": "system",
@ -811,7 +414,7 @@ class OpenAI(BaseAI):
                )

            if not tool_responses:
-               self.logger.log("No response received, aborting.")
+               self.logger.log(f"No response received, aborting.")
                result_text = False
            else:
                try:
@ -879,14 +482,9 @@ class OpenAI(BaseAI):
                model=original_model,
            )

-       if not result_text:
-           self.logger.log(
-               "Received an empty response from the OpenAI endpoint.", "debug"
-           )
-
        try:
            tokens_used = response.usage.total_tokens
-       except Exception:
+       except:
            tokens_used = 0

        self.logger.log(f"Generated response with {tokens_used} tokens.")
@ -925,7 +523,7 @@ Only the event_types mentioned above are allowed, you must not respond in any ot

        try:
            result = json.loads(response.choices[0].message["content"])
-       except Exception:
+       except:
            result = {"type": "chat", "prompt": query}

        tokens_used = response.usage["total_tokens"]
@ -968,7 +566,7 @@ Only the event_types mentioned above are allowed, you must not respond in any ot
        Returns:
            Tuple[str, int]: The text and the number of tokens used.
        """
-       self.logger.log("Generating text from speech...")
+       self.logger.log(f"Generating text from speech...")

        audio_file = BytesIO()
        AudioSegment.from_file(BytesIO(audio)).export(audio_file, format="mp3")
@ -1055,20 +653,18 @@ Only the event_types mentioned above are allowed, you must not respond in any ot
        Returns:
            Tuple[str, int]: The description and the number of tokens used.
        """
-       self.logger.log("Generating description for images in conversation...")
+       self.logger.log(f"Generating description for images in conversation...")

        system_message = "You are an image description generator. You generate descriptions for all images in the current conversation, one after another."

        messages = [{"role": "system", "content": system_message}] + messages[1:]

-       chat_model = self.chat_model
-
-       if not self._is_vision_model(chat_model):
-           chat_model = self.vision_model or "gpt-4o"
+       if not "vision" in (chat_model := self.chat_model):
+           chat_model = self.chat_model + "gpt-4-vision-preview"

        chat_partial = partial(
            self.openai_api.chat.completions.create,
-           model=chat_model,
+           model=self.chat_model,
            messages=messages,
            user=str(user),
        )
@ -1,8 +1,9 @@
import trackingmore
+import requests

from .logging import Logger

-from typing import Tuple, Optional
+from typing import Dict, List, Tuple, Generator, Optional

class TrackingMore:
    api_key: str
@ -3,7 +3,7 @@ import requests

from .logging import Logger

-from typing import Generator, Optional
+from typing import Dict, List, Tuple, Generator, Optional

class WolframAlpha:
    api_key: str
@ -3,16 +3,21 @@ from nio.rooms import MatrixRoom


async def command_botinfo(room: MatrixRoom, event: RoomMessageText, bot):
-   bot.logger.log("Showing bot info...")
+   logging("Showing bot info...")

-   body = f"""GPT Room info:
+   body = f"""GPT Info:

-Model: {await bot.get_room_model(room)}\n
-Maximum context tokens: {bot.chat_api.max_tokens}\n
-Maximum context messages: {bot.chat_api.max_messages}\n
-Bot user ID: {bot.matrix_client.user_id}\n
-Current room ID: {room.room_id}\n
+Model: {bot.model}
+Maximum context tokens: {bot.max_tokens}
+Maximum context messages: {bot.max_messages}
+
+Room info:
+
+Bot user ID: {bot.matrix_client.user_id}
+Current room ID: {room.room_id}
System message: {bot.get_system_message(room)}
+
+For usage statistics, run !gptbot stats
"""

    await bot.send_message(room, body, True)

@ -23,12 +23,14 @@ async def command_calculate(room: MatrixRoom, event: RoomMessageText, bot):
        bot.logger.log("Querying calculation API...")

        for subpod in bot.calculation_api.generate_calculation_response(prompt, text, results_only, user=room.room_id):
-           bot.logger.log("Sending subpod...")
+           bot.logger.log(f"Sending subpod...")
            if isinstance(subpod, bytes):
                await bot.send_image(room, subpod)
            else:
                await bot.send_message(room, subpod, True)

+       bot.log_api_usage(event, room, f"{bot.calculation_api.api_code}-{bot.calculation_api.calculation_api}", tokens_used)
+
        return

    await bot.send_message(room, "You need to provide a prompt.", True)
@ -9,7 +9,7 @@ async def command_dice(room: MatrixRoom, event: RoomMessageText, bot):

    try:
        sides = int(event.body.split()[2])
-   except (ValueError, IndexError):
+   except ValueError:
        sides = 6

    if sides < 2:
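The broader `except` on the main side matters because `!gptbot dice` without an argument makes the index lookup fail before `int()` ever runs. A tiny illustrative sketch (function name is mine, not the bot's):

```python
# Why catching IndexError matters: with no argument, split() yields too few items,
# so body.split()[2] raises IndexError rather than ValueError. Illustrative only.
def parse_sides(body: str, default: int = 6) -> int:
    try:
        return int(body.split()[2])
    except (ValueError, IndexError):  # "!gptbot dice" alone would raise IndexError
        return default
```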
@ -8,17 +8,18 @@ async def command_help(room: MatrixRoom, event: RoomMessageText, bot):
    - !gptbot help - Show this message
    - !gptbot botinfo - Show information about the bot
    - !gptbot privacy - Show privacy information
-   - !gptbot newroom <room name> - Create a new room and invite yourself to it
-   - !gptbot systemmessage <message> - Get or set the system message for this room
+   - !gptbot newroom \<room name\> - Create a new room and invite yourself to it
+   - !gptbot stats - Show usage statistics for this room
+   - !gptbot systemmessage \<message\> - Get or set the system message for this room
    - !gptbot space [enable|disable|update|invite] - Enable, disable, force update, or invite yourself to your space
    - !gptbot coin - Flip a coin (heads or tails)
    - !gptbot dice [number] - Roll a dice with the specified number of sides (default: 6)
-   - !gptbot imagine <prompt> - Generate an image from a prompt
-   - !gptbot calculate [--text] [--details] <query> - Calculate a result to a calculation, optionally forcing text output instead of an image, and optionally showing additional details like the input interpretation
-   - !gptbot chat <message> - Send a message to the chat API
-   - !gptbot classify <message> - Classify a message using the classification API
-   - !gptbot custom <message> - Used for custom commands handled by the chat model and defined through the room's system message
-   - !gptbot roomsettings [use_classification|use_timing|always_reply|system_message|tts] [true|false|<message>] - Get or set room settings
+   - !gptbot imagine \<prompt\> - Generate an image from a prompt
+   - !gptbot calculate [--text] [--details] \<query\> - Calculate a result to a calculation, optionally forcing text output instead of an image, and optionally showing additional details like the input interpretation
+   - !gptbot chat \<message\> - Send a message to the chat API
+   - !gptbot classify \<message\> - Classify a message using the classification API
+   - !gptbot custom \<message\> - Used for custom commands handled by the chat model and defined through the room's system message
+   - !gptbot roomsettings [use_classification|use_timing|always_reply|system_message|tts] [true|false|\<message\>] - Get or set room settings
    - !gptbot ignoreolder - Ignore messages before this point as context
    """

@ -16,7 +16,7 @@ async def command_imagine(room: MatrixRoom, event: RoomMessageText, bot):
        return

    for image in images:
-       bot.logger.log("Sending image...")
+       bot.logger.log(f"Sending image...")
        await bot.send_image(room, image)

    bot.log_api_usage(event, room, f"{bot.image_api.api_code}-{bot.image_api.image_model}", tokens_used)

@ -13,7 +13,7 @@ async def command_newroom(room: MatrixRoom, event: RoomMessageText, bot):

    if isinstance(new_room, RoomCreateError):
        bot.logger.log(f"Failed to create room: {new_room.message}")
-       await bot.send_message(room, "Sorry, I was unable to create a new room. Please try again later, or create a room manually.", True)
+       await bot.send_message(room, f"Sorry, I was unable to create a new room. Please try again later, or create a room manually.", True)
        return

    bot.logger.log(f"Inviting {event.sender} to new room...")
@ -21,7 +21,7 @@ async def command_newroom(room: MatrixRoom, event: RoomMessageText, bot):

    if isinstance(invite, RoomInviteError):
        bot.logger.log(f"Failed to invite user: {invite.message}")
-       await bot.send_message(room, "Sorry, I was unable to invite you to the new room. Please try again later, or create a room manually.", True)
+       await bot.send_message(room, f"Sorry, I was unable to invite you to the new room. Please try again later, or create a room manually.", True)
        return

    with closing(bot.database.cursor()) as cursor:
@ -43,4 +43,4 @@ async def command_newroom(room: MatrixRoom, event: RoomMessageText, bot):

    await bot.matrix_client.joined_rooms()
    await bot.send_message(room, f"Alright, I've created a new room called '{room_name}' and invited you to it. You can find it at {new_room.room_id}", True)
-   await bot.send_message(bot.matrix_client.rooms[new_room.room_id], "Welcome to the new room! What can I do for you?")
+   await bot.send_message(bot.matrix_client.rooms[new_room.room_id], f"Welcome to the new room! What can I do for you?")
@ -11,7 +11,7 @@ async def command_privacy(room: MatrixRoom, event: RoomMessageText, bot):
        body += "- For chat requests: " + f"{bot.chat_api.operator}" + "\n"
    if bot.image_api:
        body += "- For image generation requests (!gptbot imagine): " + f"{bot.image_api.operator}" + "\n"
-   if bot.calculation_api:
-       body += "- For calculation requests (!gptbot calculate): " + f"{bot.calculation_api.operator}" + "\n"
+   if bot.calculate_api:
+       body += "- For calculation requests (!gptbot calculate): " + f"{bot.calculate_api.operator}" + "\n"

    await bot.send_message(room, body, True)
@ -114,7 +114,7 @@ async def command_roomsettings(room: MatrixRoom, event: RoomMessageText, bot):
        await bot.send_message(room, f"The current chat model is: '{value}'.", True)
        return

-   message = """The following settings are available:
+   message = f"""The following settings are available:

- system_message [message]: Get or set the system message to be sent to the chat model
- classification [true/false]: Get or set whether the room uses classification

@ -120,7 +120,7 @@ async def command_space(room: MatrixRoom, event: RoomMessageText, bot):

    if isinstance(response, RoomInviteError):
        bot.logger.log(
-           f"Failed to invite user {event.sender} to space {space}", "error")
+           f"Failed to invite user {user} to space {space}", "error")
        await bot.send_message(
            room, "Sorry, I couldn't invite you to the space. Please try again later.", True)
        return

@ -5,30 +5,16 @@ from contextlib import closing


async def command_stats(room: MatrixRoom, event: RoomMessageText, bot):
-   await bot.send_message(
-       room,
-       "The `stats` command is no longer supported. Sorry for the inconvenience.",
-       True,
-   )
-   return
-
-   # Yes, the code below is unreachable, but it's kept here for reference.
-
    bot.logger.log("Showing stats...")

    if not bot.database:
        bot.logger.log("No database connection - cannot show stats")
-       await bot.send_message(
-           room,
-           "Sorry, I'm not connected to a database, so I don't have any statistics on your usage.",
-           True,
-       )
+       bot.send_message(room, "Sorry, I'm not connected to a database, so I don't have any statistics on your usage.", True)
        return

    with closing(bot.database.cursor()) as cursor:
        cursor.execute(
-           "SELECT SUM(tokens) FROM token_usage WHERE room_id = ?", (room.room_id,)
-       )
+           "SELECT SUM(tokens) FROM token_usage WHERE room_id = ?", (room.room_id,))
        total_tokens = cursor.fetchone()[0] or 0

-   await bot.send_message(room, f"Total tokens used: {total_tokens}", True)
+   bot.send_message(room, f"Total tokens used: {total_tokens}", True)
@ -15,7 +15,7 @@ async def command_tts(room: MatrixRoom, event: RoomMessageText, bot):
        await bot.send_message(room, "Sorry, I couldn't generate an audio file. Please try again later.", True)
        return

-   bot.logger.log("Sending audio file...")
+   bot.logger.log(f"Sending audio file...")
    await bot.send_file(room, content, "audio.mp3", "audio/mpeg", "m.audio")

    return

```diff
@@ -22,7 +22,7 @@ def get_version(db: SQLiteConnection) -> int:
     try:
         return int(db.execute("SELECT MAX(id) FROM migrations").fetchone()[0])
-    except Exception:
+    except:
         return 0

 def migrate(db: SQLiteConnection, from_version: Optional[int] = None, to_version: Optional[int] = None) -> None:
```
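Beyond the style difference (a bare `except:` also swallows `KeyboardInterrupt` and `SystemExit`, which is why `except Exception:` is generally preferred), the lookup itself is compact enough to sketch in full. This assumes a `migrations` table whose integer `id` column records applied migrations:

```python
import sqlite3

def get_version(db: sqlite3.Connection) -> int:
    try:
        # The highest applied migration id doubles as the schema version.
        return int(db.execute("SELECT MAX(id) FROM migrations").fetchone()[0])
    except Exception:
        # Covers both a missing migrations table (OperationalError) and an
        # empty one, where MAX() is NULL and int(None) raises TypeError.
        return 0
```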
```diff
@@ -1,5 +1,7 @@
 # Migration for Matrix Store - No longer used

+from datetime import datetime
+from contextlib import closing

 def migration(conn):
     pass
```
```diff
@@ -1,6 +1,6 @@
 from importlib import import_module

-from .base import BaseTool, StopProcessing, Handover  # noqa: F401
+from .base import BaseTool, StopProcessing, Handover

 TOOLS = {}
```
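The `import_module` import alongside the `TOOLS` dict suggests a dynamic registry: each tool module is imported by name and its class stored under that name. A hedged sketch of how such a registry is typically filled in a package `__init__.py` (the tool list and the capitalize-the-module-name lookup convention are assumptions for illustration, not taken from the diff):

```python
from importlib import import_module

from .base import BaseTool, StopProcessing, Handover  # noqa: F401

TOOLS = {}

for tool_name in ("weather", "newroom", "imagedescription"):
    # Import the sibling module (e.g. .weather) and grab the class named
    # after it (Weather). Relative imports require this to live in a package.
    module = import_module(f".{tool_name}", package=__name__)
    TOOLS[tool_name] = getattr(module, tool_name.capitalize())
```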
```diff
@@ -1,4 +1,4 @@
-from .base import BaseTool
+from .base import BaseTool, Handover

 class Imagedescription(BaseTool):
     DESCRIPTION = "Describe the content of the images in the conversation."
```
```diff
@@ -47,7 +47,7 @@ class Newroom(BaseTool):
             await self.bot.add_rooms_to_space(space[0], [new_room.room_id])

         if self.bot.logo_uri:
-            await self.bot.matrix_client.room_put_state(new_room, "m.room.avatar", {
+            await self.bot.matrix_client.room_put_state(room, "m.room.avatar", {
                 "url": self.bot.logo_uri
             }, "")
```
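For reference, a room avatar is just an `m.room.avatar` state event with an empty state key. A minimal sketch with matrix-nio, assuming `client` is a logged-in `AsyncClient` and `logo_uri` is an `mxc://` content URI obtained from a prior upload:

```python
from nio import AsyncClient

async def set_room_avatar(client: AsyncClient, room_id: str, logo_uri: str) -> None:
    # room_put_state() expects a room ID string; "url" must be an
    # mxc:// content URI, not a plain https:// link.
    await client.room_put_state(
        room_id,
        "m.room.avatar",
        {"url": logo_uri},
        state_key="",
    )
```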
```diff
@@ -17,10 +17,6 @@ class Weather(BaseTool):
                 "type": "string",
                 "description": "The longitude of the location.",
             },
-            "name": {
-                "type": "string",
-                "description": "A location name to include in the report. This is optional, latitude and longitude are always required."
-            }
         },
         "required": ["latitude", "longitude"],
     }
@@ -30,8 +26,6 @@ class Weather(BaseTool):
         if not (latitude := self.kwargs.get("latitude")) or not (longitude := self.kwargs.get("longitude")):
             raise Exception('No location provided.')

-        name = self.kwargs.get("name")
-
         weather_api_key = self.bot.config.get("OpenWeatherMap", "APIKey")

         if not weather_api_key:
@@ -43,7 +37,7 @@ class Weather(BaseTool):
         async with session.get(url) as response:
             if response.status == 200:
                 data = await response.json()
-                return f"""**Weather report{f" for {name}" if name else ""}**
+                return f"""**Weather report**
 Current: {data['current']['temp']}°C, {data['current']['weather'][0]['description']}
 Feels like: {data['current']['feels_like']}°C
 Humidity: {data['current']['humidity']}%
```
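Finally, the report on both sides is assembled from an OpenWeatherMap One Call response fetched with aiohttp. A trimmed, standalone sketch of that fetch as it might look outside the tool class (the endpoint version and URL construction are assumptions; the key and coordinates are placeholders):

```python
import asyncio
import aiohttp

async def fetch_weather(api_key: str, latitude: float, longitude: float) -> str:
    # Assumed One Call 3.0 endpoint; units=metric returns temperatures in °C.
    url = (
        "https://api.openweathermap.org/data/3.0/onecall"
        f"?lat={latitude}&lon={longitude}&units=metric&appid={api_key}"
    )
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status != 200:
                raise Exception(f"Weather API returned HTTP {response.status}")
            data = await response.json()
    current = data["current"]
    return (
        f"Current: {current['temp']}°C, {current['weather'][0]['description']}\n"
        f"Feels like: {current['feels_like']}°C\n"
        f"Humidity: {current['humidity']}%"
    )

# Example usage (needs a valid OpenWeatherMap key):
# print(asyncio.run(fetch_weather("YOUR_API_KEY", 48.21, 16.37)))
```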