# Copy this file to config.ini and replace the values below to match your needs
#
# The values that are not commented have to be set, everything else comes with
# sensible defaults.

[OpenAI]

# The Chat Completion model you want to use.
#
# Unless you are in the GPT-4 beta (if you don't know - you aren't),
# leave this as the default value (gpt-3.5-turbo)
#
# Model = gpt-3.5-turbo

# Your OpenAI API key
#
# Find this in your OpenAI account:
# https://platform.openai.com/account/api-keys
#
APIKey = sk-yoursecretkey

# The maximum amount of input sent to the API, in tokens
#
# In conjunction with MaxMessages, this determines how much context (= previous
# messages) you can send with your query.
#
# If you set this too high, the responses you receive will become shorter the
# longer the conversation gets.
#
# https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
#
# MaxTokens = 3000
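#
# As a rough illustration (assuming gpt-3.5-turbo's 4096-token context window):
# with MaxTokens = 3000, only about 1000 tokens remain available for each
# response once the input budget is fully used.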

# The maximum number of messages in the room that will be considered as context
#
# By default, the last (up to) 20 messages will be sent as context, in addition
# to the system message and the current query itself.
#
# MaxMessages = 20
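#
# With the defaults above, a single query may therefore send up to 22 messages
# (20 history messages, the system message and the query itself), all of which
# count toward the MaxTokens input budget.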

[WolframAlpha]

# An API key for Wolfram|Alpha
# Request one at https://developer.wolframalpha.com
#
# Leave unset to disable Wolfram|Alpha integration (`!gptbot calculate`)
#
#APIKey = YOUR-APIKEY

[Matrix]

# The URL to your Matrix homeserver
#
Homeserver = https://matrix.local

# An Access Token for the user your bot runs as
# Can be obtained using a request like this:
#
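# (Illustrative example only; substitute your own homeserver, username and
# password. The token is returned in the "access_token" field of the response.)
#
# curl -X POST https://matrix.local/_matrix/client/v3/login \
#   -H "Content-Type: application/json" \
#   -d '{"type": "m.login.password", "identifier": {"type": "m.id.user", "user": "gptbot"}, "password": "yourpassword"}'
#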
# See https://www.matrix.org/docs/guides/client-server-api#login
# for information on how to obtain this value
#
AccessToken = syt_yoursynapsetoken

# The Matrix user ID of the bot (@local:domain.tld)
# Only specify this if the bot fails to figure it out by itself
#
# UserID = @gptbot:matrix.local

[GPTBot]

# The default room name used by the !newroom command
# Defaults to GPTBot if not set
#
# DefaultRoomName = GPTBot

# Contents of a special message sent to the GPT API with every request.
# Can be used to give the bot some context about the environment it's running in
#
# SystemMessage = You are a helpful bot.

# Force inclusion of the SystemMessage defined above even if a custom system
# message is defined at the room level
# If no custom message is defined for the room, SystemMessage is always included
#
# ForceSystemMessage = 0

[Database]

# Settings for the DuckDB database.
# If not defined, the bot will not be able to remember anything, and will not support encryption
# N.B.: Encryption doesn't work as it is supposed to anyway.

Path = database.db