import openai
import requests
import json
from .logging import Logger

from typing import Dict, List, Tuple, Generator, Optional


class OpenAI:
    api_key: str
    chat_model: str = "gpt-3.5-turbo"
    logger: Logger
    api_code: str = "openai"

    @property
    def chat_api(self) -> str:
        return self.chat_model

    classification_api = chat_api

    image_api: str = "dalle"

    operator: str = "OpenAI (https://openai.com)"
    def __init__(self, api_key, chat_model=None, logger=None):
        self.api_key = api_key
        self.chat_model = chat_model or self.chat_model
        self.logger = logger or Logger()
    def generate_chat_response(self, messages: List[Dict[str, str]], user: Optional[str] = None) -> Tuple[str, int]:
        """Generate a response to a chat message.

        Args:
            messages (List[Dict[str, str]]): A list of messages to use as context.
            user (Optional[str]): An identifier for the end user, passed through to the API if given.

        Returns:
            Tuple[str, int]: The response text and the number of tokens used.
        """
        self.logger.log(f"Generating response to {len(messages)} messages using {self.chat_model}...")

        response = openai.ChatCompletion.create(
            model=self.chat_model,
            messages=messages,
            api_key=self.api_key,
            user=user
        )

        result_text = response.choices[0].message['content']
        tokens_used = response.usage["total_tokens"]

        self.logger.log(f"Generated response with {tokens_used} tokens.")
        return result_text, tokens_used
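    # Illustrative call (not from the original source), assuming `api` is an
    # instance of this class; messages use the OpenAI chat format, e.g.
    #   text, tokens = api.generate_chat_response([{"role": "user", "content": "Hello!"}])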
    def classify_message(self, query: str, user: Optional[str] = None) -> Tuple[Dict[str, str], int]:
        """Classify a message as a chat prompt or as a request for another API.

        Args:
            query (str): The message to classify.
            user (Optional[str]): An identifier for the end user, passed through to the API if given.

        Returns:
            Tuple[Dict[str, str], int]: The classification result and the number of tokens used.
        """
        system_message = """You are a classifier for different types of messages. You decide whether an incoming message is meant to be a prompt for an AI chat model, or meant for a different API. You respond with a JSON object like this:

{ "type": event_type, "prompt": prompt }

- If the message you received is meant for the AI chat model, the event_type is "chat", and the prompt is the literal content of the message you received. This is also the default if none of the other options apply.
- If it is a prompt for a calculation that can be answered better by WolframAlpha than an AI chat bot, the event_type is "calculate". Optimize the message you received for input to WolframAlpha, and return it as the prompt attribute.
- If it is a prompt for an AI image generation, the event_type is "imagine". Optimize the message you received for use with DALL-E, and return it as the prompt attribute.
- If the user is asking you to create a new room, the event_type is "newroom", and the prompt is the name of the room, if one is given, else an empty string.
- If the user is asking you to throw a coin, the event_type is "coin". The prompt is an empty string.
- If the user is asking you to roll a dice, the event_type is "dice". The prompt is a string containing an optional number of sides, if one is given, else an empty string.
- If for any reason you are unable to classify the message (for example, if it infringes on your terms of service), the event_type is "error", and the prompt is a message explaining why you are unable to process the message.

Only the event_types mentioned above are allowed, you must not respond in any other way."""
        messages = [
            {
                "role": "system",
                "content": system_message
            },
            {
                "role": "user",
                "content": query
            }
        ]

        self.logger.log(f"Classifying message '{query}'...")

        response = openai.ChatCompletion.create(
            model=self.chat_model,
            messages=messages,
            api_key=self.api_key,
            user=user
        )

        try:
            result = json.loads(response.choices[0].message['content'])
        except Exception:
            # If the model does not return valid JSON, fall back to treating the
            # message as a plain chat prompt.
            result = {"type": "chat", "prompt": query}

        tokens_used = response.usage["total_tokens"]

        self.logger.log(f"Classified message as {result['type']} with {tokens_used} tokens.")
        return result, tokens_used
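    # Illustrative example (not from the original source): with the system prompt
    # above, classify_message("What is 2+2?") would be expected to return something
    # like ({"type": "calculate", "prompt": "2+2"}, 250), where the second element
    # is the token count reported by the API.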
    def generate_image(self, prompt: str, user: Optional[str] = None) -> Tuple[List[bytes], int]:
        """Generate an image from a prompt.

        Args:
            prompt (str): The prompt to use.
            user (Optional[str]): An identifier for the end user, passed through to the API if given.

        Returns:
            Tuple[List[bytes], int]: The downloaded image data and the number of images.
        """
        self.logger.log(f"Generating image from prompt '{prompt}'...")

        response = openai.Image.create(
            prompt=prompt,
            n=1,
            api_key=self.api_key,
            size="1024x1024",
            user=user
        )

        images = []

        for image in response.data:
            image = requests.get(image.url).content
            images.append(image)

        return images, len(images)
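

# A minimal usage sketch, not part of the original module. It assumes a valid
# OpenAI API key in the OPENAI_API_KEY environment variable and the openai 0.x
# SDK (which provides openai.ChatCompletion / openai.Image as used above).
# Because this module uses relative imports, run it via `python -m <package>.openai`
# rather than as a plain script.
if __name__ == "__main__":
    import os

    api = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

    # Classify an incoming message, then route it the way the bot would.
    classification, tokens = api.classify_message("Please draw me a picture of a cat")
    print(f"Classified as {classification['type']} ({tokens} tokens)")

    if classification["type"] == "imagine":
        images, count = api.generate_image(classification["prompt"])
        print(f"Received {count} image(s); the first is {len(images[0])} bytes")
    else:
        text, tokens = api.generate_chat_response(
            [{"role": "user", "content": classification["prompt"]}]
        )
        print(text)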