Refactor image generation code with dynamic size and model
The commit modifies the image generation code in the OpenAI class. The size of the generated image can now be set dynamically from `--portrait` and `--landscape` flags at the start of the provided prompt when the dall-e-3 model is used; otherwise the default 1024x1024 size is kept. The selected size and the configured model are logged, and the size is passed to the API call instead of a hard-coded value.
parent 14da88de8b
commit fbbe82a1fc

1 changed file with 15 additions and 1 deletion
@@ -225,13 +225,27 @@ Only the event_types mentioned above are allowed, you must not respond in any ot
         """
         self.logger.log(f"Generating image from prompt '{prompt}'...")
 
+        split_prompt = prompt.split()
+
+        size = "1024x1024"
+
+        if self.image_model == "dall-e-3":
+            if split_prompt[0] == "--portrait":
+                size = "1024x1792"
+                prompt = " ".join(split_prompt[1:])
+            elif split_prompt[0] == "--landscape":
+                size = "1792x1024"
+                prompt = " ".join(split_prompt[1:])
+
+        self.logger.log(f"Generating image with size {size} using model {self.image_model}...")
+
         image_partial = partial(
             self.openai_api.images.generate,
             model=self.image_model,
             quality="standard" if self.image_model != "dall-e-3" else "hd",
             prompt=prompt,
             n=1,
-            size="1024x1024",
+            size=size,
             user=user,
         )
         response = await self._request_with_retries(image_partial)
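For reference, a minimal standalone sketch of the prompt-flag handling this diff introduces. The parse_image_prompt helper and the example prompts are illustrative only and do not exist in the repository; they simply mirror the logic added above.

def parse_image_prompt(prompt: str, image_model: str) -> tuple[str, str]:
    """Return (cleaned_prompt, size) mirroring the logic added in this commit."""
    split_prompt = prompt.split()
    size = "1024x1024"  # default size, used for all models

    # Only dall-e-3 accepts the non-square sizes used below
    if image_model == "dall-e-3" and split_prompt:
        if split_prompt[0] == "--portrait":
            size = "1024x1792"
            prompt = " ".join(split_prompt[1:])
        elif split_prompt[0] == "--landscape":
            size = "1792x1024"
            prompt = " ".join(split_prompt[1:])

    return prompt, size

# Hypothetical usage: the flag is stripped from the prompt and mapped to a size
assert parse_image_prompt("--portrait a castle at dusk", "dall-e-3") == ("a castle at dusk", "1024x1792")
assert parse_image_prompt("a castle at dusk", "dall-e-2") == ("a castle at dusk", "1024x1024")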