ScreamingOpossum/openai_interaction/image_recognition_test.py

import asyncio

from openai import AsyncOpenAI

# Async client so the coroutine below can await the API call instead of blocking.
client = AsyncOpenAI()

SUPPORTED_IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.webp', '.gif')


async def image_recognition(content: str, files: list[dict[str, str]]) -> str:
    # Start with the text prompt, then append every attachment whose filename
    # has a supported image extension; anything else (e.g. a PDF) is skipped.
    input_data = [{'type': 'input_text', 'text': content}]
    input_data.extend(
        [
            {
                'type': 'input_image',
                'image_url': attachment.get('url'),
            }
            for attachment in files
            if attachment.get('filename', '').lower().endswith(SUPPORTED_IMAGE_EXTENSIONS)
        ]
    )
    response = await client.responses.create(
        model='gpt-4.1-mini',
        input=[{
            'role': 'user',
            'content': input_data,
        }],
    )
    return response.output_text

if __name__ == '__main__':
    data = {
        'content': "What's on these images?",
        'files': [
            {
                'filename': 'Вы_продоёте_рыбов.jpg',
                'url': 'https://upload.wikimedia.org/wikipedia/ru/8/8a/%D0%92%D1%8B_%D0%BF%D1%80%D0%BE%D0%B4%D0%BE%D1%91%D1%82%D0%B5_%D1%80%D1%8B%D0%B1%D0%BE%D0%B2.jpg',
            },
            {
                'filename': 'Putin_and_Macron_meeting_with_a_large_table.jpg',
                'url': 'https://upload.wikimedia.org/wikipedia/commons/a/a9/Putin_and_Macron_meeting_with_a_large_table.jpg',
            },
            {
                'filename': 'shitty_pdf_file.pdf',
                'url': 'https://example.com/shitty_pdf_file.pdf',
            },
        ],
    }
    # image_recognition is a coroutine, so drive it with asyncio.run().
    print(asyncio.run(image_recognition(**data)))