diff --git a/openai_interaction/image_recognition_test.py b/openai_interaction/image_recognition_test.py
index 1d05556..0899c23 100644
--- a/openai_interaction/image_recognition_test.py
+++ b/openai_interaction/image_recognition_test.py
@@ -3,26 +3,6 @@ from openai import OpenAI
 client = OpenAI()
 
 SUPPORTED_IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.webp', '.gif')
 
-# response = client.responses.create(
-#     model="gpt-4.1-mini",
-#     input=[{
-#         "role": "user",
-#         "content": [
-#             {"type": "input_text", "text": "Что на этих картинках и в этом PDF файле?"},
-#             {
-#                 "type": "input_image",
-#                 "image_url": "https://cdn.discordapp.com/attachments/1381924995994882150/1388385230108360855/2a264d9a62e7420586ee7873f53552d7.webp?ex=6860ca01&is=685f7881&hm=e8e21d0829cc9be3a21f63cb8f0c5065bf76c1de3952dbb524640694dd5037be&",
-#             },
-#         ],
-#     }],
-# )
-#
-# print(response.output_text)
-
 async def image_recognition(content: str, files: list[dict[str, str]]) -> str:
     input_data = [{'type': 'input_text', 'text': content}]
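For context, here is a minimal sketch of how the new image_recognition coroutine might finish the request that the removed comment block demonstrated. It is not part of the patch: the AsyncOpenAI client and the 'filename'/'url' keys on each files entry are assumptions made for illustration.

from openai import AsyncOpenAI

client = AsyncOpenAI()

SUPPORTED_IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.webp', '.gif')


async def image_recognition(content: str, files: list[dict[str, str]]) -> str:
    # Start with the user's text, exactly as in the diff above.
    input_data = [{'type': 'input_text', 'text': content}]

    # Attach each supported image as an input_image part
    # (the 'filename' and 'url' keys are assumed, not shown in the diff).
    for file in files:
        if file.get('filename', '').lower().endswith(SUPPORTED_IMAGE_EXTENSIONS):
            input_data.append({'type': 'input_image', 'image_url': file['url']})

    # Same request shape as the commented-out example this patch removes.
    response = await client.responses.create(
        model='gpt-4.1-mini',
        input=[{'role': 'user', 'content': input_data}],
    )
    return response.output_text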