# Chat-completion helper for the OpenAI API with weather/time tool-calling.
import asyncio
import json
import random
from os import environ

import openai
from openai import OpenAIError

# Support both execution modes: relative imports when run as part of the
# package, falling back to top-level module names when run as a script.
try:
    from .tools_description import TOOLS
    from .weather import get_current_weather, get_weather_forecast, time_now
except ImportError:
    from tools_description import TOOLS
    from weather import get_current_weather, get_weather_forecast, time_now
# Tool names (as advertised to the model via TOOLS) mapped to the local
# callables that implement them; used by Completion.function_manager.
CALLABLE_FUNCTIONS = {
    "time_now": time_now,
    "get_current_weather": get_current_weather,
    "get_weather_forecast": get_weather_forecast,
}
class Completion:
    """Stateful chat-completion session with OpenAI tool-calling support.

    Accumulates the running message history and, when the model requests
    tool calls, executes them via ``CALLABLE_FUNCTIONS`` and re-queries the
    model until it produces a plain text answer.
    """

    def __init__(self, model: str, api_key: str):
        """Store the model name and API key, and start an empty history."""
        self.__model = model
        self.__api_key = api_key
        self.__messages = []  # full conversation history, resent on every request

    async def create_completion(self):
        """Request a completion for the current history and return its text.

        If the model replies with tool calls, each call is executed and its
        result appended as a ``tool`` message, then the request is repeated
        recursively so the model can incorporate the results.

        Returns:
            The assistant's reply text, or a formatted in-character error
            message if the OpenAI API raised an ``OpenAIError``.
        """
        try:
            client = openai.AsyncOpenAI(api_key=self.__api_key)
            completion_kwargs = {
                "model": self.__model,
                "messages": self.__messages,
                "max_completion_tokens": 2000,
                "tools": TOOLS,
                "tool_choice": "auto",
            }

            response = await client.chat.completions.create(**completion_kwargs)
            response_message = response.choices[0].message
            response_content = response_message.content or ""
            tool_calls = response_message.tool_calls or []

            # append_message drops a falsy tool_calls value, so a plain text
            # reply is recorded as an ordinary assistant message.
            self.append_message(role="assistant", content=response_content, tool_calls=tool_calls)

            if tool_calls:
                for tool_call in tool_calls:
                    await self.function_manager(
                        func_name=tool_call.function.name,
                        # arguments may be None/empty; treat as no kwargs
                        func_kwargs=json.loads(tool_call.function.arguments or "{}"),
                        tool_call_id=tool_call.id,
                    )
                # Ask again with the tool results now in the history.
                return await self.create_completion()

            return response_content
        except OpenAIError as error:
            return self.get_error_message(error_message=str(error), error_type="OpenAIError")

    def append_message(self, role: str, content: str, tool_calls: list = None, tool_call_id: str = None):
        """Append one chat message to the session history.

        Args:
            role: Chat role ("user", "assistant", "tool", ...).
            content: Message text (may be empty).
            tool_calls: Tool calls attached to an assistant message. A falsy
                value (None or ``[]``) is omitted entirely — the API rejects
                assistant messages carrying an empty ``tool_calls`` array.
            tool_call_id: Id linking a ``tool`` message to the call it answers.
        """
        message = {"role": role, "content": content}
        if tool_calls:  # deliberately truthiness, not `is not None`: skip []
            message["tool_calls"] = tool_calls
        if tool_call_id is not None:
            message["tool_call_id"] = tool_call_id
        self.__messages.append(message)

    @staticmethod
    def get_error_message(error_message: str, error_type: str) -> str:
        """Wrap an error in a randomly chosen in-character preamble."""
        reginald_responses = [
            "Regrettably, I must inform you that I have encountered a bureaucratic obstruction:",
            "It would seem that a most unfortunate technical hiccup has befallen my faculties:",
            "Ah, it appears I have received an urgent memorandum stating:",
            "I regret to inform you that my usual eloquence is presently obstructed by an unforeseen complication:",
        ]
        random_response = random.choice(reginald_responses)
        return f"{random_response}\n\n{error_type}: {error_message}"

    async def function_manager(self, func_name: str, func_kwargs: dict, tool_call_id: str):
        """Execute one requested tool and append its result as a tool message.

        Unknown tools and tool exceptions are reported back to the model as a
        JSON error payload instead of raising, so the conversation can recover.
        """
        function_to_call = CALLABLE_FUNCTIONS.get(func_name)
        if function_to_call is None:
            result = json.dumps({"error": f"Unknown tool requested: {func_name}"})
        else:
            try:
                # Tools are synchronous; run them off the event loop.
                result = await asyncio.to_thread(function_to_call, **func_kwargs)
            except Exception as error:
                result = json.dumps({"error": f"Tool {func_name} failed: {error}"})
        if not isinstance(result, str):
            # Tool message content must be a string; serialize anything else.
            result = json.dumps(result)

        self.append_message(role="tool", content=result, tool_call_id=tool_call_id)
if __name__ == "__main__":

    async def main():
        """One-shot interactive check: send a single prompt, print the reply."""
        prompt = input("Your input: ")
        session = Completion(model="gpt-4.1-mini", api_key=environ.get("OPENAI_API_KEY"))
        session.append_message(role="user", content=prompt)
        answer = await session.create_completion()
        print(answer)

    asyncio.run(main())