40 lines
1.9 KiB
Python
40 lines
1.9 KiB
Python
import asyncio
|
|
from discord import Message
|
|
from reginaldCog.messenger_clients.messenger_client import ClientMessage, DiscordMessageAdapter as MessengerDiscordAdapter
|
|
from reginaldCog.llm_clients.llm_client import LMMClientType, MessengerClientMessageAdapter
|
|
|
|
|
|
class MessageService:
    """Bridge between a Discord message and an LLM client.

    Wraps one ``discord.Message`` and a selected LLM backend, and produces
    the plain-text LLM reply for that message.
    """

    def __init__(self, message: Message, llm_client: LMMClientType = LMMClientType.OPENAI):
        # The raw discord.Message to answer.
        self.message = message
        # Which LLM backend to use; defaults to OpenAI.
        self.llm_client = llm_client

    async def get_llm_response(self) -> str:
        """Build a prompt from ``self.message``, query the LLM, and return text.

        Returns:
            All ``output_text`` items of the response joined by newlines, or
            the fallback string "Sorry, no response generated." when the
            response contains no textual output.
        """
        # Kept function-local (as in the original) rather than module-level,
        # presumably to avoid an import cycle with llm_client — TODO confirm;
        # hoisted to the top of the function so all imports are in one place.
        from reginaldCog.llm_clients.llm_client import OpenAIResponseAdapter

        # Adapt discord.Message to the ClientMessage domain object.
        client_message: ClientMessage = MessengerDiscordAdapter().create_message(self.message)

        # Create the prompt and its builder for the selected LLM client.
        prompt = self.llm_client.value.prompt_class()
        prompt_builder = self.llm_client.value.prompt_builder_class(prompt)

        # Adapt the messenger message into an LLM message and add it to the prompt.
        llm_message = MessengerClientMessageAdapter(client_message, self.llm_client).to_message()
        prompt_builder.add_message(llm_message)

        # Instantiate the client and call it off the event loop: get_response
        # is assumed to be a blocking synchronous call, so run it in the
        # default executor to avoid stalling other coroutines.
        llm_client_instance = self.llm_client.value()
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(None, llm_client_instance.get_response, prompt)

        # NOTE(review): the response adapter is hard-coded to OpenAI even
        # though llm_client is configurable — non-OpenAI backends would break
        # here. Consider exposing an adapter class on LMMClientType.value.
        response_adapter = OpenAIResponseAdapter(response)
        message_obj = response_adapter.to_message()

        # Concatenate all textual output items for sending back to Discord.
        texts = [
            item.get("text", "")
            for item in message_obj.content.content_items
            if item.get("type") == "output_text"
        ]
        return "\n".join(texts) if texts else "Sorry, no response generated."
|