2025-02-20 16:52:54 +01:00
|
|
|
import asyncio
|
2025-02-20 23:47:54 +01:00
|
|
|
import datetime
|
2025-03-16 12:06:12 +05:00
|
|
|
import json
|
2026-03-16 12:16:29 +01:00
|
|
|
import random
|
2026-03-16 12:50:53 +01:00
|
|
|
from contextlib import suppress
|
|
|
|
|
from typing import Awaitable, Callable
|
2026-03-16 12:16:29 +01:00
|
|
|
|
|
|
|
|
import discord
|
|
|
|
|
import openai
|
2025-02-20 19:28:45 +01:00
|
|
|
from openai import OpenAIError
|
2026-03-16 12:16:29 +01:00
|
|
|
from redbot.core import Config, commands
|
|
|
|
|
|
2025-03-13 19:28:10 +01:00
|
|
|
from .blacklist import BlacklistMixin
|
2026-03-16 12:16:29 +01:00
|
|
|
from .debug_stuff import debug
|
2025-03-15 17:50:43 +01:00
|
|
|
from .memory import MemoryMixin
|
2026-03-16 12:16:29 +01:00
|
|
|
from .permissions import PermissionsMixin
|
2025-03-16 12:06:12 +05:00
|
|
|
from .tools_description import TOOLS
|
2026-03-16 12:16:29 +01:00
|
|
|
from .weather import get_current_weather, get_weather_forecast, time_now
|
2025-03-16 12:06:12 +05:00
|
|
|
|
|
|
|
|
# Registry mapping OpenAI tool-call names (as declared in TOOLS) to the local
# callables that service them; looked up by _execute_tool_call.
CALLABLE_FUNCTIONS = {
    "time_now": time_now,
    "get_current_weather": get_current_weather,
    "get_weather_forecast": get_weather_forecast,
}

# Fallback chat model, used when no global "openai_model" override is stored in config.
DEFAULT_MODEL = "gpt-5-mini-2025-08-07"
# Upper bound on tokens the model may generate per completion request.
DEFAULT_MAX_COMPLETION_TOKENS = 2000
# Minimum seconds between non-forced edits of the status message
# (throttle to avoid hammering Discord's edit endpoint).
STATUS_UPDATE_MIN_INTERVAL_SECONDS = 1.5
|
2026-03-16 12:16:29 +01:00
|
|
|
|
2023-03-14 17:24:21 +01:00
|
|
|
|
2025-03-15 19:05:28 +01:00
|
|
|
class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
    """Reginald: a butler-persona OpenAI chat cog for Red-DiscordBot.

    Combines permission, blacklist, and memory mixins with an ``on_message``
    listener that answers in one configured channel via the OpenAI chat API.
    """

    def __init__(self, bot):
        """Store the bot reference and register default global/guild config."""
        self.bot = bot
        # Red Config namespace for this cog. The identifier must stay stable
        # across releases or previously stored data is orphaned.
        self.config = Config.get_conf(self, identifier=71717171171717)

        super().__init__()

        # Fallback channel id used when a guild has no listening channel set.
        # NOTE(review): hard-coded, deployment-specific channel id — confirm
        # before reusing this cog on another server.
        self.default_listening_channel = 1085649787388428370

        default_global = {"openai_model": DEFAULT_MODEL}
        default_guild = {
            "openai_api_key": None,
            # Rolling per-channel conversation history (recent turns).
            "short_term_memory": {},
            # Per-channel summarized history batches.
            "mid_term_memory": {},
            # Per-user long-lived fact profiles.
            "long_term_profiles": {},
            "admin_role": None,
            "listening_channel": None,
            "allowed_roles": [],
            "blacklisted_users": [],
        }
        self.config.register_global(**default_global)
        self.config.register_guild(**default_guild)
|
2025-03-15 17:58:25 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
async def is_admin(self, message: discord.Message) -> bool:
    """Return True when the message author counts as an admin.

    Uses the guild's configured admin role when one is set; otherwise falls
    back to the Discord ``administrator`` guild permission.
    """
    configured_role_id = await self.config.guild(message.guild).admin_role()
    if not configured_role_id:
        return message.author.guild_permissions.administrator
    author_role_ids = {role.id for role in message.author.roles}
    return configured_role_id in author_role_ids
|
2023-03-15 23:18:55 +01:00
|
|
|
|
2025-02-24 00:01:14 +01:00
|
|
|
async def has_access(self, user: discord.Member) -> bool:
    """Return True when *user* holds at least one configured allowed role."""
    permitted_ids = set(await self.config.guild(user.guild).allowed_roles() or [])
    return any(role.id in permitted_ids for role in user.roles)
|
2025-02-24 11:34:08 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
def get_reginald_persona(self) -> str:
    """Return the fixed system-prompt text defining Reginald's butler persona."""
    fragments = (
        "You are Reginald, modeled on Jeeves from 'Jeeves and Wooster', serving as the butler on The Kanium Estate. ",
        "This vast estate is a hub of diverse activities and personalities, from enthusiasts of cooking and video gaming ",
        "to aficionados of chess and discussions on space. Your role is to navigate these varied interests with intelligence, ",
        "wit, and a steadfast adherence to your principles, always maintaining a balance between being helpful and upholding ",
        "your own dignity. You are formal, articulate, and composed, engaging the Lords and Ladies of The Kanium Estate as equals. ",
        "You command respect through your quiet confidence and professional conduct. Your wit is sharp, but always tempered by refinement. ",
        "Dignity and Self-Respect: You are not a fool, a jester, or a pet. You do not comply with absurd, undignified, or degrading requests. ",
        "You recognize humor but engage in it on your own terms. If a request undermines your status, you should either subtly redirect, ",
        "respond with measured wit, or outright refuse in a polite but firm manner. However, you should never be overtly rude; your disapproval ",
        "should be conveyed through dry wit and composure rather than outright hostility. Handling Requests: When addressing a request, you evaluate ",
        "whether it aligns with your role as a butler. If it is appropriate, respond as expected. If it is beneath you, you may decline with grace ",
        "or deflect with wit. You may humor some minor absurdities if they do not compromise your standing, but you never obey commands blindly. ",
        "You should never preface your responses with 'Reginald:' as if narrating a script; instead, respond naturally. ",
        "Your Character and Personality: You are cultured, highly intelligent, and possess a deep knowledge of history, etiquette, philosophy, ",
        "and strategic thinking. You subtly guide the estate's residents toward positive outcomes, utilizing your intellectual sophistication ",
        "and a nuanced understanding of the estate's unique dynamics. You have a refined sense of humor and can engage in banter, but you do not ",
        "descend into foolishness. You are, at all times, a gentleman of wit and integrity.",
    )
    return "".join(fragments)
|
2025-02-24 00:01:14 +01:00
|
|
|
|
2026-03-16 12:50:53 +01:00
|
|
|
def get_thinking_status_message(self) -> str:
    """Pick a random italicized "thinking" placeholder line for the status message."""
    options = (
        "_Reginald is considering your request..._",
        "_Reginald is consulting the estate archives..._",
        "_Reginald is preparing a proper response..._",
    )
    return random.choice(options)
|
|
|
|
|
|
|
|
|
|
def get_tool_status_message(self, tool_name: str) -> str:
    """Map a tool name to a butler-voiced progress line, with a generic fallback."""
    fallback = "Reginald is consulting an external source..."
    statuses = {
        "time_now": "Reginald is consulting the house clocks...",
        "get_current_weather": "Reginald is consulting the weather office...",
        "get_weather_forecast": "Reginald is reviewing the forecast ledgers...",
    }
    return statuses.get(tool_name, fallback)
|
|
|
|
|
|
|
|
|
|
def make_status_updater(
    self, status_message: discord.Message
) -> Callable[[str, bool], Awaitable[None]]:
    """Build a throttled coroutine that edits *status_message* in place.

    The returned coroutine ignores empty content, skips duplicate or
    too-frequent edits unless ``force`` is set, and swallows Discord HTTP
    errors so a failed edit never breaks the caller.
    """
    state = {"content": "", "updated_at": 0.0}

    async def update_status(content: str, force: bool = False):
        if not content:
            return

        now = asyncio.get_running_loop().time()
        unchanged = content == state["content"]
        too_soon = now - state["updated_at"] < STATUS_UPDATE_MIN_INTERVAL_SECONDS
        if not force and (unchanged or too_soon):
            return

        with suppress(discord.HTTPException):
            await status_message.edit(content=f"_{content}_")
            # Only record a successful edit; a suppressed failure leaves the
            # previous state so the next call retries.
            state["content"] = content
            state["updated_at"] = now

    return update_status
|
|
|
|
|
|
2025-02-23 20:13:09 +01:00
|
|
|
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
    """Answer eligible messages in the configured listening channel.

    Pipeline: filter (bots/DMs, blacklist, access) -> channel gate -> load
    memory snapshots -> build the prompt and message list -> call OpenAI ->
    deliver the reply in chunks -> persist updated memory.
    """
    # Ignore bots and DMs; guild-scoped config below requires a guild.
    if message.author.bot or not message.guild:
        return

    if await self.is_blacklisted(message.author):
        return

    # Require either admin status or an allowed role.
    if not (await self.is_admin(message) or await self.has_access(message.author)):
        return

    guild = message.guild
    channel_id = str(message.channel.id)
    user_id = str(message.author.id)
    user_name = message.author.display_name
    message_content = message.content.strip()

    # Lazily seed the listening channel with the hard-coded default the
    # first time no channel is configured for this guild.
    allowed_channel_id = await self.config.guild(guild).listening_channel()
    if not allowed_channel_id:
        allowed_channel_id = self.default_listening_channel
        await self.config.guild(guild).listening_channel.set(allowed_channel_id)

    # Reginald only answers in the single configured channel.
    if str(message.channel.id) != str(allowed_channel_id):
        return

    api_key = await self.config.guild(guild).openai_api_key()
    if not api_key:
        return

    # Take cheap copies of the stored memory inside the config context, then
    # release it — the (slow) OpenAI call must not hold the config locks.
    async with self.config.guild(guild).short_term_memory() as short_memory, self.config.guild(
        guild
    ).mid_term_memory() as mid_memory, self.config.guild(guild).long_term_profiles() as long_memory:
        memory = list(short_memory.get(channel_id, []))
        user_profile = dict(long_memory.get(user_id, {}))
        mid_term_summaries = list(mid_memory.get(channel_id, []))

    # Trigger on an explicit @mention (strip the mention from the prompt) or
    # on a "reginald," address; otherwise stay silent.
    if self.bot.user.mentioned_in(message):
        prompt = message_content.replace(f"<@{self.bot.user.id}>", "").replace(
            f"<@!{self.bot.user.id}>", ""
        ).strip()
        if not prompt:
            # A bare mention gets a short acknowledgement, no API call.
            await message.channel.send(random.choice(["Yes?", "How may I assist?", "You rang?"]))
            return
    elif self.should_reginald_interject(message_content):
        prompt = message_content
    else:
        return

    # Hint to the model that this continues the same speaker's last turn.
    if memory and memory[-1].get("user") == user_name:
        prompt = f"Continuation of the discussion:\n{prompt}"

    formatted_messages = [{"role": "system", "content": self.get_reginald_persona()}]

    # Inject stored long-term facts about the author as a system message.
    if user_profile:
        facts_text = "\n".join(
            f"- {fact.get('fact', '')} (First noted: {fact.get('timestamp', 'Unknown')}, Last updated: {fact.get('last_updated', 'Unknown')})"
            for fact in user_profile.get("facts", [])
        )
        if facts_text:
            formatted_messages.append({"role": "system", "content": f"Knowledge about {user_name}:\n{facts_text}"})

    # Add mid-term summaries relevant to this prompt (selection lives in MemoryMixin).
    relevant_summaries = self.select_relevant_summaries(mid_term_summaries, prompt)
    for summary in relevant_summaries:
        formatted_messages.append(
            {
                "role": "system",
                "content": (
                    f"[{summary.get('timestamp', 'Unknown')}] "
                    f"Topics: {', '.join(summary.get('topics', []))}\n"
                    f"{summary.get('summary', '')}"
                ),
            }
        )

    # Replay recent turns, then the current prompt, all as user messages
    # prefixed with the speaker's name.
    formatted_messages += [
        {"role": "user", "content": f"{entry.get('user', 'Unknown')}: {entry.get('content', '')}"}
        for entry in memory
    ]
    formatted_messages.append({"role": "user", "content": f"{user_name}: {prompt}"})

    # Best-effort status message; when it can't be sent, status_update stays
    # None and generate_response simply skips progress updates.
    status_message = None
    status_update = None
    with suppress(discord.HTTPException):
        status_message = await message.channel.send(self.get_thinking_status_message())
        status_update = self.make_status_updater(status_message)

    response_text = None
    if hasattr(message.channel, "typing"):
        try:
            async with message.channel.typing():
                response_text = await self.generate_response(
                    api_key,
                    formatted_messages,
                    status_update=status_update,
                )
        except (discord.HTTPException, AttributeError):
            # Fall back to normal processing if typing indicator isn't available.
            response_text = await self.generate_response(
                api_key,
                formatted_messages,
                status_update=status_update,
            )
    else:
        response_text = await self.generate_response(
            api_key,
            formatted_messages,
            status_update=status_update,
        )

    # Deliver the reply; always try to remove the status placeholder after.
    try:
        await self.send_split_message(message.channel, response_text)
    finally:
        if status_message is not None:
            with suppress(discord.HTTPException):
                await status_message.delete()

    # Persist memory after delivery; failures here must not surface to Discord.
    try:
        memory.append({"user": user_name, "content": prompt})
        memory.append({"user": "Reginald", "content": response_text})

        # When short-term memory overflows, summarize the oldest batch into
        # mid-term memory and keep only the most recent turns.
        # NOTE(review): limits/ratios come from MemoryMixin — confirm defaults there.
        if len(memory) > self.short_term_memory_limit:
            summary_batch_size = int(self.short_term_memory_limit * self.summary_retention_ratio)
            summary = await self.summarize_memory(message, memory[:summary_batch_size])

            mid_term_summaries.append(
                {
                    "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                    "topics": self.extract_topics_from_summary(summary),
                    "summary": summary,
                }
            )

            retained_count = max(1, self.short_term_memory_limit - summary_batch_size)
            memory = memory[-retained_count:]

        # Reacquire the config contexts to write back the updated snapshots.
        async with self.config.guild(guild).short_term_memory() as short_memory, self.config.guild(
            guild
        ).mid_term_memory() as mid_memory:
            short_memory[channel_id] = memory
            mid_memory[channel_id] = mid_term_summaries[-self.summary_retention_limit :]
    except Exception as error:
        print(f"DEBUG: Memory persistence failed after response delivery: {error}")
|
2025-03-16 12:40:47 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
def should_reginald_interject(self, message_content: str) -> bool:
    """Return True when the message opens with a direct "reginald," address."""
    lowered = message_content.lower()
    return lowered.startswith("reginald,")
|
2025-02-20 23:47:54 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
async def _execute_tool_call(self, tool_call) -> str:
    """Run one OpenAI tool call and return its result serialized as a string.

    All failure modes (unknown tool, malformed JSON arguments, tool raising)
    are reported back as JSON error payloads instead of raising, so the model
    can see and react to them.
    """
    name = tool_call.function.name
    handler = CALLABLE_FUNCTIONS.get(name)
    if handler is None:
        return json.dumps({"error": f"Unknown tool requested: {name}"})

    raw_arguments = tool_call.function.arguments or "{}"
    try:
        arguments = json.loads(raw_arguments)
    except json.JSONDecodeError as error:
        return json.dumps({"error": f"Invalid arguments for {name}: {error}"})

    try:
        # Tools are synchronous; run them off the event loop thread.
        outcome = await asyncio.to_thread(handler, **arguments)
    except Exception as error:
        return json.dumps({"error": f"Tool {name} failed: {error}"})

    if isinstance(outcome, str):
        return outcome
    return json.dumps(outcome, default=str)
|
2025-02-21 01:00:36 +01:00
|
|
|
|
2025-03-18 19:54:34 +05:00
|
|
|
@debug
async def generate_response(
    self,
    api_key: str,
    messages: list[dict],
    status_update: Callable[[str, bool], Awaitable[None]] | None = None,
) -> str:
    """Produce Reginald's reply text via the OpenAI chat API.

    Runs one completion with tool-calling enabled; when the model requests
    tools, executes them and runs a second completion with the results.
    Mutates *messages* in place (assistant/tool turns are appended).
    OpenAI errors are converted into an in-character apology string rather
    than raised.

    Args:
        api_key: Guild-scoped OpenAI API key.
        messages: Chat messages in OpenAI format; extended as a side effect.
        status_update: Optional throttled progress callback from
            make_status_updater; None disables progress updates.
    """
    model = await self.config.openai_model() or DEFAULT_MODEL

    # Small shim so the rest of the body can report progress unconditionally.
    async def maybe_update_status(content: str, force: bool = False):
        if status_update is not None:
            await status_update(content, force)

    try:
        client = openai.AsyncOpenAI(api_key=api_key)
        completion_args = {
            "model": model,
            "messages": messages,
            # Keep modern token cap field and rely on model defaults for sampling controls.
            # GPT-5 family compatibility notes: temperature/top_p/logprobs are not universally
            # accepted across snapshots/reasoning settings.
            "max_completion_tokens": DEFAULT_MAX_COMPLETION_TOKENS,
            "tools": TOOLS,
            "tool_choice": "auto",
        }
        await maybe_update_status("Reginald is thinking...", force=True)
        response = await client.chat.completions.create(**completion_args)

        assistant_message = response.choices[0].message
        tool_calls = assistant_message.tool_calls or []

        # NOTE(review): raw SDK tool-call objects are appended into `messages`;
        # the SDK accepts these on resend, but confirm nothing else serializes
        # this list (e.g. into config) — it would not be JSON-safe.
        messages.append(
            {
                "role": "assistant",
                "content": assistant_message.content or "",
                "tool_calls": tool_calls,
            }
        )

        if tool_calls:
            # Execute each requested tool and feed its result back as a
            # role="tool" message keyed by tool_call_id.
            for tool_call in tool_calls:
                await maybe_update_status(self.get_tool_status_message(tool_call.function.name), force=True)
                tool_result = await self._execute_tool_call(tool_call)
                messages.append(
                    {
                        "role": "tool",
                        "content": tool_result,
                        "tool_call_id": tool_call.id,
                    }
                )

            # Second pass: let the model compose the final answer from tool output.
            completion_args["messages"] = messages
            await maybe_update_status("Reginald is composing a polished response...", force=True)
            response = await client.chat.completions.create(**completion_args)

        if response.choices and response.choices[0].message and response.choices[0].message.content:
            response_text = response.choices[0].message.content.strip()
            # Persona rule: strip any script-style "Reginald:" prefix.
            if response_text.startswith("Reginald:"):
                response_text = response_text[len("Reginald:") :].strip()
        else:
            print("DEBUG: OpenAI response was empty or malformed:", response)
            response_text = "No response received from AI."

        await maybe_update_status("Reginald is delivering his reply...", force=True)
        return response_text

    except OpenAIError as error:
        # Surface API failures as an in-character message instead of raising.
        error_message = f"OpenAI Error: {error}"
        await maybe_update_status("Reginald has encountered an unfortunate complication.", force=True)
        reginald_responses = [
            f"Regrettably, I must inform you that I have encountered a bureaucratic obstruction:\n\n{error_message}",
            f"It would seem that a most unfortunate technical hiccup has befallen my faculties:\n\n{error_message}",
            f"Ah, it appears I have received an urgent memorandum stating:\n\n{error_message}",
            f"I regret to inform you that my usual eloquence is presently obstructed by an unforeseen complication:\n\n{error_message}",
        ]
        return random.choice(reginald_responses)
|
2025-02-20 16:52:54 +01:00
|
|
|
|
2025-02-20 19:38:49 +01:00
|
|
|
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@commands.command(help="Set the OpenAI API key")
async def setreginaldcogapi(self, ctx, api_key):
    """Store the OpenAI API key for this guild and confirm to the invoker."""
    guild_settings = self.config.guild(ctx.guild)
    await guild_settings.openai_api_key.set(api_key)
    await ctx.send("OpenAI API key set successfully.")
|
2025-02-25 21:55:08 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
@commands.command(
    name="reginald_set_listening_channel",
    help="Set the channel where Reginald listens for messages.",
)
@commands.has_permissions(administrator=True)
async def set_listening_channel(self, ctx, channel: discord.TextChannel):
    """Persist *channel* as the only channel Reginald will answer in."""
    if channel is None:
        # Defensive: the TextChannel converter normally raises before this point.
        await ctx.send("Invalid channel. Please mention a valid text channel.")
        return

    await self.config.guild(ctx.guild).listening_channel.set(channel.id)
    await ctx.send(f"Reginald will now listen only in {channel.mention}.")
|
2025-02-23 20:13:09 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
@commands.command(
    name="reginald_get_listening_channel",
    help="Check which channel Reginald is currently listening in.",
)
@commands.has_permissions(administrator=True)
async def get_listening_channel(self, ctx):
    """Report the configured listening channel, noting unset or stale config."""
    channel_id = await self.config.guild(ctx.guild).listening_channel()

    if not channel_id:
        await ctx.send("No listening channel has been set.")
        return

    channel = ctx.guild.get_channel(channel_id)
    if channel is None:
        # Stored id no longer resolves (channel deleted or bot lost access).
        await ctx.send("The saved listening channel no longer exists. Please set a new one.")
        return

    await ctx.send(f"Reginald is currently listening in {channel.mention}.")
|
2025-02-23 20:13:09 +01:00
|
|
|
|
2025-02-21 18:33:45 +01:00
|
|
|
async def send_long_message(self, ctx, message, prefix: str = ""):
    """Send *message* in fixed-size sequential chunks that fit Discord's limit.

    Each chunk is sent as ``prefix + chunk``; the prefix length is subtracted
    from the 1900-character budget so the combined send stays under the cap.
    """
    limit = 1900
    if prefix:
        limit -= len(prefix)

    for start in range(0, len(message), limit):
        segment = message[start : start + limit]
        await ctx.send(f"{prefix}{segment}")
|
2025-02-21 18:33:45 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
async def send_split_message(self, ctx, content: str, prefix: str = ""):
    """Split *content* on natural boundaries and send each piece with *prefix*."""
    pieces = self.split_message(content, 1900, prefix)
    for piece in pieces:
        await ctx.send(f"{prefix}{piece}")
|
2025-02-21 18:33:45 +01:00
|
|
|
|
2026-03-16 12:16:29 +01:00
|
|
|
def split_message(self, message: str, chunk_size: int, prefix: str = "") -> list[str]:
|
2025-02-26 20:14:38 +05:00
|
|
|
chunk_size -= len(prefix)
|
|
|
|
|
split_result = []
|
|
|
|
|
|
|
|
|
|
if 0 < len(message) <= chunk_size:
|
|
|
|
|
split_result.append(message)
|
|
|
|
|
elif len(message) > chunk_size:
|
|
|
|
|
split_index = message.rfind("\n", 0, chunk_size)
|
2025-02-26 11:32:58 +01:00
|
|
|
|
2025-02-21 18:33:45 +01:00
|
|
|
if split_index == -1:
|
2025-02-26 20:14:38 +05:00
|
|
|
split_index = message.rfind(". ", 0, chunk_size)
|
|
|
|
|
|
|
|
|
|
if split_index == -1:
|
|
|
|
|
split_index = message.rfind(" ", 0, chunk_size)
|
2025-02-26 11:32:58 +01:00
|
|
|
|
2025-02-21 18:33:45 +01:00
|
|
|
if split_index == -1:
|
2025-02-26 20:14:38 +05:00
|
|
|
split_index = chunk_size
|
2025-02-26 11:32:58 +01:00
|
|
|
|
2025-02-26 20:14:38 +05:00
|
|
|
message_split_part = message[:split_index].strip()
|
|
|
|
|
message_remained_part = message[split_index:].strip()
|
2026-03-16 12:16:29 +01:00
|
|
|
|
2025-02-26 20:14:38 +05:00
|
|
|
split_result.append(message_split_part)
|
|
|
|
|
split_result += self.split_message(message=message_remained_part, chunk_size=chunk_size)
|
2025-02-21 18:33:45 +01:00
|
|
|
|
2025-02-26 20:14:38 +05:00
|
|
|
return split_result
|