Compare commits

..

No commits in common. "master" and "9month-revision-reginald" have entirely different histories.

2 changed files with 29 additions and 119 deletions

View File

@@ -34,6 +34,9 @@ class Completion:
"model": self.__model,
"messages": self.__messages,
"max_completion_tokens": 2000,
"temperature": 0.7,
"presence_penalty": 0.5,
"frequency_penalty": 0.5,
"tools": TOOLS,
"tool_choice": "auto",
}

View File

@@ -2,8 +2,6 @@ import asyncio
import datetime
import json
import random
from contextlib import suppress
from typing import Awaitable, Callable
import discord
import openai
@@ -25,7 +23,7 @@ CALLABLE_FUNCTIONS = {
DEFAULT_MODEL = "gpt-5-mini-2025-08-07"
DEFAULT_MAX_COMPLETION_TOKENS = 2000
STATUS_UPDATE_MIN_INTERVAL_SECONDS = 1.5
DEFAULT_TEMPERATURE = 0.7
class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
@@ -82,46 +80,6 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
"descend into foolishness. You are, at all times, a gentleman of wit and integrity."
)
def get_thinking_status_message(self) -> str:
    """Pick one of Reginald's italicized "thinking" placeholder lines at random."""
    candidates = (
        "_Reginald is considering your request..._",
        "_Reginald is consulting the estate archives..._",
        "_Reginald is preparing a proper response..._",
    )
    return random.choice(candidates)
def get_tool_status_message(self, tool_name: str) -> str:
    """Return the user-facing status line shown while a tool call runs.

    Unknown tool names fall back to a generic "external source" message.
    """
    fallback = "Reginald is consulting an external source..."
    known_statuses = {
        "time_now": "Reginald is consulting the house clocks...",
        "get_current_weather": "Reginald is consulting the weather office...",
        "get_weather_forecast": "Reginald is reviewing the forecast ledgers...",
    }
    return known_statuses.get(tool_name, fallback)
def make_status_updater(
    self, status_message: discord.Message
) -> Callable[[str, bool], Awaitable[None]]:
    """Build a rate-limited coroutine that edits *status_message* in place.

    The returned coroutine ignores empty content, and skips edits that either
    repeat the previously shown content or arrive within
    STATUS_UPDATE_MIN_INTERVAL_SECONDS of the last successful edit — unless
    ``force=True`` is passed.
    """
    previous_content = ""
    previous_edit_at = 0.0

    async def update_status(content: str, force: bool = False):
        nonlocal previous_content, previous_edit_at
        if not content:
            return
        now = asyncio.get_running_loop().time()
        is_duplicate = content == previous_content
        is_throttled = now - previous_edit_at < STATUS_UPDATE_MIN_INTERVAL_SECONDS
        if not force and (is_duplicate or is_throttled):
            return
        # Discord may reject the edit (e.g. message was deleted); swallow the
        # HTTP error so a failed edit never breaks response generation.
        with suppress(discord.HTTPException):
            await status_message.edit(content=f"_{content}_")
            previous_content = content
            previous_edit_at = now

    return update_status
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot or not message.guild:
@@ -202,43 +160,8 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
]
formatted_messages.append({"role": "user", "content": f"{user_name}: {prompt}"})
status_message = None
status_update = None
with suppress(discord.HTTPException):
status_message = await message.channel.send(self.get_thinking_status_message())
status_update = self.make_status_updater(status_message)
response_text = await self.generate_response(api_key, formatted_messages)
response_text = None
if hasattr(message.channel, "typing"):
try:
async with message.channel.typing():
response_text = await self.generate_response(
api_key,
formatted_messages,
status_update=status_update,
)
except (discord.HTTPException, AttributeError):
# Fall back to normal processing if typing indicator isn't available.
response_text = await self.generate_response(
api_key,
formatted_messages,
status_update=status_update,
)
else:
response_text = await self.generate_response(
api_key,
formatted_messages,
status_update=status_update,
)
try:
await self.send_split_message(message.channel, response_text)
finally:
if status_message is not None:
with suppress(discord.HTTPException):
await status_message.delete()
try:
memory.append({"user": user_name, "content": prompt})
memory.append({"user": "Reginald", "content": response_text})
@@ -262,8 +185,8 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
).mid_term_memory() as mid_memory:
short_memory[channel_id] = memory
mid_memory[channel_id] = mid_term_summaries[-self.summary_retention_limit :]
except Exception as error:
print(f"DEBUG: Memory persistence failed after response delivery: {error}")
await self.send_split_message(message.channel, response_text)
def should_reginald_interject(self, message_content: str) -> bool:
direct_invocation = {"reginald,"}
@@ -293,31 +216,19 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
return json.dumps(result, default=str)
@debug
async def generate_response(
self,
api_key: str,
messages: list[dict],
status_update: Callable[[str, bool], Awaitable[None]] | None = None,
) -> str:
async def generate_response(self, api_key: str, messages: list[dict]) -> str:
model = await self.config.openai_model() or DEFAULT_MODEL
async def maybe_update_status(content: str, force: bool = False):
if status_update is not None:
await status_update(content, force)
try:
client = openai.AsyncOpenAI(api_key=api_key)
completion_args = {
"model": model,
"messages": messages,
# Keep modern token cap field and rely on model defaults for sampling controls.
# GPT-5 family compatibility notes: temperature/top_p/logprobs are not universally
# accepted across snapshots/reasoning settings.
# `max_completion_tokens` is the recommended limit field for modern/reasoning models.
"max_completion_tokens": DEFAULT_MAX_COMPLETION_TOKENS,
"temperature": DEFAULT_TEMPERATURE,
"tools": TOOLS,
"tool_choice": "auto",
}
await maybe_update_status("Reginald is thinking...", force=True)
response = await client.chat.completions.create(**completion_args)
assistant_message = response.choices[0].message
@@ -333,7 +244,6 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
if tool_calls:
for tool_call in tool_calls:
await maybe_update_status(self.get_tool_status_message(tool_call.function.name), force=True)
tool_result = await self._execute_tool_call(tool_call)
messages.append(
{
@@ -344,7 +254,6 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
)
completion_args["messages"] = messages
await maybe_update_status("Reginald is composing a polished response...", force=True)
response = await client.chat.completions.create(**completion_args)
if response.choices and response.choices[0].message and response.choices[0].message.content:
@@ -355,12 +264,10 @@ class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):
print("DEBUG: OpenAI response was empty or malformed:", response)
response_text = "No response received from AI."
await maybe_update_status("Reginald is delivering his reply...", force=True)
return response_text
except OpenAIError as error:
error_message = f"OpenAI Error: {error}"
await maybe_update_status("Reginald has encountered an unfortunate complication.", force=True)
reginald_responses = [
f"Regrettably, I must inform you that I have encountered a bureaucratic obstruction:\n\n{error_message}",
f"It would seem that a most unfortunate technical hiccup has befallen my faculties:\n\n{error_message}",