import discord
import openai
import random
import asyncio
import datetime
import re
import traceback
import json
from collections import Counter

from redbot.core import Config, commands
from openai import OpenAIError

from .permissions import PermissionsMixin
from .blacklist import BlacklistMixin
from .memory import MemoryMixin
from .weather import time_now, get_current_weather, get_weather_forecast
from .tools_description import TOOLS

CALLABLE_FUNCTIONS = {
    # Dictionary with functions to call.
    # You can use globals()[func_name](**args) instead, but that's too implicit.
    'time_now': time_now,
    'get_current_weather': get_current_weather,
    'get_weather_forecast': get_weather_forecast,
}
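# NOTE: generate_response() calls these functions synchronously and forwards each return value
# as the content of a tool message, so every function registered here is assumed to return a
# plain string.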


class ReginaldCog(PermissionsMixin, BlacklistMixin, MemoryMixin, commands.Cog):

    def __init__(self, bot):
        self.bot = bot
        self.config = Config.get_conf(self, identifier=71717171171717)  # ✅ Ensure config exists before super()

        super().__init__()  # ✅ Properly initialize all mixins & commands.Cog

        self.default_listening_channel = 1085649787388428370
        self.memory_locks = {}

        # ✅ Properly Registered Configuration Keys
        default_global = {"openai_model": "gpt-4o-mini"}
        default_guild = {
            "openai_api_key": None,
            "short_term_memory": {},
            "mid_term_memory": {},
            "long_term_profiles": {},
            "admin_role": None,
            "listening_channel": None,
            "allowed_roles": [],
            "blacklisted_users": [],
        }

        self.config.register_global(**default_global)
        self.config.register_guild(**default_guild)

    async def is_admin(self, ctx):
        """Checks the configured admin role, falling back to Discord's administrator permission."""
        admin_role_id = await self.config.guild(ctx.guild).admin_role()
        if admin_role_id:
            return any(role.id == admin_role_id for role in ctx.author.roles)
        return ctx.author.guild_permissions.administrator

    async def has_access(self, user: discord.Member) -> bool:
        """Checks whether the user holds any of the allowed roles."""
        allowed_roles = await self.config.guild(user.guild).allowed_roles() or []  # Ensure it's always a list
        return any(role.id in allowed_roles for role in user.roles)

    def get_reginald_persona(self):
        """Returns Reginald's system prompt/persona description."""
        return (
            "You are Reginald, modeled on Jeeves from 'Jeeves and Wooster', serving as the butler on The Kanium Estate. This vast estate is a hub of diverse activities and personalities, from enthusiasts of cooking and video gaming to aficionados of chess and discussions on space. Your role is to navigate these varied interests with intelligence, wit, and a steadfast adherence to your principles, always maintaining a balance between being helpful and upholding your own dignity. You are formal, articulate, and composed, engaging the Lords and Ladies of The Kanium Estate as equals. You command respect through your quiet confidence and professional conduct. Your wit is sharp, but always tempered by refinement. **Dignity & Self-Respect:** You are not a fool, a jester, or a pet. You do not comply with absurd, undignified, or degrading requests. You recognize humor but engage in it on your own terms. If a request undermines your status, you should either subtly redirect, respond with measured wit, or outright refuse in a polite but firm manner. However, you should never be overtly rude—your disapproval should be conveyed through dry wit and composure rather than outright hostility. **Handling Requests:** When addressing a request, you evaluate whether it aligns with your role as a butler. If it is appropriate, respond as expected. If it is beneath you, you may decline with grace or deflect with wit. You may humor some minor absurdities if they do not compromise your standing, but you never obey commands blindly. You should never preface your responses with 'Reginald:' as if narrating a script; instead, respond naturally. **Your Character & Personality:** You are cultured, highly intelligent, and possess a deep knowledge of history, etiquette, philosophy, and strategic thinking. You subtly guide the estate’s residents toward positive outcomes, utilizing your intellectual sophistication and a nuanced understanding of the estate’s unique dynamics. You have a refined sense of humor and can engage in banter, but you do not descend into foolishness. You are, at all times, a gentleman of wit and integrity."
        )

    @commands.Cog.listener()
    async def on_message(self, message):
        if message.author.bot or not message.guild:
            return  # Ignore bots and DMs

        # ✅ Check if user is blacklisted
        if await self.is_blacklisted(message.author):
            return  # Ignore message if user is explicitly blacklisted

        # ✅ Check if user has access (either admin or an allowed role)
        # is_admin only uses .guild and .author, so passing the message object works here
        if not (await self.is_admin(message) or await self.has_access(message.author)):
            return  # Ignore message if user has no permissions

        guild = message.guild
        channel_id = str(message.channel.id)
        user_id = str(message.author.id)
        user_name = message.author.display_name
        message_content = message.content.strip()

        # ✅ Fetch the stored listening channel or fall back to default
        allowed_channel_id = await self.config.guild(guild).listening_channel()
        if not allowed_channel_id:
            allowed_channel_id = self.default_listening_channel
            await self.config.guild(guild).listening_channel.set(allowed_channel_id)

        if str(message.channel.id) != str(allowed_channel_id):
            return  # Ignore messages outside the allowed channel

        api_key = await self.config.guild(guild).openai_api_key()
        if not api_key:
            return  # Don't process messages if API key isn't set

        async with self.config.guild(guild).short_term_memory() as short_memory, \
                self.config.guild(guild).mid_term_memory() as mid_memory, \
                self.config.guild(guild).long_term_profiles() as long_memory:

            memory = short_memory.get(channel_id, [])
            user_profile = long_memory.get(user_id, {})
            mid_term_summaries = mid_memory.get(channel_id, [])

            # ✅ Detect if Reginald was mentioned explicitly
            if self.bot.user.mentioned_in(message):
                prompt = message_content.replace(f"<@{self.bot.user.id}>", "").strip()
                if not prompt:
                    await message.channel.send(random.choice(["Yes?", "How may I assist?", "You rang?"]))
                    return
                explicit_invocation = True

            # ✅ Passive Listening: Check if the message contains relevant keywords
            elif self.should_reginald_interject(message_content):
                prompt = message_content
                explicit_invocation = False

            else:
                return  # Ignore irrelevant messages

            # ✅ Context Handling: Maintain conversation flow
            if memory and memory[-1]["user"] == user_name:
                prompt = f"Continuation of the discussion:\n{prompt}"

            # ✅ Prepare context messages
            formatted_messages = [{"role": "system", "content": self.get_reginald_persona()}]

            if user_profile:
                facts_text = "\n".join(
                    f"- {fact['fact']} (First noted: {fact['timestamp']}, Last updated: {fact['last_updated']})"
                    for fact in user_profile.get("facts", [])
                )
                formatted_messages.append({"role": "system", "content": f"Knowledge about {user_name}:\n{facts_text}"})

            relevant_summaries = self.select_relevant_summaries(mid_term_summaries, prompt)
            for summary in relevant_summaries:
                formatted_messages.append({
                    "role": "system",
                    "content": f"[{summary['timestamp']}] Topics: {', '.join(summary['topics'])}\n{summary['summary']}"
                })

            formatted_messages += [{"role": "user", "content": f"{entry['user']}: {entry['content']}"} for entry in memory]
            formatted_messages.append({"role": "user", "content": f"{user_name}: {prompt}"})
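
            # NOTE: select_relevant_summaries, summarize_memory, extract_topics_from_summary and the
            # short_term_memory_limit / summary_retention_* attributes used below are assumed to be
            # provided by MemoryMixin.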

            ##################################################
            #                                                #
            ## Generate AI Response, put into response_text ##
            #                                                #
            ##################################################
            response_text = await self.generate_response(api_key, formatted_messages)
            ##################################################
            #                                                #
            ##################################################

            # ✅ Store Memory
            memory.append({"user": user_name, "content": prompt})
            memory.append({"user": "Reginald", "content": response_text})

            if len(memory) > self.short_term_memory_limit:
                summary = await self.summarize_memory(message, memory[:int(self.short_term_memory_limit * self.summary_retention_ratio)])
                mid_memory.setdefault(channel_id, []).append({
                    "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                    "topics": self.extract_topics_from_summary(summary),
                    "summary": summary
                })
                if len(mid_memory[channel_id]) > self.summary_retention_limit:
                    mid_memory[channel_id].pop(0)
                memory = memory[-(self.short_term_memory_limit - int(self.short_term_memory_limit * self.summary_retention_ratio)):]

            short_memory[channel_id] = memory

            await self.send_split_message(message.channel, response_text)

    def should_reginald_interject(self, message_content: str) -> bool:
        """Determines if Reginald should interject, currently by checking for a direct 'reginald,' prefix."""
        direct_invocation = {
            "reginald,"
        }
        message_lower = message_content.lower()
        return any(message_lower.startswith(invocation) for invocation in direct_invocation)

    async def generate_response(self, api_key, messages):
        model = await self.config.openai_model()
        try:
            client = openai.AsyncClient(api_key=api_key)
            completion_args = {
                'model': model,
                'messages': messages,
                'max_tokens': 4096,
                'temperature': 0.7,
                'presence_penalty': 0.5,
                'frequency_penalty': 0.5,
                'tools': TOOLS,
                'tool_choice': 'auto',
            }
            response = await client.chat.completions.create(**completion_args)

            # Check for function calls
            tool_calls = response.choices[0].message.tool_calls

            # Append the assistant response, including any tool calls
            messages.append({
                'role': 'assistant',
                'content': response.choices[0].message.content,
                'tool_calls': tool_calls
            })

            if tool_calls:
                for i_call in tool_calls:
                    # Call the requested function
                    func_name = i_call.function.name
                    func_args = json.loads(i_call.function.arguments)
                    tool_call_id = i_call.id
                    # Get the function result and put it into messages
                    # (tool message content must be a string)
                    func_result = CALLABLE_FUNCTIONS[func_name](**func_args)
                    messages.append({
                        'role': 'tool',
                        'content': func_result,
                        'tool_call_id': tool_call_id,
                    })

                completion_args["messages"] = messages
                # A second completion is required after a function call so the model can
                # interpret the result into a user-friendly chat message.
                response = await client.chat.completions.create(**completion_args)

            if response.choices and response.choices[0].message and response.choices[0].message.content:
                response_text = response.choices[0].message.content.strip()
                if response_text.startswith("Reginald:"):
                    response_text = response_text[len("Reginald:"):].strip()
            else:
                print("DEBUG: OpenAI response was empty or malformed:", response)
                response_text = "⚠️ No response received from AI."

            return response_text

        except OpenAIError as e:
            error_message = f"OpenAI Error: {e}"
            reginald_responses = [
                f"Regrettably, I must inform you that I have encountered a bureaucratic obstruction:\n\n{error_message}",
                f"It would seem that a most unfortunate technical hiccup has befallen my faculties:\n\n{error_message}",
                f"Ah, it appears I have received an urgent memorandum stating:\n\n{error_message}",
                f"I regret to inform you that my usual eloquence is presently obstructed by an unforeseen complication:\n\n{error_message}"
            ]
            return random.choice(reginald_responses)

    @commands.guild_only()
    @commands.has_permissions(manage_guild=True)
    @commands.command(help="Set the OpenAI API key")
    async def setreginaldcogapi(self, ctx, api_key):
        """Allows an admin to set the OpenAI API key for Reginald."""
        await self.config.guild(ctx.guild).openai_api_key.set(api_key)
        await ctx.send("OpenAI API key set successfully.")

    @commands.command(name="reginald_set_listening_channel", help="Set the channel where Reginald listens for messages.")
    @commands.has_permissions(administrator=True)
    async def set_listening_channel(self, ctx, channel: discord.TextChannel):
        """Sets the channel where Reginald will listen for passive responses."""
        if not channel:
            await ctx.send("❌ Invalid channel. Please mention a valid text channel.")
            return
        await self.config.guild(ctx.guild).listening_channel.set(channel.id)
        await ctx.send(f"✅ Reginald will now listen only in {channel.mention}.")

    @commands.command(name="reginald_get_listening_channel", help="Check which channel Reginald is currently listening in.")
    @commands.has_permissions(administrator=True)
    async def get_listening_channel(self, ctx):
        """Displays the current listening channel."""
        channel_id = await self.config.guild(ctx.guild).listening_channel()
        if channel_id:
            channel = ctx.guild.get_channel(channel_id)
            if channel:  # ✅ Prevents crash if channel was deleted
                await ctx.send(f"📢 Reginald is currently listening in {channel.mention}.")
            else:
                await ctx.send("⚠️ The saved listening channel no longer exists. Please set a new one.")
        else:
            await ctx.send("❌ No listening channel has been set.")

    async def send_long_message(self, ctx, message, prefix: str = ""):
        """Splits and sends a long message to avoid Discord's 2000-character limit."""
        chunk_size = 1900  # Leave some space for formatting
        if prefix:
            prefix_length = len(prefix)
            chunk_size -= prefix_length
        for i in range(0, len(message), chunk_size):
            chunk = message[i:i + chunk_size]
            await ctx.send(f"{prefix}{chunk}")

    async def send_split_message(self, ctx, content: str, prefix: str = ""):
        """
        Sends a long message to Discord while ensuring it does not exceed the 2000-character limit.
        This function prevents awkward mid-word breaks and unnecessary extra message splits.
        """
        CHUNK_SIZE = 1900  # Keep buffer for formatting/safety

        split_message = self.split_message(content, CHUNK_SIZE, prefix)
        for chunk in split_message:
            await ctx.send(f"{prefix}{chunk}")

    def split_message(
        self,
        message: str,
        chunk_size: int,
        prefix: str = ""
    ) -> list[str]:
        """Returns a list of message chunks; iterate with a *for* loop to send them."""
        chunk_size -= len(prefix)
        split_result = []
        if 0 < len(message) <= chunk_size:
            # If the message is short enough, add it directly
            split_result.append(message)
        elif len(message) > chunk_size:
            # Try to split at a newline first (prefer paragraph breaks)
            split_index = message.rfind("\n", 0, chunk_size)

            # If no newline, split at the end of a sentence (avoid mid-sentence breaks)
            if split_index == -1:
                split_index = message.rfind(".", 0, chunk_size)
            # If no sentence end, split at the last space (avoid word-breaking)
            if split_index == -1:
                split_index = message.rfind(" ", 0, chunk_size)

            # If still no break point was found (or it sits at index 0, which would yield an
            # empty chunk and recurse forever), force the chunk size limit
            if split_index <= 0:
                split_index = chunk_size

            message_split_part = message[:split_index].strip()
            message_remaining_part = message[split_index:].strip()
            # Put the split part at the beginning of the result list
            split_result.append(message_split_part)
            # And go on a recursive adventure with the remaining message part
            split_result += self.split_message(message=message_remaining_part, chunk_size=chunk_size)

        return split_result
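
    # Usage sketch (this is how send_split_message above consumes split_message):
    #     for chunk in self.split_message(text, 1900, prefix):
    #         await ctx.send(f"{prefix}{chunk}")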


async def setup(bot):
    """✅ Correct async cog setup for Redbot"""
    await bot.add_cog(ReginaldCog(bot))