Sending cooldown messages in DMs
This commit is contained in:
parent
b42b475690
commit
0e298b6cf1
@ -23,7 +23,7 @@ class ReginaldCog(commands.Cog):
|
|||||||
|
|
||||||
@commands.guild_only()
|
@commands.guild_only()
|
||||||
@commands.command(help="Ask Reginald a question")
|
@commands.command(help="Ask Reginald a question")
|
||||||
@commands.cooldown(1, 300, commands.BucketType.user) # 5-minute cooldown per user
|
@commands.cooldown(1, 60, commands.BucketType.user) # 1-minute cooldown per user
|
||||||
async def reginald(self, ctx, *, prompt=None):
|
async def reginald(self, ctx, *, prompt=None):
|
||||||
ignored_user_id = 138125632876838912
|
ignored_user_id = 138125632876838912
|
||||||
if ctx.author.id == ignored_user_id:
|
if ctx.author.id == ignored_user_id:
|
||||||
@ -46,15 +46,19 @@ class ReginaldCog(commands.Cog):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
response_text = await self.generate_response(api_key, prompt)
|
response_text = await self.generate_response(api_key, prompt)
|
||||||
await ctx.send(response_text)
|
for chunk in self.split_response(response_text, 2000):
|
||||||
|
await ctx.send(chunk)
|
||||||
except openai.error.OpenAIError as e:
|
except openai.error.OpenAIError as e:
|
||||||
await ctx.send(f"I apologize, but I am unable to generate a response at this time. Error message: {str(e)}")
|
await ctx.send(f"I apologize, but I am unable to generate a response at this time. Error message: {str(e)}")
|
||||||
|
except commands.CommandOnCooldown as e:
|
||||||
|
remaining_seconds = int(e.retry_after)
|
||||||
|
await ctx.author.send(f'Please wait {remaining_seconds} seconds before using the "reginald" command again.')
|
||||||
|
|
||||||
async def generate_response(self, api_key, prompt):
|
async def generate_response(self, api_key, prompt):
|
||||||
model = await self.config.openai_model()
|
model = await self.config.openai_model()
|
||||||
openai.api_key = api_key
|
openai.api_key = api_key
|
||||||
max_tokens = 1000
|
max_tokens = 1000
|
||||||
temperature = 0.7
|
temperature = 0.5
|
||||||
response = openai.Completion.create(
|
response = openai.Completion.create(
|
||||||
model=model,
|
model=model,
|
||||||
prompt=prompt,
|
prompt=prompt,
|
||||||
@ -62,12 +66,23 @@ class ReginaldCog(commands.Cog):
|
|||||||
n=1,
|
n=1,
|
||||||
stop=None,
|
stop=None,
|
||||||
temperature=temperature,
|
temperature=temperature,
|
||||||
presence_penalty=0.3,
|
presence_penalty=0.2,
|
||||||
frequency_penalty=0.3,
|
frequency_penalty=0.1,
|
||||||
best_of=3
|
best_of=3
|
||||||
)
|
)
|
||||||
return response.choices[0].text.strip()
|
return response.choices[0].text.strip()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def split_response(response_text, max_chars):
|
||||||
|
chunks = []
|
||||||
|
while len(response_text) > max_chars:
|
||||||
|
split_index = response_text[:max_chars].rfind(' ')
|
||||||
|
chunk = response_text[:split_index]
|
||||||
|
chunks.append(chunk)
|
||||||
|
response_text = response_text[split_index:].strip()
|
||||||
|
chunks.append(response_text)
|
||||||
|
return chunks
|
||||||
|
|
||||||
def setup(bot):
    """Entry point used by the bot's extension loader: register the cog."""
    bot.add_cog(ReginaldCog(bot))
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user