Refactor code structure for improved readability and maintainability
@@ -138,6 +138,10 @@ Once the bot is running, it connects to Discord using credentials from `.env`. A
 The bot supports the following models:
 - openai/gpt-4o
 - openai/gpt-4o-mini
+- openai/gpt-5
+- openai/gpt-5-nano
+- openai/gpt-5-mini
+- openai/gpt-5-chat
 - openai/o1-preview
 - openai/o1-mini
 - openai/o1
BIN src/commands/__pycache__/commands.cpython-312.pyc Normal file
Binary file not shown.
src/commands/commands.py
@@ -6,7 +6,7 @@ import io
 import asyncio
 from typing import Optional, Dict, List, Any, Callable
 
-from src.config.config import MODEL_OPTIONS, PDF_ALLOWED_MODELS
+from src.config.config import MODEL_OPTIONS, PDF_ALLOWED_MODELS, DEFAULT_MODEL
 from src.utils.image_utils import ImageGenerator
 from src.utils.web_utils import google_custom_search, scrape_web_content
 from src.utils.pdf_utils import process_pdf, send_response
@@ -119,7 +119,7 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
 
     async def process_search(interaction: discord.Interaction, query: str):
         user_id = interaction.user.id
-        model = await db_handler.get_user_model(user_id) or "openai/gpt-4.1-mini"
+        model = await db_handler.get_user_model(user_id) or DEFAULT_MODEL
         history = await db_handler.get_history(user_id)
 
         try:
@@ -157,7 +157,7 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
 
             # Send to the AI model
             response = await openai_client.chat.completions.create(
-                model=model if model in ["openai/gpt-4o", "openai/openai/gpt-4o-mini"] else "openai/gpt-4o",
+                model=model if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"] else "openai/gpt-4o",
                 messages=messages,
                 temperature=0.5
             )
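Note the bug the removed line fixes in passing: "openai/openai/gpt-4o-mini" (a doubled provider prefix) could never equal a stored model name, so gpt-4o-mini users were silently routed to the "openai/gpt-4o" fallback. A minimal sketch of the gating pattern both search paths now share; the helper name is hypothetical, and the commit itself keeps the conditional inline:

# Hypothetical helper; process_search and process_web repeat this
# allow-list inline in the diff.
SEARCH_CAPABLE = {
    "openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5",
    "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat",
}

def gate_model(model: str, fallback: str = "openai/gpt-4o") -> str:
    # Models outside the allow-list (e.g. the o1 family) fall back to gpt-4o.
    return model if model in SEARCH_CAPABLE else fallback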
@@ -200,7 +200,7 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
 
     async def process_web(interaction: discord.Interaction, url: str):
         user_id = interaction.user.id
-        model = await db_handler.get_user_model(user_id) or "openai/gpt-4.1-mini"
+        model = await db_handler.get_user_model(user_id) or DEFAULT_MODEL
         history = await db_handler.get_history(user_id)
 
         try:
@@ -222,7 +222,7 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
             ]
 
             response = await openai_client.chat.completions.create(
-                model=model if model in ["openai/gpt-4o", "openai/gpt-4o-mini"] else "openai/gpt-4o",
+                model=model if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"] else "openai/gpt-4o",
                 messages=messages,
                 temperature=0.3,
                 top_p=0.7
@@ -307,10 +307,10 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
 
         user_id = interaction.user.id
         history = await db_handler.get_history(user_id)
-        model = await db_handler.get_user_model(user_id) or "openai/gpt-4.1-mini" # Default model
+        model = await db_handler.get_user_model(user_id) or DEFAULT_MODEL # Default model
 
         # Adjust model for encoding purposes
-        if model in ["openai/gpt-4o", "openai/o1", "openai/o1-preview", "openai/o1-mini", "openai/o3-mini"]:
+        if model in ["openai/gpt-4o", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat", "openai/o1", "openai/o1-preview", "openai/o1-mini", "openai/o3-mini"]:
             encoding_model = "openai/gpt-4o"
         else:
             encoding_model = model
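The remap above likely exists because tokenizer tables lag behind model releases. A minimal sketch of the counting step this presumably feeds, assuming tiktoken and that the "openai/" prefix is stripped before lookup (neither detail is shown in this diff):

import tiktoken

def get_encoding(model: str) -> tiktoken.Encoding:
    name = model.split("/", 1)[-1]  # "openai/gpt-4o" -> "gpt-4o" (assumption)
    try:
        return tiktoken.encoding_for_model(name)
    except KeyError:
        # Models unknown to the installed tiktoken (gpt-5 family, o1, ...)
        # fall back to the gpt-4o encoding, mirroring the remap in the hunk.
        return tiktoken.encoding_for_model("gpt-4o")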
@@ -362,7 +362,7 @@ def setup_commands(bot: commands.Bot, db_handler, openai_client, image_generator
         """Sends a list of available commands to the user."""
         help_message = (
             "**Available commands:**\n"
-            "/choose_model - Select which AI model to use for responses (openai/gpt-4o, openai/gpt-4o-mini, openai/o1-preview, openai/o1-mini).\n"
+            "/choose_model - Select which AI model to use for responses (openai/gpt-4o, openai/gpt-4o-mini, openai/gpt-5, openai/gpt-5-nano, openai/gpt-5-mini, openai/gpt-5-chat, openai/o1-preview, openai/o1-mini).\n"
             "/search `<query>` - Search Google and send results to the AI model.\n"
             "/web `<url>` - Scrape a webpage and send the data to the AI model.\n"
             "/generate `<prompt>` - Generate an image from a text prompt.\n"
BIN src/config/__pycache__/config.cpython-312.pyc Normal file
Binary file not shown.
src/config/config.py
@@ -67,6 +67,10 @@ MODEL_OPTIONS = [
     "openai/gpt-4.1",
     "openai/gpt-4.1-nano",
     "openai/gpt-4.1-mini",
+    "openai/gpt-5",
+    "openai/gpt-5-nano",
+    "openai/gpt-5-mini",
+    "openai/gpt-5-chat",
     "openai/o1-preview",
     "openai/o1-mini",
     "openai/o1",
@@ -87,13 +91,20 @@ MODEL_TOKEN_LIMITS = {
     "openai/gpt-4.1-mini": 8000,
     "openai/o3-mini": 4000,
     "openai/o3": 4000,
-    "openai/o4-mini": 4000
+    "openai/o4-mini": 4000,
+    "openai/gpt-5": 8000,
+    "openai/gpt-5-nano": 8000,
+    "openai/gpt-5-mini": 8000,
+    "openai/gpt-5-chat": 8000
 }
 
 # Default token limit for unknown models
 DEFAULT_TOKEN_LIMIT = 60000
 
-PDF_ALLOWED_MODELS = ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-4.1","openai/gpt-4.1-nano","openai/gpt-4.1-mini"]
+# Default model for new users
+DEFAULT_MODEL = "openai/gpt-4.1"
+
+PDF_ALLOWED_MODELS = ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-4.1","openai/gpt-4.1-nano","openai/gpt-4.1-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"]
 PDF_BATCH_SIZE = 3
 
 # Prompt templates
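For orientation, a sketch of how these three constants plausibly combine downstream; only the import in message_handler.py is visible in this diff, so the helper below is hypothetical:

from src.config.config import MODEL_TOKEN_LIMITS, DEFAULT_TOKEN_LIMIT, DEFAULT_MODEL

def resolve_model_and_limit(stored_model: str | None) -> tuple[str, int]:
    # Fall back to DEFAULT_MODEL when the user has no stored preference,
    # then look up the token budget, defaulting for models missing from
    # MODEL_TOKEN_LIMITS.
    model = stored_model or DEFAULT_MODEL
    return model, MODEL_TOKEN_LIMITS.get(model, DEFAULT_TOKEN_LIMIT)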
BIN src/module/__pycache__/message_handler.cpython-312.pyc Normal file
Binary file not shown.
src/module/message_handler.py
@@ -19,7 +19,7 @@ from src.utils.openai_utils import process_tool_calls, prepare_messages_for_api,
 from src.utils.pdf_utils import process_pdf, send_response
 from src.utils.code_utils import extract_code_blocks
 from src.utils.reminder_utils import ReminderManager
-from src.config.config import PDF_ALLOWED_MODELS, MODEL_TOKEN_LIMITS, DEFAULT_TOKEN_LIMIT
+from src.config.config import PDF_ALLOWED_MODELS, MODEL_TOKEN_LIMITS, DEFAULT_TOKEN_LIMIT, DEFAULT_MODEL
 
 # Global task and rate limiting tracking
 user_tasks = {}
@@ -700,7 +700,7 @@ class MessageHandler:
 
         # Get history and model preferences first
         history = await self.db.get_history(user_id)
-        model = await self.db.get_user_model(user_id) or "openai/gpt-4.1-mini" # Default to openai/gpt-4.1-mini if no model set
+        model = await self.db.get_user_model(user_id) or DEFAULT_MODEL # Default to configured default model if no model set
 
         # Handle PDF files
         if message.attachments:
@@ -859,14 +859,14 @@ class MessageHandler:
 
         # Determine which models should have tools available
         # openai/o1-mini and openai/o1-preview do not support tools
-        use_tools = model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/o1", "openai/o3-mini", "openai/gpt-4.1", "openai/gpt-4.1-mini", "openai/gpt-4.1-nano", "openai/o3", "openai/o4-mini"]
+        use_tools = model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat", "openai/o1", "openai/o3-mini", "openai/gpt-4.1", "openai/gpt-4.1-mini", "openai/gpt-4.1-nano", "openai/o3", "openai/o4-mini"]
 
         # Prepare API call parameters
         api_params = {
             "model": model,
             "messages": messages_for_api,
-            "temperature": 0.3 if model in ["openai/gpt-4o", "openai/openai/gpt-4o-mini"] else 1,
-            "top_p": 0.7 if model in ["openai/gpt-4o", "openai/gpt-4o-mini"] else 1,
+            "temperature": 0.3 if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"] else 1,
+            "top_p": 0.7 if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"] else 1,
             "timeout": 120 # Increased timeout for better response handling
         }
 
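The `else 1` branches are presumably there for the o-series reasoning models, which accept only default sampling values; some endpoints restrict the non-chat gpt-5 models the same way, so whether 0.3/0.7 is honored for them may depend on the provider. A hypothetical consolidation of the repeated conditionals, with the values and model list taken from this hunk:

SAMPLING_TUNED = {
    "openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5",
    "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat",
}

def sampling_params(model: str) -> dict:
    # Tuned models get focused sampling; everything else (o1/o3/o4 families)
    # keeps the API defaults of 1.
    if model in SAMPLING_TUNED:
        return {"temperature": 0.3, "top_p": 0.7}
    return {"temperature": 1, "top_p": 1}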
@@ -1010,7 +1010,7 @@ class MessageHandler:
         response = await self.client.chat.completions.create(
             model=model,
             messages=updated_messages,
-            temperature=0.3 if model in ["openai/gpt-4o", "openai/gpt-4o-mini"] else 1,
+            temperature=0.3 if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat"] else 1,
             timeout=120
         )
 
@@ -1033,7 +1033,7 @@ class MessageHandler:
         })
 
         # Store the response in history for models that support it
-        if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/o1", "openai/o1-mini", "openai/o3-mini", "openai/gpt-4.1", "openai/gpt-4.1-nano", "openai/gpt-4.1-mini", "openai/o3", "openai/o4-mini", "openai/o1-preview"]:
+        if model in ["openai/gpt-4o", "openai/gpt-4o-mini", "openai/gpt-5", "openai/gpt-5-nano", "openai/gpt-5-mini", "openai/gpt-5-chat", "openai/o1", "openai/o1-mini", "openai/o3-mini", "openai/gpt-4.1", "openai/gpt-4.1-nano", "openai/gpt-4.1-mini", "openai/o3", "openai/o4-mini", "openai/o1-preview"]:
             if model in ["openai/o1-mini", "openai/o1-preview"]:
                 # For models without system prompt support, keep track separately
                 if has_images:
@@ -1421,7 +1421,12 @@ class MessageHandler:
             msg_tokens = self._count_tokens([msg])
 
             if current_tokens + msg_tokens <= available_tokens:
-                trimmed_history.insert(-1 if system_message else 0, msg) # Insert before system or at start
+                if system_message:
+                    # Insert after system message (position 1)
+                    trimmed_history.insert(1, msg)
+                else:
+                    # Insert at start if no system message
+                    trimmed_history.insert(0, msg)
                 current_tokens += msg_tokens
             else:
                 # Stop adding more messages
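The replaced line is the substantive fix in this hunk: list.insert(-1, x) places x before the last element, wherever that happens to be, while the old comment claimed it inserted "before system or at start". A standalone illustration with placeholder history entries:

history = ["system", "msg_a", "msg_b"]
history.insert(-1, "new")
print(history)  # ['system', 'msg_a', 'new', 'msg_b'] -- drifts as the list grows

history = ["system", "msg_a", "msg_b"]
history.insert(1, "new")
print(history)  # ['system', 'new', 'msg_a', 'msg_b'] -- pinned right after system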