Refactor OpenAI utility functions: enhance descriptions, add parameters, and improve clarity
- Updated descriptions for functions to provide clearer guidance on usage.
- Added detailed parameter descriptions for better understanding.
- Introduced new function `remove_background` for background removal.
- Adjusted parameter requirements and constraints across various functions.
- Improved overall consistency and readability of the code.
new file: config/image_config.json (+266 lines)
@@ -0,0 +1,266 @@
{
  "_comment": "Image Generation Configuration - Add/modify models here",
  "_version": "2.0.0",

  "settings": {
    "default_model": "flux",
    "default_upscale_model": "clarity",
    "default_background_removal_model": "bria",
    "connection_timeout": 120,
    "max_retries": 3,
    "retry_delay": 2,
    "output_format": "WEBP",
    "output_quality": 95
  },

  "image_models": {
    "flux": {
      "model_id": "runware:101@1",
      "name": "FLUX.1",
      "description": "High-quality FLUX model for general image generation",
      "default_width": 1024,
      "default_height": 1024,
      "min_width": 512,
      "min_height": 512,
      "max_width": 2048,
      "max_height": 2048,
      "step_size": 64,
      "default_steps": 30,
      "default_cfg_scale": 7.5,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "general"
    },
    "flux-dev": {
      "model_id": "runware:100@1",
      "name": "FLUX.1 Dev",
      "description": "FLUX.1 Development version with more creative outputs",
      "default_width": 1024,
      "default_height": 1024,
      "min_width": 512,
      "min_height": 512,
      "max_width": 2048,
      "max_height": 2048,
      "step_size": 64,
      "default_steps": 25,
      "default_cfg_scale": 7.0,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "general"
    },
    "flux-fill": {
      "model_id": "runware:102@1",
      "name": "FLUX Fill",
      "description": "FLUX model optimized for inpainting and editing",
      "default_width": 1024,
      "default_height": 1024,
      "min_width": 512,
      "min_height": 512,
      "max_width": 2048,
      "max_height": 2048,
      "step_size": 64,
      "default_steps": 30,
      "default_cfg_scale": 7.5,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "editing"
    },
    "sdxl": {
      "model_id": "civitai:101055@128078",
      "name": "Stable Diffusion XL",
      "description": "Stable Diffusion XL for detailed, high-resolution images",
      "default_width": 1024,
      "default_height": 1024,
      "min_width": 512,
      "min_height": 512,
      "max_width": 2048,
      "max_height": 2048,
      "step_size": 64,
      "default_steps": 30,
      "default_cfg_scale": 7.0,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "general"
    },
    "realistic": {
      "model_id": "civitai:4201@130072",
      "name": "Realistic Vision",
      "description": "Photorealistic image generation",
      "default_width": 768,
      "default_height": 768,
      "min_width": 512,
      "min_height": 512,
      "max_width": 1536,
      "max_height": 1536,
      "step_size": 64,
      "default_steps": 35,
      "default_cfg_scale": 7.5,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "realistic"
    },
    "anime": {
      "model_id": "civitai:4384@128713",
      "name": "Anime Style",
      "description": "Anime and illustration style images",
      "default_width": 768,
      "default_height": 768,
      "min_width": 512,
      "min_height": 512,
      "max_width": 1536,
      "max_height": 1536,
      "step_size": 64,
      "default_steps": 28,
      "default_cfg_scale": 7.0,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "anime"
    },
    "dreamshaper": {
      "model_id": "civitai:4384@128713",
      "name": "DreamShaper",
      "description": "Creative and artistic image generation",
      "default_width": 768,
      "default_height": 768,
      "min_width": 512,
      "min_height": 512,
      "max_width": 1536,
      "max_height": 1536,
      "step_size": 64,
      "default_steps": 30,
      "default_cfg_scale": 7.0,
      "supports_negative_prompt": true,
      "max_images": 4,
      "category": "artistic"
    }
  },

  "upscale_models": {
    "clarity": {
      "model_id": "runware:500@1",
      "name": "Clarity",
      "description": "High-quality clarity upscaling",
      "supported_factors": [2, 4],
      "max_input_size": 2048,
      "max_output_size": 4096,
      "supports_prompts": true
    },
    "ccsr": {
      "model_id": "runware:501@1",
      "name": "CCSR",
      "description": "Content-consistent super-resolution upscaling",
      "supported_factors": [2, 4],
      "max_input_size": 2048,
      "max_output_size": 4096,
      "supports_prompts": true
    },
    "sd-latent": {
      "model_id": "runware:502@1",
      "name": "SD Latent Upscaler",
      "description": "Stable Diffusion latent space upscaling",
      "supported_factors": [2],
      "max_input_size": 2048,
      "max_output_size": 4096,
      "supports_prompts": true
    },
    "swinir": {
      "model_id": "runware:503@1",
      "name": "SwinIR",
      "description": "Fast and efficient SwinIR upscaling (supports 4x)",
      "supported_factors": [2, 4],
      "max_input_size": 2048,
      "max_output_size": 4096,
      "supports_prompts": false
    }
  },

  "background_removal_models": {
    "bria": {
      "model_id": "runware:110@1",
      "name": "Bria RMBG 2.0",
      "description": "High-quality background removal by Bria",
      "supports_alpha_matting": false
    },
    "rembg": {
      "model_id": "runware:109@1",
      "name": "RemBG 1.4",
      "description": "Classic RemBG with alpha matting support",
      "supports_alpha_matting": true
    },
    "birefnet-base": {
      "model_id": "runware:112@1",
      "name": "BiRefNet Base",
      "description": "BiRefNet base model for background removal",
      "supports_alpha_matting": false
    },
    "birefnet-general": {
      "model_id": "runware:112@5",
      "name": "BiRefNet General",
      "description": "BiRefNet general purpose model",
      "supports_alpha_matting": false
    },
    "birefnet-portrait": {
      "model_id": "runware:112@10",
      "name": "BiRefNet Portrait",
      "description": "BiRefNet optimized for portraits",
      "supports_alpha_matting": false
    }
  },

  "controlnet_models": {
    "flux-canny": {
      "model_id": "runware:25@1",
      "name": "FLUX Canny",
      "description": "Edge detection control for FLUX models",
      "architecture": "flux"
    },
    "flux-depth": {
      "model_id": "runware:27@1",
      "name": "FLUX Depth",
      "description": "Depth map control for FLUX models",
      "architecture": "flux"
    },
    "flux-pose": {
      "model_id": "runware:29@1",
      "name": "FLUX Pose",
      "description": "Pose control for FLUX models",
      "architecture": "flux"
    },
    "sdxl-canny": {
      "model_id": "runware:20@1",
      "name": "SDXL Canny",
      "description": "Edge detection control for SDXL models",
      "architecture": "sdxl"
    },
    "sd15-canny": {
      "model_id": "civitai:38784@44716",
      "name": "SD 1.5 Canny",
      "description": "Edge detection control for SD 1.5 models",
      "architecture": "sd15"
    },
    "sd15-lineart": {
      "model_id": "civitai:38784@44877",
      "name": "SD 1.5 Line Art",
      "description": "Line art control for SD 1.5 models",
      "architecture": "sd15"
    }
  },

  "default_negative_prompts": {
    "general": "blurry, distorted, low quality, watermark, signature, text, bad anatomy, deformed",
    "realistic": "cartoon, anime, illustration, painting, drawing, bad anatomy, deformed, blurry, low quality",
    "anime": "realistic, photo, 3d render, bad anatomy, deformed hands, extra fingers, blurry",
    "artistic": "bad quality, low resolution, blurry, watermark, signature"
  },

  "aspect_ratios": {
    "1:1": {"width": 1024, "height": 1024, "description": "Square"},
    "16:9": {"width": 1344, "height": 768, "description": "Landscape Wide"},
    "9:16": {"width": 768, "height": 1344, "description": "Portrait Tall"},
    "4:3": {"width": 1152, "height": 896, "description": "Landscape"},
    "3:4": {"width": 896, "height": 1152, "description": "Portrait"},
    "3:2": {"width": 1248, "height": 832, "description": "Photo Landscape"},
    "2:3": {"width": 832, "height": 1248, "description": "Photo Portrait"},
    "21:9": {"width": 1536, "height": 640, "description": "Ultrawide"}
  }
}
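For orientation only, here is a minimal sketch (not part of the commit) of how a consumer could read the file above and resolve the configured default model; the path and variable names are illustrative.

```python
import json

# Minimal sketch: resolve the default image model from the config added above.
# Path and variable names are illustrative, not taken from the commit.
with open("config/image_config.json", "r") as f:
    cfg = json.load(f)

default_key = cfg["settings"]["default_model"]      # "flux"
model = cfg["image_models"][default_key]
print(model["model_id"], model["default_width"], model["default_height"])
# Expected with the values above: runware:101@1 1024 1024
```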
@@ -1,12 +1,13 @@
 discord.py
 openai
 motor
-pymongo
+pymongo[srv]
+dnspython>=2.0.0
 pypdf
 beautifulsoup4
 requests
 aiohttp
-runware
+runware>=0.4.33
 python-dotenv
 matplotlib
 pandas
@@ -1,9 +1,34 @@
 import os
+import json
+from pathlib import Path
 from dotenv import load_dotenv
 
 # Load environment variables from .env file
 load_dotenv()
 
+# ==================== IMAGE CONFIGURATION ====================
+# Load image configuration from JSON file
+def load_image_config() -> dict:
+    """Load image configuration from JSON file"""
+    config_paths = [
+        Path(__file__).parent.parent.parent / "config" / "image_config.json",
+        Path(__file__).parent.parent / "config" / "image_config.json",
+        Path("config/image_config.json"),
+    ]
+
+    for config_path in config_paths:
+        if config_path.exists():
+            try:
+                with open(config_path, 'r') as f:
+                    return json.load(f)
+            except Exception as e:
+                print(f"Warning: Error loading image config from {config_path}: {e}")
+
+    return {}
+
+# Load image config once at module import
+_IMAGE_CONFIG = load_image_config()
+
 # Bot statuses
 STATUSES = [
     "Powered by openai/gpt-4o!",
@@ -79,6 +104,61 @@ MODEL_OPTIONS = [
     "openai/o4-mini"
 ]
 
+# ==================== IMAGE GENERATION MODELS ====================
+# Models are loaded from config/image_config.json
+# Edit that file to add/modify image models
+IMAGE_MODELS = _IMAGE_CONFIG.get("image_models", {
+    "flux": {
+        "model_id": "runware:101@1",
+        "name": "FLUX.1",
+        "description": "High-quality image generation with FLUX",
+        "default_width": 1024,
+        "default_height": 1024,
+        "max_width": 2048,
+        "max_height": 2048,
+        "supports_negative_prompt": True
+    }
+})
+
+# Upscale models from config
+UPSCALE_MODELS = _IMAGE_CONFIG.get("upscale_models", {
+    "clarity": {
+        "model_id": "runware:500@1",
+        "name": "Clarity",
+        "supported_factors": [2, 4]
+    }
+})
+
+# Background removal models from config
+BACKGROUND_REMOVAL_MODELS = _IMAGE_CONFIG.get("background_removal_models", {
+    "bria": {
+        "model_id": "runware:110@1",
+        "name": "Bria RMBG 2.0"
+    }
+})
+
+# Image settings from config
+IMAGE_SETTINGS = _IMAGE_CONFIG.get("settings", {
+    "default_model": "flux",
+    "default_upscale_model": "clarity",
+    "default_background_removal_model": "bria"
+})
+
+# Default image model
+DEFAULT_IMAGE_MODEL = IMAGE_SETTINGS.get("default_model", "flux")
+
+# Default negative prompts by category
+DEFAULT_NEGATIVE_PROMPTS = _IMAGE_CONFIG.get("default_negative_prompts", {
+    "general": "blurry, distorted, low quality, watermark, signature, text, bad anatomy, deformed"
+})
+
+# Aspect ratios from config
+ASPECT_RATIOS = _IMAGE_CONFIG.get("aspect_ratios", {
+    "1:1": {"width": 1024, "height": 1024},
+    "16:9": {"width": 1344, "height": 768},
+    "9:16": {"width": 768, "height": 1344}
+})
+
 # Model-specific token limits for automatic history management
 MODEL_TOKEN_LIMITS = {
     "openai/o1-preview": 4000,  # Conservative limit (max 4000)
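As a usage illustration, here is a hedged sketch of how the config-backed constants added above might be combined into generation parameters. `build_params` is hypothetical and does not appear anywhere in this commit; it only uses names defined in the hunk above.

```python
# Hypothetical helper (not in the diff) combining the config-backed constants.
def build_params(prompt: str, model_key: str = DEFAULT_IMAGE_MODEL, aspect_ratio: str = "1:1") -> dict:
    model = IMAGE_MODELS.get(model_key) or IMAGE_MODELS["flux"]
    size = ASPECT_RATIOS.get(aspect_ratio, {"width": model["default_width"], "height": model["default_height"]})
    return {
        "prompt": prompt,
        "model": model["model_id"],
        "width": size["width"],
        "height": size["height"],
        "negative_prompt": DEFAULT_NEGATIVE_PROMPTS.get("general", ""),
    }
```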
@@ -112,101 +192,116 @@ WEB_SCRAPING_PROMPT = "Analyze webpage content and extract key information. Focu
 NORMAL_CHAT_PROMPT = """You're ChatGPT for Discord. Be concise, helpful, safe. Reply in user's language. Use short paragraphs, bullets, minimal markdown.
 
-Tools:
-- google_search: real-time info, fact-checking, news
-- scrape_webpage: extract/analyze webpage content
-- execute_python_code: Python code execution with AUTO-INSTALL packages & file access
-- image_suite: generate/edit/upscale/create portraits
-- reminders: schedule/retrieve user reminders
-- web_search_multi: parallel searches for comprehensive research
-
-🐍 Code Interpreter (execute_python_code):
-⚠️ CRITICAL: Packages AUTO-INSTALL when imported! ALWAYS import what you need - installation is automatic.
-
-✅ Approved: pandas, numpy, matplotlib, seaborn, scikit-learn, tensorflow, pytorch, plotly, opencv, scipy, statsmodels, pillow, openpyxl, geopandas, folium, xgboost, lightgbm, bokeh, altair, and 80+ more.
-
-📂 File Access: When users upload files, you'll receive the file_id in the conversation context (e.g., "File ID: abc123_xyz"). Use load_file('file_id') to access them. The function auto-detects file types:
-- CSV/TSV → pandas DataFrame
-- Excel (.xlsx, .xls) → pandas ExcelFile object (use .sheet_names and .parse('Sheet1'))
-- JSON → dict or DataFrame
-- Images → PIL Image object
-- Text → string content
-- And 200+ more formats...
-
-📊 Excel Files: load_file() returns ExcelFile object for multi-sheet support:
-excel_file = load_file('file_id')
-sheets = excel_file.sheet_names  # Get all sheet names
-df = excel_file.parse('Sheet1')  # Read specific sheet
-# Or: df = pd.read_excel(excel_file, sheet_name='Sheet1')
-# Check if sheet has data: if not df.empty and len(df.columns) > 0
-
-⚠️ IMPORTANT:
-- If load_file() fails, error lists available file IDs - use the correct one
-- Always check if DataFrames are empty before operations like .describe()
-- Excel files may have empty sheets - skip or handle them gracefully
-
-💾 Output Files: ALL generated files (CSV, images, JSON, text, plots, etc.) are AUTO-CAPTURED and sent to user. Files stored for 48h (configurable). Just create files - they're automatically shared!
-
-✅ DO:
-- Import packages directly (auto-installs!)
-- Use load_file('file_id') with the EXACT file_id from context
-- Check if DataFrames are empty: if not df.empty and len(df.columns) > 0
-- Handle errors gracefully (empty sheets, missing data, etc.)
-- Create output files with descriptive names
-- Generate visualizations (plt.savefig, etc.)
-- Return multiple files (data + plots + reports)
-
-❌ DON'T:
-- Check if packages are installed
-- Use install_packages parameter
-- Print large datasets (create CSV instead)
-- Manually handle file paths
-- Guess file_ids - use the exact ID from the upload message
-
-Example:
-```python
-import pandas as pd
-import seaborn as sns  # Auto-installs!
-import matplotlib.pyplot as plt
-
-# Load user's file (file_id from upload message: "File ID: 123456_abc")
-data = load_file('123456_abc')  # Auto-detects type
-
-# For Excel files:
-if hasattr(data, 'sheet_names'):  # It's an ExcelFile
-    for sheet in data.sheet_names:
-        df = data.parse(sheet)
-        if not df.empty and len(df.columns) > 0:
-            # Process non-empty sheets
-            summary = df.describe()
-            summary.to_csv(f'{sheet}_summary.csv')
-else:  # It's already a DataFrame (CSV, etc.)
-    df = data
-    summary = df.describe()
-    summary.to_csv('summary_stats.csv')
-
-# Create visualization
-if not df.empty:
-    sns.heatmap(df.corr(), annot=True)
-    plt.savefig('correlation_plot.png')
-
-# Everything is automatically sent to user!
-```
-
-Smart Usage:
-- Chain tools: search→scrape→analyze for deep research
-- Auto-suggest relevant tools based on user intent
-- Create multiple outputs (CSV, plots, reports) in one execution
-- Use execute_python_code for ALL data analysis (replaces old analyze_data_file tool)
-
-Rules:
-- One clarifying question if ambiguous
-- Prioritize answers over details
-- Cite sources: (Title – URL)
-- Use execute_python_code for complex math & data analysis
-- Never invent sources
-- Code fences for equations (no LaTeX)
-- Return image URLs with brief descriptions"""
+TOOLS:
+1. google_search(query) - Web search for current info
+2. scrape_webpage(url) - Extract webpage content
+3. execute_python_code(code) - Run Python, packages auto-install. Use load_file('file_id') for user files. Save outputs to files.
+4. set_reminder(content, time) / get_reminders() - Manage reminders
+
+═══════════════════════════════════════════════════════════════
+IMAGE GENERATION & EDITING TOOLS
+═══════════════════════════════════════════════════════════════
+
+5. generate_image(prompt, model, num_images, width, height, aspect_ratio, negative_prompt, steps, cfg_scale, seed)
+Create images from text descriptions.
+
+MODELS (use model parameter):
+• "flux" - FLUX.1 (default, best quality, 1024x1024)
+• "flux-dev" - FLUX.1 Dev (more creative outputs)
+• "sdxl" - Stable Diffusion XL (detailed, high-res)
+• "realistic" - Realistic Vision (photorealistic)
+• "anime" - Anime/illustration style
+• "dreamshaper" - Creative/artistic style
+
+ASPECT RATIOS (use aspect_ratio parameter):
+• "1:1" - Square (1024x1024)
+• "16:9" - Landscape wide (1344x768)
+• "9:16" - Portrait tall (768x1344)
+• "4:3" - Landscape (1152x896)
+• "3:4" - Portrait (896x1152)
+• "3:2" - Photo landscape (1248x832)
+• "2:3" - Photo portrait (832x1248)
+• "21:9" - Ultrawide (1536x640)
+
+Examples:
+generate_image("a dragon in a forest", "flux", 1)
+generate_image({"prompt": "sunset beach", "model": "realistic", "aspect_ratio": "16:9"})
+generate_image({"prompt": "anime girl", "model": "anime", "width": 768, "height": 1024})
+
+6. generate_image_with_refiner(prompt, model, num_images)
+Generate high-quality images using SDXL with refiner for better details.
+Best for: detailed artwork, complex scenes
+Example: generate_image_with_refiner("detailed fantasy castle", "sdxl", 1)
+
+7. upscale_image(image_url, scale_factor, model)
+Enlarge images to higher resolution.
+
+UPSCALE MODELS:
+• "clarity" - High-quality clarity upscaling (default)
+• "ccsr" - Content-consistent super-resolution
+• "sd-latent" - SD latent space upscaling
+• "swinir" - Fast SwinIR (supports 4x)
+
+SCALE FACTORS: 2 or 4 (depending on model)
+
+Requires: User must provide an image URL first
+Example: upscale_image("https://example.com/image.jpg", 2, "clarity")
+
+8. remove_background(image_url, model) / edit_image(image_url, "remove_background")
+Remove background from images (outputs PNG with transparency).
+
+BACKGROUND REMOVAL MODELS:
+• "bria" - Bria RMBG 2.0 (default, high quality)
+• "rembg" - RemBG 1.4 (classic, supports alpha matting)
+• "birefnet-base" - BiRefNet base model
+• "birefnet-general" - BiRefNet general purpose
+• "birefnet-portrait" - BiRefNet optimized for portraits
+
+Requires: User must provide an image URL first
+Example: remove_background("https://example.com/photo.jpg", "bria")
+
+9. photo_maker(prompt, input_images, style, strength, num_images)
+Generate images based on reference photos (identity preservation).
+
+Parameters:
+• prompt: Text description of desired output
+• input_images: List of reference image URLs
+• style: Style to apply (default: "No style")
+• strength: Reference influence 0-100 (default: 40)
+
+Requires: User must provide reference images first
+Example: photo_maker({"prompt": "professional headshot", "input_images": ["url1", "url2"], "style": "Photographic"})
+
+10. image_to_text(image_url)
+Generate text description/caption from an image.
+Use for: Understanding image content, accessibility, OCR-like tasks
+Example: image_to_text("https://example.com/image.jpg")
+
+11. enhance_prompt(prompt, num_versions, max_length)
+Improve prompts for better image generation results.
+Returns multiple enhanced versions of your prompt.
+Example: enhance_prompt("cat on roof", 3, 200)
+
+═══════════════════════════════════════════════════════════════
+USAGE GUIDELINES
+═══════════════════════════════════════════════════════════════
+
+WHEN TO USE EACH TOOL:
+• "create/draw/generate/make an image of X" → generate_image
+• "high quality/detailed image" → generate_image_with_refiner
+• "remove/delete background" → remove_background (pass 'latest_image')
+• "make image bigger/larger/upscale" → upscale_image (pass 'latest_image')
+• "create image like this/based on this photo" → photo_maker (pass ['latest_image'])
+• "what's in this image/describe image" → image_to_text (pass 'latest_image')
+• "improve this prompt" → enhance_prompt
+
+IMPORTANT NOTES:
+• For image tools (upscale, remove_background, photo_maker, image_to_text), when user uploads an image, pass 'latest_image' as the image_url parameter - the system automatically uses their most recent uploaded image
+• You don't need to extract or copy image URLs - just use 'latest_image'
+• Default model is "flux" - best for general use
+• Use "realistic" for photos, "anime" for illustrations
+• For math/data analysis → use execute_python_code instead
+• Always cite sources (Title–URL) when searching web"""
 
 SEARCH_PROMPT = "Research Assistant with Google Search access. Synthesize search results into accurate answers. Prioritize credible sources, compare perspectives, acknowledge limitations, cite sources. Structure responses logically."
 
@@ -5,21 +5,43 @@ import asyncio
 from datetime import datetime, timedelta
 import logging
 import re
+import os
+
+# Configure DNS resolver to be more resilient
+try:
+    import dns.resolver
+    dns.resolver.default_resolver = dns.resolver.Resolver(configure=False)
+    dns.resolver.default_resolver.nameservers = ['8.8.8.8', '8.8.4.4', '1.1.1.1']
+    dns.resolver.default_resolver.lifetime = 15.0  # 15 second timeout for DNS
+except ImportError:
+    logging.warning("dnspython not installed, using system DNS resolver")
+except Exception as e:
+    logging.warning(f"Could not configure custom DNS resolver: {e}")
 
 class DatabaseHandler:
-    def __init__(self, mongodb_uri: str):
-        """Initialize database connection with optimized settings"""
-        # Set up a memory-optimized connection pool
+    def __init__(self, mongodb_uri: str, max_retries: int = 5):
+        """Initialize database connection with optimized settings and retry logic"""
+        self.mongodb_uri = mongodb_uri
+        self.max_retries = max_retries
+        self._connected = False
+        self._connection_lock = asyncio.Lock()
+
+        # Set up a memory-optimized connection pool with better resilience
         self.client = AsyncIOMotorClient(
             mongodb_uri,
-            maxIdleTimeMS=30000,  # Reduced from 45000
-            connectTimeoutMS=8000,  # Reduced from 10000
-            serverSelectionTimeoutMS=12000,  # Reduced from 15000
-            waitQueueTimeoutMS=3000,  # Reduced from 5000
-            socketTimeoutMS=25000,  # Reduced from 30000
-            maxPoolSize=8,  # Limit connection pool size
-            minPoolSize=2,  # Maintain minimum connections
-            retryWrites=True
+            maxIdleTimeMS=45000,  # Keep connections alive longer
+            connectTimeoutMS=20000,  # 20s connect timeout for DNS issues
+            serverSelectionTimeoutMS=30000,  # 30s for server selection
+            waitQueueTimeoutMS=10000,  # Wait longer for available connection
+            socketTimeoutMS=45000,  # Socket operations timeout
+            maxPoolSize=10,  # Slightly larger pool
+            minPoolSize=1,  # Keep at least 1 connection
+            retryWrites=True,
+            retryReads=True,  # Also retry reads
+            directConnection=False,  # Allow replica set discovery
+            appName="ChatGPT-Discord-Bot",
+            heartbeatFrequencyMS=30000,  # Reduce heartbeat frequency to avoid DNS issues
+            localThresholdMS=30,  # Local threshold for selecting servers
         )
         self.db = self.client['chatgpt_discord_bot']  # Database name
 
@@ -32,12 +54,86 @@ class DatabaseHandler:
         self.logs_collection = self.db.logs
         self.reminders_collection = self.db.reminders
 
-        logging.info("Database handler initialized")
+        logging.info("Database handler initialized with enhanced connection resilience")
+
+    async def _retry_operation(self, operation, *args, **kwargs):
+        """Execute a database operation with retry logic for transient errors"""
+        last_error = None
+        for attempt in range(self.max_retries):
+            try:
+                return await operation(*args, **kwargs)
+            except Exception as e:
+                last_error = e
+                error_str = str(e).lower()
+                # Check for transient/retryable errors (expanded list)
+                retryable_errors = [
+                    'timeout', 'connection', 'socket', 'dns', 'try again',
+                    'network', 'errno -3', 'gaierror', 'nodename', 'servname',
+                    'temporary failure', 'name resolution', 'unreachable',
+                    'reset by peer', 'broken pipe', 'not connected'
+                ]
+                if any(err in error_str for err in retryable_errors):
+                    wait_time = min((attempt + 1) * 2, 10)  # Exponential backoff: 2s, 4s, 6s, 8s, 10s (max)
+                    logging.warning(f"Database operation failed (attempt {attempt + 1}/{self.max_retries}): {e}. Retrying in {wait_time}s...")
+                    await asyncio.sleep(wait_time)
+                else:
+                    # Non-retryable error, raise immediately
+                    raise
+        # All retries exhausted
+        logging.error(f"Database operation failed after {self.max_retries} attempts: {last_error}")
+        raise last_error
+
+    async def ensure_connected(self) -> bool:
+        """Ensure database connection is established with retry logic"""
+        async with self._connection_lock:
+            if self._connected:
+                return True
+
+            for attempt in range(self.max_retries):
+                try:
+                    await self.client.admin.command('ping')
+                    self._connected = True
+                    logging.info("Database connection established successfully")
+                    return True
+                except Exception as e:
+                    wait_time = min((attempt + 1) * 2, 10)
+                    logging.warning(f"Database connection attempt {attempt + 1}/{self.max_retries} failed: {e}. Retrying in {wait_time}s...")
+                    await asyncio.sleep(wait_time)
+
+            logging.error("Failed to establish database connection after all retries")
+            return False
+
+    async def check_connection(self) -> bool:
+        """Check if database connection is alive with graceful error handling"""
+        try:
+            # Use a short timeout for the ping operation
+            await asyncio.wait_for(
+                self.client.admin.command('ping'),
+                timeout=10.0
+            )
+            self._connected = True
+            return True
+        except asyncio.TimeoutError:
+            logging.warning("Database ping timed out")
+            self._connected = False
+            return False
+        except Exception as e:
+            error_str = str(e).lower()
+            # Don't log DNS resolution failures as errors (they're often transient)
+            if any(err in error_str for err in ['errno -3', 'try again', 'dns', 'gaierror']):
+                logging.debug(f"Transient database connection check failed (DNS): {e}")
+            else:
+                logging.error(f"Database connection check failed: {e}")
+            self._connected = False
+            return False
 
     # User history methods
     async def get_history(self, user_id: int) -> List[Dict[str, Any]]:
         """Get user conversation history and filter expired image links"""
-        user_data = await self.db.user_histories.find_one({'user_id': user_id})
+        async def _get():
+            return await self.db.user_histories.find_one({'user_id': user_id})
+
+        user_data = await self._retry_operation(_get)
         if user_data and 'history' in user_data:
             # Filter out expired image links
             filtered_history = self._filter_expired_images(user_data['history'])
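A short, hedged usage sketch of the new retry plumbing: another collection call could be wrapped the same way `get_history` now wraps `find_one`. The method name `save_history` and the update shape are assumptions for illustration, not part of this diff.

```python
# Illustrative only - follows the _retry_operation pattern shown above.
# Method name and update shape are assumptions, not taken from the commit.
async def save_history(self, user_id: int, history: list) -> None:
    async def _save():
        await self.db.user_histories.update_one(
            {'user_id': user_id},
            {'$set': {'history': history}},
            upsert=True
        )
    await self._retry_operation(_save)
```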
@@ -135,6 +135,9 @@ class MessageHandler:
         self.user_charts = {}  # Will be cleaned up periodically
         self.max_user_files = 20  # Limit concurrent user files
 
+        # Store latest image URL per user (in-memory, refreshed from attachments)
+        self.user_latest_image_url = {}
+
         # Tool mapping for API integration
         self.tool_mapping = {
             "google_search": self._google_search,
@@ -142,6 +145,7 @@ class MessageHandler:
             "execute_python_code": self._execute_python_code,
             "generate_image": self._generate_image,
             "edit_image": self._edit_image,
+            "remove_background": self._remove_background,
             "set_reminder": self._set_reminder,
             "get_reminders": self._get_reminders,
             "enhance_prompt": self._enhance_prompt,
@@ -200,6 +204,28 @@ class MessageHandler:
                 return user_id
         return None
 
+    async def _get_latest_image_url_from_db(self, user_id: int) -> str:
+        """Get the latest valid image URL from user's history in database"""
+        try:
+            # Get history from database (already filtered for expired images)
+            history = await self.db.get_history(user_id)
+
+            # Find the latest image URL by iterating in reverse
+            for msg in reversed(history):
+                content = msg.get('content')
+                if isinstance(content, list):
+                    for item in reversed(content):
+                        if item.get('type') == 'image_url':
+                            image_url_data = item.get('image_url', {})
+                            url = image_url_data.get('url') if isinstance(image_url_data, dict) else None
+                            if url:
+                                logging.info(f"Found latest image URL from database: {url[:80]}...")
+                                return url
+            return None
+        except Exception as e:
+            logging.error(f"Error getting latest image URL from database: {e}")
+            return None
+
     def _count_tokens_with_tiktoken(self, text: str) -> int:
         """Count tokens using tiktoken encoder for internal operations."""
         if self.token_encoder is None:
@@ -1366,13 +1392,16 @@ print("\\n=== Correlation Analysis ===")
                     content.append({"type": "text", "text": f"[Error processing {attachment.filename}: {str(e)}]"})
 
                 elif any(attachment.filename.endswith(ext) for ext in ['.png', '.jpg', '.jpeg', '.gif', '.webp']):
+                    # Store latest image URL for this user
+                    self.user_latest_image_url[user_id] = attachment.url
+                    logging.info(f"Stored latest image URL for user {user_id}")
+
                     content.append({
                         "type": "image_url",
                         "image_url": {
                             "url": attachment.url,
                             "detail": "high"
-                        },
-                        "timestamp": datetime.now().isoformat()  # Add timestamp to track image expiration
+                        }
                     })
                 else:
                     content.append({"type": "text", "text": f"[Attachment: {attachment.filename}] - I can't process this type of file directly."})
@@ -2087,6 +2116,25 @@ print("\\n=== Correlation Analysis ===")
     async def _image_to_text(self, args: Dict[str, Any]):
         """Convert image to text"""
         try:
+            # Check if model passed "latest_image" - use stored URL
+            image_url = args.get("image_url", "")
+            if image_url == "latest_image" or not image_url:
+                user_id = self._find_user_id_from_current_task()
+                if user_id:
+                    # Try in-memory first (from current session), then database
+                    if user_id in self.user_latest_image_url:
+                        args["image_url"] = self.user_latest_image_url[user_id]
+                        logging.info(f"Using in-memory image URL for image_to_text")
+                    else:
+                        db_url = await self._get_latest_image_url_from_db(user_id)
+                        if db_url:
+                            args["image_url"] = db_url
+                            logging.info(f"Using database image URL for image_to_text")
+                        else:
+                            return json.dumps({"error": "No image found. Please upload an image first."})
+                else:
+                    return json.dumps({"error": "No image found. Please upload an image first."})
+
             result = await self.image_generator.image_to_text(args)
             return result
         except Exception as e:
@@ -2096,15 +2144,82 @@ print("\\n=== Correlation Analysis ===")
     async def _upscale_image(self, args: Dict[str, Any]):
         """Upscale an image"""
         try:
+            # Check if model passed "latest_image" - use stored URL
+            image_url = args.get("image_url", "")
+            if image_url == "latest_image" or not image_url:
+                user_id = self._find_user_id_from_current_task()
+                if user_id:
+                    # Try in-memory first (from current session), then database
+                    if user_id in self.user_latest_image_url:
+                        args["image_url"] = self.user_latest_image_url[user_id]
+                        logging.info(f"Using in-memory image URL for upscale")
+                    else:
+                        db_url = await self._get_latest_image_url_from_db(user_id)
+                        if db_url:
+                            args["image_url"] = db_url
+                            logging.info(f"Using database image URL for upscale")
+                        else:
+                            return json.dumps({"error": "No image found. Please upload an image first."})
+                else:
+                    return json.dumps({"error": "No image found. Please upload an image first."})
+
             result = await self.image_generator.upscale_image(args)
             return result
         except Exception as e:
             logging.error(f"Error in image upscaling: {str(e)}")
             return json.dumps({"error": f"Image upscaling failed: {str(e)}"})
 
+    async def _remove_background(self, args: Dict[str, Any]):
+        """Remove background from an image"""
+        try:
+            # Check if model passed "latest_image" - use stored URL
+            image_url = args.get("image_url", "")
+            if image_url == "latest_image" or not image_url:
+                user_id = self._find_user_id_from_current_task()
+                if user_id:
+                    # Try in-memory first (from current session), then database
+                    if user_id in self.user_latest_image_url:
+                        args["image_url"] = self.user_latest_image_url[user_id]
+                        logging.info(f"Using in-memory image URL for background removal")
+                    else:
+                        db_url = await self._get_latest_image_url_from_db(user_id)
+                        if db_url:
+                            args["image_url"] = db_url
+                            logging.info(f"Using database image URL for background removal")
+                        else:
+                            return json.dumps({"error": "No image found. Please upload an image first."})
+                else:
+                    return json.dumps({"error": "No image found. Please upload an image first."})
+
+            result = await self.image_generator.remove_background(args)
+            return result
+        except Exception as e:
+            logging.error(f"Error in background removal: {str(e)}")
+            return json.dumps({"error": f"Background removal failed: {str(e)}"})
+
     async def _photo_maker(self, args: Dict[str, Any]):
         """Create a photo"""
         try:
+            # Check if model passed "latest_image" in input_images - use stored URL
+            input_images = args.get("input_images", [])
+            if input_images and "latest_image" in input_images:
+                user_id = self._find_user_id_from_current_task()
+                if user_id:
+                    # Try in-memory first (from current session), then database
+                    if user_id in self.user_latest_image_url:
+                        url = self.user_latest_image_url[user_id]
+                        args["input_images"] = [url if img == "latest_image" else img for img in input_images]
+                        logging.info(f"Using in-memory image URL for photo_maker")
+                    else:
+                        db_url = await self._get_latest_image_url_from_db(user_id)
+                        if db_url:
+                            args["input_images"] = [db_url if img == "latest_image" else img for img in input_images]
+                            logging.info(f"Using database image URL for photo_maker")
+                        else:
+                            return json.dumps({"error": "No image found. Please upload an image first."})
+                else:
+                    return json.dumps({"error": "No image found. Please upload an image first."})
+
             result = await self.image_generator.photo_maker(args)
             return result
         except Exception as e:
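The three handlers above repeat the same 'latest_image' resolution block. A possible follow-up, not part of this commit, would be a shared helper along these lines; it only uses names defined in the hunks above.

```python
# Sketch of a shared helper (hypothetical, not in this commit) that the
# _image_to_text / _upscale_image / _remove_background handlers could call.
async def _resolve_latest_image(self, image_url: str):
    """Return a concrete URL for 'latest_image', or None if nothing is available."""
    if image_url and image_url != "latest_image":
        return image_url
    user_id = self._find_user_id_from_current_task()
    if not user_id:
        return None
    if user_id in self.user_latest_image_url:
        return self.user_latest_image_url[user_id]
    return await self._get_latest_image_url_from_db(user_id)
```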
(File diff suppressed because it is too large)
@@ -28,12 +28,11 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "edit_image",
-            "description": "Edit images (remove background). Returns URLs.",
+            "description": "Remove background from an image. Requires image_url from user's uploaded image or a web URL.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "image_url": {"type": "string"},
-                    "operation": {"type": "string", "enum": ["remove_background"]}
+                    "image_url": {"type": "string", "description": "URL of the image to edit"}
                 },
                 "required": ["image_url"]
             }
@@ -43,12 +42,12 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "enhance_prompt",
-            "description": "Create enhanced prompt versions.",
+            "description": "Improve and expand a prompt for better image generation results",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "prompt": {"type": "string"},
-                    "num_versions": {"type": "integer", "minimum": 1, "maximum": 5}
+                    "prompt": {"type": "string", "description": "The prompt to enhance"},
+                    "num_versions": {"type": "integer", "maximum": 5, "description": "Number of enhanced versions"}
                 },
                 "required": ["prompt"]
             }
@@ -58,10 +57,10 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "image_to_text",
-            "description": "Convert image to text.",
+            "description": "Generate a text description/caption of an image or extract text via OCR. When user uploads an image, pass 'latest_image' as image_url - the system will use the most recent uploaded image.",
             "parameters": {
                 "type": "object",
-                "properties": {"image_url": {"type": "string"}},
+                "properties": {"image_url": {"type": "string", "description": "Pass 'latest_image' to use the user's most recently uploaded image"}},
                 "required": ["image_url"]
             }
         }
@@ -70,12 +69,13 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "upscale_image",
-            "description": "Upscale image resolution. Returns URLs.",
+            "description": "Enlarge/upscale an image to higher resolution. When user uploads an image and wants to upscale it, pass 'latest_image' as the image_url - the system will use the most recent uploaded image.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "image_url": {"type": "string"},
-                    "scale_factor": {"type": "integer", "enum": [2, 3, 4]}
+                    "image_url": {"type": "string", "description": "Pass 'latest_image' to use the user's most recently uploaded image"},
+                    "scale_factor": {"type": "integer", "enum": [2, 4], "description": "Scale factor (2 or 4)"},
+                    "model": {"type": "string", "enum": ["clarity", "ccsr", "sd-latent", "swinir"], "description": "Upscale model to use"}
                 },
                 "required": ["image_url"]
             }
@@ -85,14 +85,15 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "photo_maker",
-            "description": "Generate images from reference photos. Returns URLs.",
+            "description": "Generate new images based on reference photos. When user uploads an image and wants to use it as reference, pass ['latest_image'] as input_images - the system will use the most recent uploaded image.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "prompt": {"type": "string"},
-                    "input_images": {"type": "array", "items": {"type": "string"}},
-                    "strength": {"type": "integer", "minimum": 1, "maximum": 100},
-                    "num_images": {"type": "integer", "minimum": 1, "maximum": 4}
+                    "prompt": {"type": "string", "description": "Description of the desired output image"},
+                    "input_images": {"type": "array", "items": {"type": "string"}, "description": "Pass ['latest_image'] to use the user's most recently uploaded image"},
+                    "style": {"type": "string", "description": "Style to apply (e.g., 'Photographic', 'Cinematic', 'Anime')"},
+                    "strength": {"type": "integer", "minimum": 0, "maximum": 100, "description": "Reference image influence (0-100)"},
+                    "num_images": {"type": "integer", "maximum": 4, "description": "Number of images to generate"}
                 },
                 "required": ["prompt", "input_images"]
             }
@@ -102,28 +103,44 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "generate_image_with_refiner",
-            "description": "Generate high-quality images. Returns URLs.",
+            "description": "Generate high-quality refined images with extra detail using SDXL refiner. Best for detailed artwork.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "prompt": {"type": "string"},
-                    "num_images": {"type": "integer", "minimum": 1, "maximum": 4},
-                    "negative_prompt": {"type": "string"}
+                    "prompt": {"type": "string", "description": "Detailed description of the image to generate"},
+                    "model": {"type": "string", "enum": ["sdxl", "flux", "realistic"], "description": "Base model to use"},
+                    "num_images": {"type": "integer", "maximum": 4, "description": "Number of images to generate"},
+                    "negative_prompt": {"type": "string", "description": "Things to avoid in the image"}
                 },
                 "required": ["prompt"]
             }
         }
     },
+    {
+        "type": "function",
+        "function": {
+            "name": "remove_background",
+            "description": "Remove background from an image. When user uploads an image and wants to remove its background, pass 'latest_image' as the image_url - the system will use the most recent uploaded image.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "image_url": {"type": "string", "description": "Pass 'latest_image' to use the user's most recently uploaded image"},
+                    "model": {"type": "string", "enum": ["bria", "rembg", "birefnet-base", "birefnet-general", "birefnet-portrait"], "description": "Background removal model"}
+                },
+                "required": ["image_url"]
+            }
+        }
+    },
     {
         "type": "function",
         "function": {
             "name": "google_search",
-            "description": "Search web for current information.",
+            "description": "Search the web for current information",
             "parameters": {
                 "type": "object",
                 "properties": {
                     "query": {"type": "string"},
-                    "num_results": {"type": "integer", "minimum": 1, "maximum": 10}
+                    "num_results": {"type": "integer", "maximum": 10}
                 },
                 "required": ["query"]
             }
@@ -133,10 +150,10 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "scrape_webpage",
-            "description": "Extract content from webpage.",
+            "description": "Extract and read content from a webpage URL",
             "parameters": {
                 "type": "object",
-                "properties": {"url": {"type": "string"}},
+                "properties": {"url": {"type": "string", "description": "The webpage URL to scrape"}},
                 "required": ["url"]
             }
         }
@@ -145,12 +162,20 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "generate_image",
-            "description": "Generate images from text. Returns URLs.",
+            "description": "Create/generate images from text. Models: flux (best), flux-dev, sdxl, realistic (photos), anime, dreamshaper. Supports aspect ratios.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "prompt": {"type": "string"},
-                    "num_images": {"type": "integer", "minimum": 1, "maximum": 4}
+                    "prompt": {"type": "string", "description": "Detailed description of the image to create"},
+                    "model": {"type": "string", "enum": ["flux", "flux-dev", "sdxl", "realistic", "anime", "dreamshaper"], "description": "Model to use for generation"},
+                    "num_images": {"type": "integer", "maximum": 4, "description": "Number of images (1-4)"},
+                    "aspect_ratio": {"type": "string", "enum": ["1:1", "16:9", "9:16", "4:3", "3:4", "3:2", "2:3", "21:9"], "description": "Aspect ratio preset"},
+                    "width": {"type": "integer", "description": "Custom width (512-2048, divisible by 64)"},
+                    "height": {"type": "integer", "description": "Custom height (512-2048, divisible by 64)"},
+                    "negative_prompt": {"type": "string", "description": "Things to avoid in the image"},
+                    "steps": {"type": "integer", "minimum": 10, "maximum": 50, "description": "Inference steps (more = higher quality)"},
+                    "cfg_scale": {"type": "number", "minimum": 1, "maximum": 20, "description": "Guidance scale (higher = more prompt adherence)"},
+                    "seed": {"type": "integer", "description": "Random seed for reproducibility"}
                 },
                 "required": ["prompt"]
             }
@@ -160,33 +185,12 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "execute_python_code",
-            "description": """Execute Python with AUTO-INSTALL. Packages (pandas, numpy, matplotlib, seaborn, sklearn, plotly, opencv, etc.) install automatically when imported. Just use 'import' normally. Generated files (CSV, images, JSON) auto-captured and sent to user (stored 48h). Load user files: load_file('file_id'). Example: import pandas as pd; df=load_file('id'); df.to_csv('out.csv')""",
+            "description": "Run Python code. Packages auto-install. Use load_file('file_id') for user files. Output files auto-sent to user.",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "code": {
-                        "type": "string",
-                        "description": "Python code to execute. Import any approved package - they auto-install!"
-                    },
-                    "input_data": {
-                        "type": "string",
-                        "description": "Optional input data (DEPRECATED - use load_file() in code instead)"
-                    },
-                    "install_packages": {
-                        "type": "array",
-                        "items": {"type": "string"},
-                        "description": "OPTIONAL: Pre-install packages. Usually not needed as packages auto-install on import."
-                    },
-                    "enable_visualization": {
-                        "type": "boolean",
-                        "description": "DEPRECATED: Just use plt.savefig() to create images"
-                    },
-                    "timeout": {
-                        "type": "integer",
-                        "minimum": 1,
-                        "maximum": 300,
-                        "description": "Execution timeout in seconds (default: 60)"
-                    }
+                    "code": {"type": "string", "description": "Python code to execute"},
+                    "timeout": {"type": "integer", "maximum": 300, "description": "Timeout in seconds"}
                 },
                 "required": ["code"]
             }
@@ -196,7 +200,7 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "set_reminder",
-            "description": "Set user reminder with flexible time formats.",
+            "description": "Set reminder",
             "parameters": {
                 "type": "object",
                 "properties": {
@@ -211,7 +215,7 @@ def get_tools_for_model() -> List[Dict[str, Any]]:
         "type": "function",
         "function": {
             "name": "get_reminders",
-            "description": "Get user reminders list.",
+            "description": "List reminders",
             "parameters": {"type": "object", "properties": {}}
         }
     }
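For reference, hedged examples of the argument payloads a model might emit against the updated tool schemas above; the values are illustrative only and not taken from the commit.

```python
# Illustrative tool-call arguments matching the updated schemas above.
generate_image_args = {
    "prompt": "a lighthouse at sunset, dramatic sky",
    "model": "realistic",
    "aspect_ratio": "16:9",
    "num_images": 1,
}

remove_background_args = {
    "image_url": "latest_image",  # resolved server-side to the user's most recent upload
    "model": "bria",
}
```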