# ==================== attr_extraction/apps.py ====================
from django.apps import AppConfig
import logging
import sys
import os
import threading

from django.core.cache import cache  # ✅ Import Django cache

logger = logging.getLogger(__name__)


class AttrExtractionConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'attr_extraction'

    # Flag to prevent double loading
    models_loaded = False

    def ready(self):
        """
        🔥 Pre-load all heavy ML models during Django startup.
        Also clears the Django cache once when the server starts.
        """
        # Skip during migrations/management commands
        if any(cmd in sys.argv for cmd in ['migrate', 'makemigrations', 'test', 'collectstatic', 'shell']):
            return

        # Skip in the Django autoreloader parent process
        if os.environ.get('RUN_MAIN') != 'true':
            logger.info("⏭️ Skipping model loading in autoreloader parent process")
            return

        # ✅ Clear cache once per startup
        try:
            cache.clear()
            logger.info("🧹 Django cache cleared successfully on startup.")
        except Exception as e:
            logger.warning(f"⚠️ Failed to clear cache: {e}")

        # Prevent double loading
        if AttrExtractionConfig.models_loaded:
            logger.info("⏭️ Models already loaded, skipping...")
            return
        AttrExtractionConfig.models_loaded = True

        # Load models in a background thread (non-blocking)
        thread = threading.Thread(target=self._load_models, daemon=True)
        thread.start()
        logger.info("🔄 Model loading started in background...")

    def _load_models(self):
        """Background thread to load heavy models."""
        import time

        logger.info("=" * 70)
        logger.info("🔥 WARMING UP ML MODELS (background thread)")
        logger.info("=" * 70)
        startup_time = time.time()
        total_loaded = 0

        # REMOVED: Sentence Transformer (no longer used in services.py)

        # 1. Pre-load CLIP model
        try:
            logger.info("📥 Loading CLIP model (20-30s)...")
            clip_start = time.time()
            from .visual_processing_service import VisualProcessingService
            VisualProcessingService._get_clip_model()
            clip_time = time.time() - clip_start
            logger.info(f"✓ CLIP model cached ({clip_time:.1f}s)")
            total_loaded += 1
        except Exception as e:
            logger.error(f"❌ CLIP model failed: {e}")

        # 2. Pre-load OCR model
        try:
            logger.info("📥 Loading EasyOCR model...")
            ocr_start = time.time()
            from .ocr_service import OCRService
            ocr_service = OCRService()
            ocr_service._get_reader()
            ocr_time = time.time() - ocr_start
            logger.info(f"✓ OCR model cached ({ocr_time:.1f}s)")
            total_loaded += 1
        except Exception as e:
            logger.error(f"❌ OCR model failed: {e}")

        total_time = time.time() - startup_time
        logger.info("=" * 70)
        logger.info(f"🎉 {total_loaded}/2 MODELS LOADED in {total_time:.1f}s")
        logger.info("⚡ API requests are now FAST (2-5 seconds)")
        logger.info("=" * 70)
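
# -----------------------------------------------------------------------
# NOTE: The warm-up above only helps if each service caches its model at
# class level the first time its getter runs, so later requests reuse the
# instance built here instead of reloading it. A minimal sketch of that
# pattern (hypothetical attribute names and an illustrative language list;
# the real visual_processing_service.py / ocr_service.py may differ):
#
#     import threading
#     import easyocr
#
#     class OCRService:
#         _reader = None
#         _lock = threading.Lock()
#
#         def _get_reader(self):
#             # Double-checked locking: requests arriving while the
#             # background warm-up is still running share one reader
#             # instead of each building their own.
#             if OCRService._reader is None:
#                 with OCRService._lock:
#                     if OCRService._reader is None:
#                         OCRService._reader = easyocr.Reader(['en'])
#             return OCRService._reader
# -----------------------------------------------------------------------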