Browse Source

created upload and fixed all

Harshit Pathak 3 months ago
parent
commit
aff70266c9

Binary
content_quality_tool/__pycache__/settings.cpython-313.pyc


+ 12 - 1
content_quality_tool/settings.py

@@ -40,6 +40,8 @@ INSTALLED_APPS = [
     'django.contrib.messages',
     'django.contrib.staticfiles',
     'core',
+    'rest_framework', 
+
 ]
 
 MIDDLEWARE = [
@@ -126,5 +128,14 @@ DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
 
 
 # Gemini API Configuration
-GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', 'AIzaSyC-_MTAmcFwZQeZ36ywpgNnHiSZscmxSOk')
+GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY', 'AIzaSyDxsW5DoaZP7evNE5b9Gcj8b3_GjoYk-9M')
+
+MEDIA_ROOT = BASE_DIR / 'media'
+MEDIA_URL = '/media/'
+
+from django.conf import settings
+from django.conf.urls.static import static
 
+urlpatterns = [
+    # ... your routes
+] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
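Note: the urlpatterns = [...] + static(...) block added above lives in settings.py, where Django never reads it, so this change does not actually serve uploaded media. The conventional home is the project urls.py, and static() only emits routes when DEBUG is True. A minimal sketch, assuming the default project layout (the route contents are placeholders):

# content_quality_tool/urls.py (sketch)
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('core.urls')),
]

# static() returns [] when DEBUG is False, so this is a no-op in production
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

Separately, committing a real key as the os.environ.get fallback publishes it in version control; an env-only lookup that fails loudly when the variable is missing is the usual pattern.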

Binary
core/__pycache__/models.cpython-313.pyc


Binary
core/__pycache__/urls.cpython-313.pyc


Binary
core/__pycache__/views.cpython-313.pyc


+ 270 - 0
core/management/commands/load_excel_data.py

@@ -0,0 +1,270 @@
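+# NOTE: the block below is the previous delete-and-recreate implementation,
+# kept commented out for reference; the active upsert version follows it.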
+# import json
+# import pandas as pd
+# from django.core.management.base import BaseCommand
+# from core.models import Product, CategoryAttributeRule, ProductContentRule
+
+# class Command(BaseCommand):
+#     help = 'Load sample data (rules + products) from an Excel file'
+
+#     def add_arguments(self, parser):
+#         parser.add_argument('excel_path', type=str, help='Path to the Excel file')
+
+#     def handle(self, *args, **options):
+#         excel_path = options['excel_path']
+#         self.stdout.write(self.style.NOTICE(f"📂 Reading Excel: {excel_path}"))
+
+#         # Load sheets
+#         try:
+#             xls = pd.ExcelFile(excel_path)
+#         except Exception as e:
+#             self.stdout.write(self.style.ERROR(f"Failed to read Excel file: {e}"))
+#             return
+
+#         # --- Load Category Rules ---
+#         if 'category_rules' in xls.sheet_names:
+#             df = pd.read_excel(xls, 'category_rules').fillna('')
+#             CategoryAttributeRule.objects.all().delete()
+#             for _, row in df.iterrows():
+#                 valid_values = self._parse_json(row.get('valid_values', '[]'))
+#                 CategoryAttributeRule.objects.create(
+#                     category=row['category'],
+#                     attribute_name=row['attribute_name'],
+#                     is_mandatory=bool(row['is_mandatory']),
+#                     valid_values=valid_values,
+#                     data_type=row.get('data_type', 'string'),
+#                     validation_regex=row.get('validation_regex', ''),
+#                     min_length=row.get('min_length') or None,
+#                     max_length=row.get('max_length') or None,
+#                     description=row.get('description', '')
+#                 )
+#             self.stdout.write(self.style.SUCCESS(f"✅ Loaded {len(df)} category rules"))
+#         else:
+#             self.stdout.write(self.style.WARNING("⚠️ Sheet 'category_rules' not found."))
+
+#         # --- Load Content Rules ---
+#         if 'content_rules' in xls.sheet_names:
+#             df = pd.read_excel(xls, 'content_rules').fillna('')
+#             ProductContentRule.objects.all().delete()
+#             for _, row in df.iterrows():
+#                 keywords = self._parse_json(row.get('must_contain_keywords', '[]'))
+#                 ProductContentRule.objects.create(
+#                     category=row.get('category') or None,
+#                     field_name=row['field_name'],
+#                     is_mandatory=bool(row['is_mandatory']),
+#                     min_length=row.get('min_length') or None,
+#                     max_length=row.get('max_length') or None,
+#                     min_word_count=row.get('min_word_count') or None,
+#                     max_word_count=row.get('max_word_count') or None,
+#                     must_contain_keywords=keywords,
+#                     validation_regex=row.get('validation_regex', ''),
+#                     description=row.get('description', '')
+#                 )
+#             self.stdout.write(self.style.SUCCESS(f"✅ Loaded {len(df)} content rules"))
+#         else:
+#             self.stdout.write(self.style.WARNING("⚠️ Sheet 'content_rules' not found."))
+
+#         # --- Load Products ---
+#         if 'products' in xls.sheet_names:
+#             df = pd.read_excel(xls, 'products').fillna('')
+#             Product.objects.all().delete()
+#             for _, row in df.iterrows():
+#                 attrs = self._parse_json(row.get('attributes', '{}'))
+#                 Product.objects.create(
+#                     sku=row['sku'],
+#                     category=row['category'],
+#                     title=row['title'],
+#                     description=row.get('description', ''),
+#                     seo_title=row.get('seo_title', ''),
+#                     seo_description=row.get('seo_description', ''),
+#                     attributes=attrs
+#                 )
+#             self.stdout.write(self.style.SUCCESS(f"✅ Loaded {len(df)} products"))
+#         else:
+#             self.stdout.write(self.style.WARNING("⚠️ Sheet 'products' not found."))
+
+#         self.stdout.write(self.style.SUCCESS("🎯 Excel data loaded successfully!"))
+
+#     def _parse_json(self, value):
+#         """Safely parse JSON or comma-separated values."""
+#         if isinstance(value, list):
+#             return value
+#         if isinstance(value, str) and value.strip():
+#             try:
+#                 return json.loads(value)
+#             except json.JSONDecodeError:
+#                 # Maybe comma-separated
+#                 return [v.strip() for v in value.split(',') if v.strip()]
+#         return []
+
+
+
+
+
+import json
+import pandas as pd
+from django.core.management.base import BaseCommand
+from core.models import Product, CategoryAttributeRule, ProductContentRule
+from django.db import transaction
+
+class Command(BaseCommand):
+    help = 'Load sample data (rules + products) from an Excel file, only inserting new records or updating existing ones.'
+
+    def add_arguments(self, parser):
+        parser.add_argument('excel_path', type=str, help='Path to the Excel file')
+
+    def handle(self, *args, **options):
+        excel_path = options['excel_path']
+        self.stdout.write(self.style.NOTICE(f"📂 Reading Excel: {excel_path}"))
+
+        # Initialize counters
+        loaded_counts = {
+            'category_rules': 0,
+            'content_rules': 0,
+            'products': 0,
+        }
+        output_messages = []
+
+        # Load sheets
+        try:
+            xls = pd.ExcelFile(excel_path)
+        except Exception as e:
+            self.stdout.write(self.style.ERROR(f"Failed to read Excel file: {e}"))
+            # In the API context, this needs to be returned as an error response
+            return {'error': f"Failed to read Excel file: {e}"}
+
+        # --- Load Category Rules (Upsert) ---
+        if 'category_rules' in xls.sheet_names:  # reuse the already-opened workbook
+            df = pd.read_excel(xls, 'category_rules').fillna('')
+            count = 0
+            # One transaction per sheet: rows load all-or-nothing, and
+            # batching the writes is also noticeably faster on SQLite
+            with transaction.atomic():
+                for _, row in df.iterrows():
+                    valid_values = self._parse_json(row.get('valid_values', '[]'))
+                    
+                    # Define unique key for CategoryAttributeRule
+                    unique_key = {
+                        'category': row['category'],
+                        'attribute_name': row['attribute_name'],
+                    }
+                    
+                    # Define fields that can be updated
+                    update_fields = {
+                        'is_mandatory': bool(row['is_mandatory']),
+                        'valid_values': valid_values,
+                        'data_type': row.get('data_type', 'string'),
+                        'validation_regex': row.get('validation_regex', ''),
+                        'min_length': row.get('min_length') or None,
+                        'max_length': row.get('max_length') or None,
+                        'description': row.get('description', '')
+                    }
+                    
+                    # update_or_create performs a lookup and either updates or creates
+                    # it returns (object, created)
+                    _, created = CategoryAttributeRule.objects.update_or_create(
+                        **unique_key,
+                        defaults=update_fields
+                    )
+                    
+                    if created:
+                        count += 1
+
+            loaded_counts['category_rules'] = count
+            output_messages.append(f"✅ Loaded {count} category rules")
+        else:
+            self.stdout.write(self.style.WARNING("⚠️ Sheet 'category_rules' not found."))
+
+        # --- Load Content Rules (Upsert) ---
+        if 'content_rules' in xls.sheet_names:
+            df = pd.read_excel(xls, 'content_rules').fillna('')
+            count = 0
+            with transaction.atomic():
+                for _, row in df.iterrows():
+                    keywords = self._parse_json(row.get('must_contain_keywords', '[]'))
+                    
+                    # Define unique key for ProductContentRule
+                    # Assuming field_name is always unique within a category, or globally if category is None
+                    unique_key = {
+                        'category': row.get('category') or None,
+                        'field_name': row['field_name'],
+                    }
+                    
+                    update_fields = {
+                        'is_mandatory': bool(row['is_mandatory']),
+                        'min_length': row.get('min_length') or None,
+                        'max_length': row.get('max_length') or None,
+                        'min_word_count': row.get('min_word_count') or None,
+                        'max_word_count': row.get('max_word_count') or None,
+                        'must_contain_keywords': keywords,
+                        'validation_regex': row.get('validation_regex', ''),
+                        'description': row.get('description', '')
+                    }
+                    
+                    _, created = ProductContentRule.objects.update_or_create(
+                        **unique_key,
+                        defaults=update_fields
+                    )
+                    
+                    if created:
+                        count += 1
+
+            loaded_counts['content_rules'] = count
+            output_messages.append(f"✅ Loaded {count} content rules")
+        else:
+            self.stdout.write(self.style.WARNING("⚠️ Sheet 'content_rules' not found."))
+
+        # --- Load Products (Upsert based on SKU) ---
+        if 'products' in xls.sheet_names:
+            df = pd.read_excel(xls, 'products').fillna('')
+            count = 0
+            with transaction.atomic():
+                for _, row in df.iterrows():
+                    attrs = self._parse_json(row.get('attributes', '{}'))
+                    
+                    # Define unique key for Product (assuming SKU is unique)
+                    unique_key = {'sku': row['sku']}
+                    
+                    update_fields = {
+                        'category': row['category'],
+                        'image_path': row.get('image_path', ''),  # new column; tolerate older sheets
+                        'title': row['title'],
+                        'description': row.get('description', ''),
+                        'seo_title': row.get('seo_title', ''),
+                        'seo_description': row.get('seo_description', ''),
+                        'attributes': attrs
+                    }
+                    
+                    _, created = Product.objects.update_or_create(
+                        **unique_key,
+                        defaults=update_fields
+                    )
+                    
+                    if created:
+                        count += 1
+            
+            loaded_counts['products'] = count
+            output_messages.append(f"✅ Loaded {count} products")
+        else:
+            self.stdout.write(self.style.WARNING("⚠️ Sheet 'products' not found."))
+
+        output_messages.append("🎯 Excel data loaded successfully!")
+        
+        # Prepare the final output message string for the API response
+        final_message = "\n".join(output_messages)
+        
+        self.stdout.write(self.style.SUCCESS(final_message))
+        
+    # Return the result for programmatic callers (e.g. the upload APIView).
+    # Caveat: when invoked via manage.py, Django writes a non-None handle()
+    # return value to stdout and expects a string, so a dict return breaks
+    # plain CLI runs; return final_message there instead if both are needed.
+        return {'success': True, 'message': final_message}
+
+    def _parse_json(self, value):
+        """Safely parse JSON or comma-separated values."""
+        # Accepts JSON lists, JSON strings, or comma-separated strings.
+        if isinstance(value, list):
+            return value
+        if isinstance(value, str) and value.strip():
+            try:
+                return json.loads(value)
+            except json.JSONDecodeError:
+                # Maybe comma-separated
+                return [v.strip() for v in value.split(',') if v.strip()]
+        return []
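The upsert loop above hinges on update_or_create, which looks a row up by the keyword arguments and either applies defaults as an update or inserts a new row, returning (object, created). A minimal sketch of the semantics, with illustrative values:

from core.models import Product

# First call: no Product with this SKU exists, so one is inserted
obj, created = Product.objects.update_or_create(
    sku='SKU-001',
    defaults={'category': 'shoes', 'title': 'Trail Runner'},
)
assert created is True

# Second call: the SKU is found, so defaults are written as an update
obj, created = Product.objects.update_or_create(
    sku='SKU-001',
    defaults={'category': 'shoes', 'title': 'Trail Runner v2'},
)
assert created is False and obj.title == 'Trail Runner v2'

Each row costs a SELECT plus an INSERT or UPDATE; for very large sheets, bulk_create with update_conflicts=True (Django 4.1+) is the faster route.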

+ 18 - 0
core/migrations/0005_product_image_path.py

@@ -0,0 +1,18 @@
+# Generated by Django 5.2.7 on 2025-10-10 11:05
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0004_product_seo_description_product_seo_title_and_more'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='product',
+            name='image_path',
+            field=models.TextField(blank=True),
+        ),
+    ]

+ 2 - 1
core/models.py

@@ -17,7 +17,8 @@ class Product(models.Model):
     attributes = models.JSONField(default=dict)
     created_at = models.DateTimeField(auto_now_add=True)
     updated_at = models.DateTimeField(auto_now=True)
-
+    image_path = models.TextField(blank=True)
+    
     class Meta:
         indexes = [
             models.Index(fields=['category']),
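With MEDIA_ROOT now configured, a FileField or ImageField would be the more idiomatic way to track product images than a raw TextField path: it validates uploads and derives URLs from MEDIA_URL. A hedged alternative sketch (requires Pillow and a data migration for existing paths):

from django.db import models

class Product(models.Model):
    # ... existing fields ...
    # alternative to image_path = models.TextField(blank=True):
    image = models.ImageField(upload_to='products/', blank=True)
    # image.url then resolves against MEDIA_URL, e.g. /media/products/foo.jpg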

Binary
core/services/__pycache__/attribute_scorer.cpython-313.pyc


Binary
core/services/__pycache__/gemini_service.cpython-313.pyc


+ 1 - 1
core/services/attribute_scorer.py

@@ -769,7 +769,7 @@ class AttributeQualityScorer:
                 images = product.get('images', [])
                 if images:
                     img_result = self.image_scorer.score_images(images)
-                    scores['image_quality'] = img_result.get("overall_image_score", None)
+                    scores['image_quality'] = img_result.get("image_score", None)
                     component_status['image_quality'] = "Scored successfully"
                 else:
                     scores['image_quality'] = None

+ 1 - 1
core/services/gemini_service.py

@@ -469,7 +469,7 @@ class GeminiAttributeService:
         if not api_key:
             raise ValueError("GEMINI_API_KEY not found in settings")
         genai.configure(api_key=api_key)
-        self.model = genai.GenerativeModel('gemini-2.0-flash-exp')
+        self.model = genai.GenerativeModel('gemini-2.5-flash')
     
     @retry(
         stop=stop_after_attempt(3),
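The hunk is cut off mid-decorator; for context, a typical tenacity retry setup for flaky API calls looks like the following (the wait policy here is illustrative, not necessarily what the project uses):

from tenacity import retry, stop_after_attempt, wait_exponential

@retry(
    stop=stop_after_attempt(3),                   # give up after 3 attempts
    wait=wait_exponential(multiplier=1, max=10),  # back off 1s, 2s, 4s, capped at 10s
)
def call_model(prompt: str) -> str:
    # any exception raised here triggers a retry until the stop condition
    ...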

+ 117 - 45
core/services/image_scorer.py

@@ -1,4 +1,4 @@
-# image_quality_scorer.py
+# image_scorer.py (FIXED - JSON serialization + NoneType)
 import logging
 from typing import Dict, List, Tuple
 import numpy as np
@@ -7,6 +7,7 @@ import cv2
 from sklearn.cluster import KMeans
 import webcolors
 import io
+import os
 
 logger = logging.getLogger(__name__)
 
@@ -47,9 +48,23 @@ class ImageQualityScorer:
         self.recommended_dpi = 150
         self.min_blur_variance = 100
         self.recommended_blur_variance = 500
-        self.recommended_formats = ['JPEG', 'PNG', 'WEBP']
+        self.recommended_formats = ['JPEG', 'PNG', 'WEBP', 'JPG']
         self.max_file_size_mb = 5
     
+    def _convert_to_json_serializable(self, obj):
+        """Convert numpy types to native Python types for JSON serialization"""
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        elif isinstance(obj, dict):
+            return {key: self._convert_to_json_serializable(value) for key, value in obj.items()}
+        elif isinstance(obj, (list, tuple)):
+            return [self._convert_to_json_serializable(item) for item in obj]
+        return obj
+    
     def score_image(self, product: Dict, image_data: bytes = None, image_path: str = None) -> Dict:
         """
         Main scoring function for product images
@@ -62,13 +77,28 @@ class ImageQualityScorer:
         Returns:
             Dictionary with scores, issues, and suggestions
         """
+        logger.info(f"[IMAGE SCORER] Starting image scoring for SKU: {product.get('sku')}")
+        
         try:
             # Load image
             if image_data:
+                logger.info("[IMAGE SCORER] Loading image from bytes")
                 image = Image.open(io.BytesIO(image_data)).convert("RGB")
             elif image_path:
-                image = Image.open(image_path).convert("RGB")  # <-- FIXED: removed hardcoded path
+                logger.info(f"[IMAGE SCORER] Loading image from path: {image_path}")
+                if not os.path.exists(image_path):
+                    logger.error(f"[IMAGE SCORER] File not found: {image_path}")
+                    return {
+                        'image_score': 0.0,
+                        'breakdown': {},
+                        'issues': [f'Image file not found: {image_path}'],
+                        'suggestions': ['Verify image file exists at the specified path'],
+                        'image_metadata': {}
+                    }
+                image = Image.open(image_path).convert("RGB")
+                logger.info(f"[IMAGE SCORER] Image loaded successfully: {image.size}")
             else:
+                logger.warning("[IMAGE SCORER] No image provided")
                 return {
                     'image_score': 0.0,
                     'breakdown': {},
@@ -78,9 +108,11 @@ class ImageQualityScorer:
                 }
             
             image_np = np.array(image)
+            logger.info(f"[IMAGE SCORER] Image converted to numpy array: {image_np.shape}")
             
             # Extract metadata
             metadata = self._extract_metadata(image, image_data or image_path)
+            logger.info(f"[IMAGE SCORER] Metadata extracted: {metadata}")
             
             # Score components
             scores = {}
@@ -88,52 +120,63 @@ class ImageQualityScorer:
             suggestions = []
             
             # 1. Resolution (25%)
+            logger.info("[IMAGE SCORER] Checking resolution...")
             res_score, res_issues, res_suggestions = self._check_resolution(image, metadata)
             scores['resolution'] = res_score
             issues.extend(res_issues)
             suggestions.extend(res_suggestions)
+            logger.info(f"[IMAGE SCORER] Resolution score: {res_score}")
             
             # 2. Clarity/Blur (25%)
+            logger.info("[IMAGE SCORER] Checking clarity...")
             clarity_score, clarity_issues, clarity_suggestions = self._check_clarity(image_np)
             scores['clarity'] = clarity_score
             issues.extend(clarity_issues)
             suggestions.extend(clarity_suggestions)
+            logger.info(f"[IMAGE SCORER] Clarity score: {clarity_score}")
             
             # 3. Background (20%)
+            logger.info("[IMAGE SCORER] Checking background...")
             bg_score, bg_issues, bg_suggestions, bg_info = self._check_background(image_np)
             scores['background'] = bg_score
             issues.extend(bg_issues)
             suggestions.extend(bg_suggestions)
+            logger.info(f"[IMAGE SCORER] Background score: {bg_score}")
             
             # 4. Size (15%)
+            logger.info("[IMAGE SCORER] Checking size...")
             size_score, size_issues, size_suggestions = self._check_size(image, metadata)
             scores['size'] = size_score
             issues.extend(size_issues)
             suggestions.extend(size_suggestions)
+            logger.info(f"[IMAGE SCORER] Size score: {size_score}")
             
             # 5. Format (15%)
+            logger.info("[IMAGE SCORER] Checking format...")
             format_score, format_issues, format_suggestions = self._check_format(image, metadata)
             scores['format'] = format_score
             issues.extend(format_issues)
             suggestions.extend(format_suggestions)
+            logger.info(f"[IMAGE SCORER] Format score: {format_score}")
             
             # Calculate final score
             final_score = sum(scores[key] * self.image_weights[key] for key in scores)
+            logger.info(f"[IMAGE SCORER] ✓ Final image score: {final_score}")
             
-            return {
-                'image_score': round(final_score, 2),
-                'breakdown': scores,
+            # Convert all numpy types to native Python types for JSON serialization
+            result = {
+                'image_score': round(float(final_score), 2),
+                'breakdown': {k: round(float(v), 2) for k, v in scores.items()},
                 'issues': issues,
                 'suggestions': suggestions,
-                'image_metadata': {
-                    **metadata,
-                    **bg_info
-                },
+                'image_metadata': self._convert_to_json_serializable({**metadata, **bg_info}),
                 'ai_improvements': self._get_ai_improvements(product, scores, issues) if self.use_ai else None
             }
             
+            return result
+            
         except Exception as e:
-            logger.error(f"Image scoring error: {e}", exc_info=True)
+            logger.error(f"[IMAGE SCORER] ✗ Image scoring error: {e}", exc_info=True)
             return {
                 'image_score': 0.0,
                 'breakdown': {},
@@ -143,8 +186,27 @@ class ImageQualityScorer:
             }
     
     def _extract_metadata(self, image: Image.Image, source) -> Dict:
-        """Extract image metadata"""
+        """Extract image metadata with safe handling of None values"""
+        logger.info("[IMAGE SCORER] Extracting metadata...")
+        
         width, height = image.size
+        logger.info(f"[IMAGE SCORER] Image dimensions: {width}x{height}")
+        
+        # Get format. Note: convert("RGB") returns a copy whose .format is
+        # None, so the extension-based fallback below is the common path.
+        img_format = image.format
+        
+        if img_format is None:
+            # Try to detect from file extension
+            if isinstance(source, str):
+                ext = os.path.splitext(source)[1].upper().lstrip('.')
+                img_format = ext if ext else 'UNKNOWN'
+                logger.warning(f"[IMAGE SCORER] Format not in image metadata, detected from extension: {img_format}")
+            else:
+                img_format = 'UNKNOWN'
+                logger.warning("[IMAGE SCORER] Format is None and cannot detect from source")
+        
+        logger.info(f"[IMAGE SCORER] Image format: {img_format}")
         
         # Get DPI
         dpi = image.info.get('dpi', (None, None))
@@ -162,22 +224,25 @@ class ImageQualityScorer:
             except Exception:
                 dpi = (None, None)
         
+        logger.info(f"[IMAGE SCORER] DPI: {dpi}")
+        
         # Get file size
         file_size_mb = None
         if isinstance(source, bytes):
             file_size_mb = len(source) / (1024 * 1024)
         elif isinstance(source, str):
-            import os
             if os.path.exists(source):
                 file_size_mb = os.path.getsize(source) / (1024 * 1024)
         
+        logger.info(f"[IMAGE SCORER] File size: {file_size_mb:.2f} MB" if file_size_mb else "[IMAGE SCORER] File size: Unknown")
+        
         return {
-            'width': width,
-            'height': height,
+            'width': int(width),  # Ensure native Python int
+            'height': int(height),  # Ensure native Python int
             'dpi': dpi,
-            'format': image.format,
-            'mode': image.mode,
-            'file_size_mb': round(file_size_mb, 2) if file_size_mb else None
+            'format': str(img_format),  # Ensure string
+            'mode': str(image.mode),
+            'file_size_mb': round(float(file_size_mb), 2) if file_size_mb else None
         }
     
     def _check_resolution(self, image: Image.Image, metadata: Dict) -> Tuple[float, List[str], List[str]]:
@@ -203,7 +268,7 @@ class ImageQualityScorer:
             else:
                 score = 100.0
         
-        return score, issues, suggestions
+        return float(score), issues, suggestions
     
     def _check_clarity(self, image_np: np.ndarray) -> Tuple[float, List[str], List[str]]:
         """Check image clarity using Laplacian variance (blur detection)"""
@@ -213,6 +278,7 @@ class ImageQualityScorer:
         try:
             gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
             blur_variance = cv2.Laplacian(gray, cv2.CV_64F).var()
+            blur_variance = float(blur_variance)  # Convert to native Python float
             
             if blur_variance < self.min_blur_variance:
                 issues.append(f"Image: Blurry/low clarity (variance: {blur_variance:.2f})")
@@ -229,7 +295,7 @@ class ImageQualityScorer:
             score = 70.0
             suggestions.append("Unable to assess image clarity")
         
-        return score, issues, suggestions
+        return float(score), issues, suggestions
     
     def _check_background(self, image_np: np.ndarray) -> Tuple[float, List[str], List[str], Dict]:
         """Check background color and coverage"""
@@ -243,23 +309,22 @@ class ImageQualityScorer:
             
             # Get dominant color
             dominant_idx = np.argmax(np.bincount(kmeans.labels_))
-            dominant_color = tuple(kmeans.cluster_centers_[dominant_idx].astype(int))
+            dominant_color = tuple(int(x) for x in kmeans.cluster_centers_[dominant_idx].astype(int))
             
             # Color name and hex
             color_name = self._closest_color_name(dominant_color)
             hex_code = webcolors.rgb_to_hex(dominant_color)
             
             # Background coverage
-            bg_pixels = np.sum(kmeans.labels_ == dominant_idx)
-            total_pixels = len(kmeans.labels_)
-            background_coverage = 100 * bg_pixels / total_pixels
+            bg_pixels = int(np.sum(kmeans.labels_ == dominant_idx))
+            total_pixels = int(len(kmeans.labels_))
+            background_coverage = float(100 * bg_pixels / total_pixels)
             
             bg_info = {
-                'dominant_color_rgb': dominant_color,
-                'dominant_color_hex': hex_code,
-                'dominant_color_name': color_name,
-                'background_coverage': round(background_coverage, 2),
-                'blur_variance': cv2.Laplacian(cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY), cv2.CV_64F).var()
+                'dominant_color_rgb': list(dominant_color),  # Convert tuple to list for JSON
+                'dominant_color_hex': str(hex_code),
+                'dominant_color_name': str(color_name),
+                'background_coverage': round(background_coverage, 2)
             }
             
             # Score based on white/light background preference
@@ -287,7 +352,7 @@ class ImageQualityScorer:
                 suggestions.append(f"Background coverage low ({background_coverage:.1f}%), product may be too small")
                 score_components.append(60.0)
             
-            final_score = np.mean(score_components)
+            final_score = float(np.mean(score_components))
             
         except Exception as e:
             logger.warning(f"Background analysis error: {e}")
@@ -334,28 +399,35 @@ class ImageQualityScorer:
             suggestions.append(f"Image aspect ratio unusual ({aspect_ratio:.2f}), consider standard format")
             score_components.append(80.0)
         
-        final_score = np.mean(score_components)
+        final_score = float(np.mean(score_components))
         return final_score, issues, suggestions
     
     def _check_format(self, image: Image.Image, metadata: Dict) -> Tuple[float, List[str], List[str]]:
-        """Check image format and file size"""
+        """Check image format and file size - FIXED to handle None"""
         issues = []
         suggestions = []
         score_components = []
         
-        # Format check
-        img_format = metadata.get('format', '').upper()
-        if img_format in self.recommended_formats:
-            score_components.append(100.0)
-        elif img_format in ['JPG']:  # JPG vs JPEG
-            score_components.append(100.0)
-        elif img_format in ['GIF', 'BMP', 'TIFF']:
-            suggestions.append(f"Image format {img_format} acceptable but consider JPEG/PNG/WEBP")
-            score_components.append(75.0)
+        # Format check - FIXED: safe handling of None
+        img_format = metadata.get('format')
+        if img_format is None or img_format == 'UNKNOWN':
+            logger.warning("[IMAGE SCORER] Image format is None/Unknown")
+            suggestions.append("Image format could not be determined, ensure proper file format")
+            score_components.append(70.0)
         else:
-            issues.append(f"Image: Uncommon format ({img_format})")
-            suggestions.append("Use standard formats: JPEG, PNG, or WEBP")
-            score_components.append(50.0)
+            img_format_upper = str(img_format).upper()  # Ensure string and uppercase
+            
+            if img_format_upper in self.recommended_formats:
+                score_components.append(100.0)
+            elif img_format_upper in ['JPG', 'JPEG']:  # JPG vs JPEG
+                score_components.append(100.0)
+            elif img_format_upper in ['GIF', 'BMP', 'TIFF']:
+                suggestions.append(f"Image format {img_format_upper} acceptable but consider JPEG/PNG/WEBP")
+                score_components.append(75.0)
+            else:
+                issues.append(f"Image: Uncommon format ({img_format_upper})")
+                suggestions.append("Use standard formats: JPEG, PNG, or WEBP")
+                score_components.append(50.0)
         
         # File size check
         file_size_mb = metadata.get('file_size_mb')
@@ -372,7 +444,7 @@ class ImageQualityScorer:
         else:
             score_components.append(85.0)  # Default if size unknown
         
-        final_score = np.mean(score_components)
+        final_score = float(np.mean(score_components))
         return final_score, issues, suggestions
     
     def _closest_color_name(self, rgb_color: tuple) -> str:
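The serialization conversions threaded through this file matter because the stdlib json module rejects numpy integer and array types (np.float64 happens to subclass Python float, but np.int64 and ndarray do not map). A standalone demonstration of the failure mode and the fix:

import json
import numpy as np

count = np.sum(np.array([1, 0, 1]))   # np.int64, like the k-means pixel counts
try:
    json.dumps({'bg_pixels': count})
except TypeError as e:
    print(e)   # Object of type int64 is not JSON serializable

# Converting to a native Python type makes the dump succeed
print(json.dumps({'bg_pixels': int(count)}))   # {"bg_pixels": 2}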

+ 3 - 3
core/urls.py

@@ -18,7 +18,7 @@ from core.views import (
     AttributeScoreView,
     BatchScoreView,
     ContentRulesView,
-    ProductScoreDetailView
+    ExcelUploadView,
 )
 
 urlpatterns = [
@@ -31,6 +31,6 @@ urlpatterns = [
     # Content rules management
     path('api/content-rules/', ContentRulesView.as_view(), name='content_rules'),
     
-    # Get product score details
-    path('api/product/<str:sku>/score/', ProductScoreDetailView.as_view(), name='product_score_detail'),
+    path('api/upload-rules/', ExcelUploadView.as_view(), name='upload_rules')
+
 ]
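core/views.py is collapsed below ("diff too large"), so the ExcelUploadView this route points at is not visible. A plausible minimal shape, inferred from the route and the command's dict return; a sketch under assumptions, not the committed code:

# core/views.py (sketch)
import os

from django.conf import settings
from django.core.files.storage import default_storage
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework.views import APIView

from core.management.commands.load_excel_data import Command as LoadExcelData


class ExcelUploadView(APIView):
    parser_classes = [MultiPartParser]

    def post(self, request):
        upload = request.FILES.get('file')
        if upload is None:
            return Response({'error': 'No file provided'}, status=400)

        # Save under MEDIA_ROOT, then hand the absolute path to the loader
        name = default_storage.save(upload.name, upload)
        result = LoadExcelData().handle(
            excel_path=os.path.join(settings.MEDIA_ROOT, name)
        )
        return Response(result, status=400 if result and 'error' in result else 200)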

File diff too large to display
+ 1 - 1022
core/views.py


Binary
db.sqlite3


Binary
media/content_data.xlsx


Some files were not shown because too many files changed in this diff