Harshit Pathak 3 months ago
parent
commit
7a7f11704e
2 changed files with 375 additions and 164 deletions
  1. + 35 - 37
      attr_extraction/services.py
  2. + 340 - 127
      attr_extraction/views.py

+ 35 - 37
attr_extraction/services.py

@@ -1,6 +1,12 @@
 
 
-# # ==================== services.py (WITH USER VALUE REASONING) ====================
+################## WORKING VERSION, COMMENTED OUT TO ALSO SUPPORT MULTIPLE VALUES WITH SEMANTIC MATCHING #################
+
+
+
+
+
+
 # import json
 # import hashlib
 # import logging
@@ -205,12 +211,10 @@
 # {chr(10).join(user_lines)}
 
 # IMPORTANT INSTRUCTIONS FOR USER VALUES:
-# 1. Compare the user-entered value with what you find in the product text
-# 2. Evaluate if the user value is correct, partially correct, or incorrect for this product
-# 3. Choose the BEST value (could be user's value, or from allowed list, or inferred)
-# 4. Always provide a "reason" field explaining your decision
-# 5. DO NOT hallucinate - be honest if user's value seems wrong based on product evidence
-# 6. If user's value is not in the allowed list but seems correct, chose the most nearest value from the allowed list with proper reasoning.
+# 1. Choose the BEST value (it could be the user's value, a value from the allowed list, or an inferred one)
+# 2. Always provide a "reason" field explaining your decision. The reason must be valid and grounded in the product text. It need not match the product text word for word; you may infer from your understanding of it.
+# 3. DO NOT hallucinate - be honest if the user's value seems wrong based on the product evidence
+# 4. If the user's value is not in the allowed list but seems correct, choose the nearest value from the allowed list with proper reasoning for why it was chosen. In this case, also state in the reason the most apt value that should be added to the allowed list so that the user can edit it later.
 # """
 
 #         # --------------------------- PROMPT ---------------------------
@@ -219,14 +223,27 @@
 #         allowed_sources = list(source_map.keys()) + ["title", "description", "inferred"]
 #         source_hint = "|".join(allowed_sources)
 #         multiple_text = f"\nMULTIPLE ALLOWED FOR: {', '.join(multiple)}" if multiple else ""
-        
-#         print("Multiple text for attr: ")
-#         print(multiple_text)
 
-#         additional_instructions = """
-#         For the 'additional' section, identify any other important product attributes and their values (e.g., 'Color', 'Material', 'Weight' etc) that are present in the PRODUCT TEXT but not in the Mandatory Attribute list.
-#         For each additional attribute, use the best available value from the PRODUCT TEXT and specify the 'source'.
-#         """ if extract_additional else ""
+#         if extract_additional:
+#             additional_instructions = """
+# For the 'additional' section, identify any other important product attributes and their values (e.g., 'Color', 'Material', 'Weight', etc., according to the product text) that are present in the PRODUCT TEXT but not in the Mandatory Attribute list.
+# For each additional attribute, use the best available value from the PRODUCT TEXT and specify the 'source'.
+# Strictly extract key attributes other than the mandatory attributes from the text.
+# """
+#             output_example_additional = f"""
+#   "additional": {{
+#     "Additional_Attr_1": [{{
+#       "value": "Value 1",
+#       "source": "<{source_hint}>",
+#       "reason": "Why this attribute and value were identified"
+#     }}]
+#   }}
+# """
+#         else:
+#             additional_instructions = """
+# Do not identify or include any additional attributes. The 'additional' section must be an empty object {}.
+# """
+#             output_example_additional = '  "additional": {}'
 
 #         prompt = f"""
 # You are a product-attribute classifier and validator.
@@ -257,24 +274,17 @@
 #       "source": "<{source_hint}>",
 #       "reason": "Explanation of why this value was chosen. If user provided a value, explain why you agreed/disagreed with it.",
 #       "original_value": "<user_entered_value_if_provided>",
-#       "decision": "accepted|rejected"
+#       "decision": "accepted|rejected|not_provided"
 #     }}]
 #   }},
-#   "additional": {{
-#     "Additional_Attr_1": [{{
-#       "value": "Value 1", 
-#       "source": "<{source_hint}>",
-#       "reason": "Why this attribute and value were identified"
-#     }}]
-#   }}
+# {output_example_additional}
 # }}
 
 # RULES:
 # - For each mandatory attribute with a user-entered value, include "original_value" and "decision" fields
 # - "decision" values: "accepted" (used user's value), "rejected" (used different value), "not_provided" (no user value given)
 # - "reason" must explain your choice, especially when rejecting user input
-# - For 'additional' attributes: Strictly Extract other key attributes other than mandatory attributes from the text. 
-# - For 'multiple' attributes, always give multiple value for those attribues, choose wisely and max 2 multiple attribute that are very close. 
+# - For 'multiple' attributes, always give multiple values for those attributes; choose wisely, with at most 2 closely related values per attribute.
 # - Source must be one of: {source_hint}
 # - Be honest and specific in your reasoning.
 # - Return ONLY valid JSON
@@ -293,7 +303,6 @@
 #         try:
 #             raw = ProductAttributeService._call_llm(payload)
 #             logger.info("Raw LLM response received")
-#             print(raw)
 #             cleaned = ProductAttributeService._clean_json(raw)
 #             parsed = json.loads(cleaned)
 #         except Exception as exc:
@@ -338,16 +347,7 @@
 
 
 
-
-
-
-
-
-
-
-
-
-
+################## EDITING ONLY THE PROMPT OF THE VERSION ABOVE #################
 
 
 
@@ -693,8 +693,6 @@ RULES:
 
 
 
-
-
 # IMPORTANT INSTRUCTIONS FOR USER VALUES:
 # 1. Compare the user-entered value with what you find in the product text
 # 2. Evaluate if the user value is correct, partially correct, or incorrect for this product

+ 340 - 127
attr_extraction/views.py

@@ -1288,9 +1288,208 @@ def generate_product_excel_background():
 
 # -------------------------------------------------------------------------------------------------
 
+
+# THIS VERSION DOES NOT DELETE A RECORD EVEN IF IT IS ABSENT FROM THE EXCEL FILE
+# class ProductUploadExcelView(APIView):
+#     """
+#     POST API to upload an Excel file.
+#     """
+#     parser_classes = (MultiPartParser, FormParser)
+
+#     def post(self, request, *args, **kwargs):
+#         file_obj = request.FILES.get('file')
+#         if not file_obj:
+#             return Response({'error': 'No file provided'}, status=status.HTTP_400_BAD_REQUEST)
+
+#         try:
+#             # ... (Upload and DB processing logic remains unchanged)
+            
+#             # Read all sheets from Excel file
+#             excel_file = pd.ExcelFile(file_obj)
+            
+#             # Check if required sheets exist
+#             if 'Products' not in excel_file.sheet_names:
+#                  logger.error(f"Upload failed: Missing 'Products' sheet in file.")
+#                  return Response({
+#                      'error': "Missing 'Products' sheet",
+#                      'available_sheets': excel_file.sheet_names
+#                  }, status=status.HTTP_400_BAD_REQUEST)
+            
+#             df_products = pd.read_excel(excel_file, sheet_name='Products')
+#             df_products.columns = [c.strip().lower().replace(' ', '_') for c in df_products.columns]
+
+#             expected_product_cols = {
+#                  'item_id', 'product_name', 'product_long_description',
+#                  'product_short_description', 'product_type', 'image_path'
+#             }
+
+#             if not expected_product_cols.issubset(df_products.columns):
+#                  logger.error(f"Upload failed: Missing required columns in Products sheet.")
+#                  return Response({
+#                      'error': 'Missing required columns in Products sheet',
+#                      'required_columns': list(expected_product_cols),
+#                      'found_columns': list(df_products.columns)
+#                  }, status=status.HTTP_400_BAD_REQUEST)
+
+#             df_attributes = None
+#             has_attributes_sheet = 'Attribute_values' in excel_file.sheet_names
+            
+#             if has_attributes_sheet:
+#                  df_attributes = pd.read_excel(excel_file, sheet_name='Attribute_values')
+#                  df_attributes.columns = [c.strip().lower().replace(' ', '_') for c in df_attributes.columns]
+                 
+#                  expected_attr_cols = {'item_id', 'attribute_name', 'original_value'}
+#                  if not expected_attr_cols.issubset(df_attributes.columns):
+#                      logger.error(f"Upload failed: Missing required columns in Attribute_values sheet.")
+#                      return Response({
+#                           'error': 'Missing required columns in Attribute_values sheet',
+#                           'required_columns': list(expected_attr_cols),
+#                           'found_columns': list(df_attributes.columns)
+#                      }, status=status.HTTP_400_BAD_REQUEST)
+
+#             products_created = 0
+#             products_updated = 0
+#             attributes_created = 0
+#             attributes_updated = 0
+#             products_failed = 0
+#             attributes_failed = 0
+#             errors = []
+
+#             with transaction.atomic():
+#                  for idx, row in df_products.iterrows():
+#                      item_id = str(row.get('item_id', '')).strip()
+#                      product_type = str(row.get('product_type', '')).strip()
+
+#                      if not item_id:
+#                          products_failed += 1
+#                          errors.append(f"Products Row {idx + 2}: Missing item_id")
+#                          continue
+
+#                      try:
+#                          if product_type:
+#                              ProductType.objects.get_or_create(name=product_type)
+
+#                          defaults = {
+#                              'product_name': str(row.get('product_name', '')),
+#                              'product_long_description': str(row.get('product_long_description', '')),
+#                              'product_short_description': str(row.get('product_short_description', '')),
+#                              'product_type': product_type,
+#                              'image_path': str(row.get('image_path', '')),
+#                          }
+
+#                          obj, created = Product.objects.update_or_create(item_id=item_id, defaults=defaults)
+
+#                          if created: products_created += 1
+#                          else: products_updated += 1
+#                      except Exception as e:
+#                          products_failed += 1
+#                          errors.append(f"Products Row {idx + 2} (item_id: {item_id}): {str(e)}")
+#                          logger.error(f"Error processing product {item_id} in Products sheet: {e}")
+
+
+#                  if has_attributes_sheet and df_attributes is not None:
+#                       item_ids_in_attrs = df_attributes['item_id'].astype(str).unique()
+#                       existing_products = {p.item_id: p for p in Product.objects.filter(item_id__in=item_ids_in_attrs)}
+
+#                       for idx, row in df_attributes.iterrows():
+#                           item_id = str(row.get('item_id', '')).strip()
+#                           attribute_name = str(row.get('attribute_name', '')).strip()
+#                           original_value = str(row.get('original_value', '')).strip()
+
+#                           if not item_id or not attribute_name:
+#                               attributes_failed += 1
+#                               errors.append(f"Attribute_values Row {idx + 2}: Missing item_id or attribute_name")
+#                               continue
+
+#                           product = existing_products.get(item_id)
+#                           if not product:
+#                               attributes_failed += 1
+#                               errors.append(f"Attribute_values Row {idx + 2}: Product with item_id '{item_id}' not found. Make sure it exists in Products sheet.")
+#                               continue
+
+#                           try:
+#                               attr_obj, created = ProductAttributeValue.objects.update_or_create(
+#                                   product=product,
+#                                   attribute_name=attribute_name,
+#                                   defaults={'original_value': original_value}
+#                               )
+#                               if created: attributes_created += 1
+#                               else: attributes_updated += 1
+#                           except Exception as e:
+#                               attributes_failed += 1
+#                               errors.append(f"Attribute_values Row {idx + 2} (item_id: {item_id}, attribute: {attribute_name}): {str(e)}")
+#                               logger.error(f"Error processing attribute {attribute_name} for product {item_id}: {e}")
+
+#             # Prepare response data
+#             response_data = {
+#                 'message': 'Upload completed',
+#                 'products': {
+#                     'created': products_created, 'updated': products_updated, 'failed': products_failed,
+#                     'total_processed': products_created + products_updated + products_failed
+#                 },
+#                 'attribute_values': {
+#                      'created': attributes_created, 'updated': attributes_updated, 'failed': attributes_failed,
+#                      'total_processed': attributes_created + attributes_updated + attributes_failed
+#                 } if has_attributes_sheet else {'message': 'Attribute_values sheet not found in Excel file'},
+#                 'generated_excel_status': 'Excel generation started in the background.'
+#             }
+
+#             if errors:
+#                 response_data['errors'] = errors[:50]
+#                 if len(errors) > 50:
+#                     response_data['errors'].append(f"... and {len(errors) - 50} more errors")
+
+#             upload_status = status.HTTP_201_CREATED if products_failed == 0 and attributes_failed == 0 else status.HTTP_207_MULTI_STATUS
+
+#             # Start background thread for Excel generation if upload was successful
+#             if products_failed == 0 and attributes_failed == 0:
+#                 logger.info("API call successful. Triggering background Excel generation thread is commented for now !!!!.")
+#                 # threading.Thread(target=generate_product_excel_background, daemon=True).start()
+                
+#                 ## FIX: Update monitoring URLs to point to the new generated_outputs subfolder
+#                 # response_data['generated_excel_status'] = 'Background Excel generation triggered successfully.'
+#                 # response_data['monitoring'] = {
+#                 #      'excel_file': os.path.join(OUTPUT_URL, EXCEL_FILE_NAME),
+#                 #      'status_file': os.path.join(OUTPUT_URL, STATUS_FILE_NAME),
+#                 #      'log_file': os.path.join(OUTPUT_URL, LOG_FILE_NAME),
+#                 #      'note': 'These files will be available once the background process completes.'
+#                 # }
+#             else:
+#                  logger.warning(f"API call finished with errors ({products_failed} products, {attributes_failed} attributes). Not triggering background excel generation.")
+#                  response_data['generated_excel_status'] = 'Background Excel generation was NOT triggered due to upload errors. Fix upload errors and re-upload.'
+
+
+#             return Response(response_data, status=upload_status)
+
+#         except pd.errors.EmptyDataError:
+#             logger.error('The uploaded Excel file is empty or invalid.')
+#             return Response({'error': 'The uploaded Excel file is empty or invalid'}, status=status.HTTP_400_BAD_REQUEST)
+#         except Exception as e:
+#             logger.exception(f'An unexpected error occurred while processing the file.')
+#             return Response({'error': f'An unexpected error occurred while processing the file: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+
+
+
+# THIS KEEPS THE DB IN SYNC WITH THE EXCEL: IF A PRODUCT IS NOT PRESENT IN THE FILE, IT GETS DELETED.
+
+from rest_framework.views import APIView
+from rest_framework.response import Response
+from rest_framework import status
+from rest_framework.parsers import MultiPartParser, FormParser
+from django.db import transaction
+import pandas as pd
+import logging
+import os
+# import threading  # Uncomment if you use background excel generation
+from .models import Product, ProductType, ProductAttributeValue
+
+logger = logging.getLogger(__name__)
+
 class ProductUploadExcelView(APIView):
     """
-    POST API to upload an Excel file.
+    POST API to upload an Excel file and synchronize Products & Attributes with DB.
+    If a product is missing in Excel, it will be deleted from the database.
     """
     parser_classes = (MultiPartParser, FormParser)
 
@@ -1300,136 +1499,169 @@ class ProductUploadExcelView(APIView):
             return Response({'error': 'No file provided'}, status=status.HTTP_400_BAD_REQUEST)
 
         try:
-            # ... (Upload and DB processing logic remains unchanged)
-            
-            # Read all sheets from Excel file
+            # Read all sheets
             excel_file = pd.ExcelFile(file_obj)
-            
-            # Check if required sheets exist
+
             if 'Products' not in excel_file.sheet_names:
-                 logger.error(f"Upload failed: Missing 'Products' sheet in file.")
-                 return Response({
-                     'error': "Missing 'Products' sheet",
-                     'available_sheets': excel_file.sheet_names
-                 }, status=status.HTTP_400_BAD_REQUEST)
-            
+                logger.error("Missing 'Products' sheet in uploaded file.")
+                return Response({
+                    'error': "Missing 'Products' sheet",
+                    'available_sheets': excel_file.sheet_names
+                }, status=status.HTTP_400_BAD_REQUEST)
+
             df_products = pd.read_excel(excel_file, sheet_name='Products')
             df_products.columns = [c.strip().lower().replace(' ', '_') for c in df_products.columns]
 
             expected_product_cols = {
-                 'item_id', 'product_name', 'product_long_description',
-                 'product_short_description', 'product_type', 'image_path'
+                'item_id', 'product_name', 'product_long_description',
+                'product_short_description', 'product_type', 'image_path'
             }
 
             if not expected_product_cols.issubset(df_products.columns):
-                 logger.error(f"Upload failed: Missing required columns in Products sheet.")
-                 return Response({
-                     'error': 'Missing required columns in Products sheet',
-                     'required_columns': list(expected_product_cols),
-                     'found_columns': list(df_products.columns)
-                 }, status=status.HTTP_400_BAD_REQUEST)
+                logger.error("Missing required columns in Products sheet.")
+                return Response({
+                    'error': 'Missing required columns in Products sheet',
+                    'required_columns': list(expected_product_cols),
+                    'found_columns': list(df_products.columns)
+                }, status=status.HTTP_400_BAD_REQUEST)
 
-            df_attributes = None
+            # Optional attributes sheet
             has_attributes_sheet = 'Attribute_values' in excel_file.sheet_names
-            
+            df_attributes = None
             if has_attributes_sheet:
-                 df_attributes = pd.read_excel(excel_file, sheet_name='Attribute_values')
-                 df_attributes.columns = [c.strip().lower().replace(' ', '_') for c in df_attributes.columns]
-                 
-                 expected_attr_cols = {'item_id', 'attribute_name', 'original_value'}
-                 if not expected_attr_cols.issubset(df_attributes.columns):
-                     logger.error(f"Upload failed: Missing required columns in Attribute_values sheet.")
-                     return Response({
-                          'error': 'Missing required columns in Attribute_values sheet',
-                          'required_columns': list(expected_attr_cols),
-                          'found_columns': list(df_attributes.columns)
-                     }, status=status.HTTP_400_BAD_REQUEST)
+                df_attributes = pd.read_excel(excel_file, sheet_name='Attribute_values')
+                df_attributes.columns = [c.strip().lower().replace(' ', '_') for c in df_attributes.columns]
+
+                expected_attr_cols = {'item_id', 'attribute_name', 'original_value'}
+                if not expected_attr_cols.issubset(df_attributes.columns):
+                    logger.error("Missing required columns in Attribute_values sheet.")
+                    return Response({
+                        'error': 'Missing required columns in Attribute_values sheet',
+                        'required_columns': list(expected_attr_cols),
+                        'found_columns': list(df_attributes.columns)
+                    }, status=status.HTTP_400_BAD_REQUEST)
 
             products_created = 0
             products_updated = 0
+            products_deleted = 0
             attributes_created = 0
             attributes_updated = 0
+            attributes_deleted = 0
             products_failed = 0
             attributes_failed = 0
             errors = []
 
             with transaction.atomic():
-                 for idx, row in df_products.iterrows():
-                     item_id = str(row.get('item_id', '')).strip()
-                     product_type = str(row.get('product_type', '')).strip()
-
-                     if not item_id:
-                         products_failed += 1
-                         errors.append(f"Products Row {idx + 2}: Missing item_id")
-                         continue
-
-                     try:
-                         if product_type:
-                             ProductType.objects.get_or_create(name=product_type)
-
-                         defaults = {
-                             'product_name': str(row.get('product_name', '')),
-                             'product_long_description': str(row.get('product_long_description', '')),
-                             'product_short_description': str(row.get('product_short_description', '')),
-                             'product_type': product_type,
-                             'image_path': str(row.get('image_path', '')),
-                         }
-
-                         obj, created = Product.objects.update_or_create(item_id=item_id, defaults=defaults)
-
-                         if created: products_created += 1
-                         else: products_updated += 1
-                     except Exception as e:
-                         products_failed += 1
-                         errors.append(f"Products Row {idx + 2} (item_id: {item_id}): {str(e)}")
-                         logger.error(f"Error processing product {item_id} in Products sheet: {e}")
-
-
-                 if has_attributes_sheet and df_attributes is not None:
-                      item_ids_in_attrs = df_attributes['item_id'].astype(str).unique()
-                      existing_products = {p.item_id: p for p in Product.objects.filter(item_id__in=item_ids_in_attrs)}
-
-                      for idx, row in df_attributes.iterrows():
-                          item_id = str(row.get('item_id', '')).strip()
-                          attribute_name = str(row.get('attribute_name', '')).strip()
-                          original_value = str(row.get('original_value', '')).strip()
-
-                          if not item_id or not attribute_name:
-                              attributes_failed += 1
-                              errors.append(f"Attribute_values Row {idx + 2}: Missing item_id or attribute_name")
-                              continue
-
-                          product = existing_products.get(item_id)
-                          if not product:
-                              attributes_failed += 1
-                              errors.append(f"Attribute_values Row {idx + 2}: Product with item_id '{item_id}' not found. Make sure it exists in Products sheet.")
-                              continue
-
-                          try:
-                              attr_obj, created = ProductAttributeValue.objects.update_or_create(
-                                  product=product,
-                                  attribute_name=attribute_name,
-                                  defaults={'original_value': original_value}
-                              )
-                              if created: attributes_created += 1
-                              else: attributes_updated += 1
-                          except Exception as e:
-                              attributes_failed += 1
-                              errors.append(f"Attribute_values Row {idx + 2} (item_id: {item_id}, attribute: {attribute_name}): {str(e)}")
-                              logger.error(f"Error processing attribute {attribute_name} for product {item_id}: {e}")
-
-            # Prepare response data
+                # -------------------------------
+                # 🔥 TRUE SYNC: Delete missing products
+                # -------------------------------
+                existing_item_ids = set(Product.objects.values_list('item_id', flat=True))
+                uploaded_item_ids = set(df_products['item_id'].astype(str).str.strip())  # strip to match the per-row handling below
+                to_delete = existing_item_ids - uploaded_item_ids
+
+                if to_delete:
+                    deleted_count, _ = Product.objects.filter(item_id__in=to_delete).delete()
+                    products_deleted += deleted_count
+                    logger.info(f"Deleted {deleted_count} products missing in Excel.")
+
+                # -------------------------------
+                # ✅ Create or update products
+                # -------------------------------
+                for idx, row in df_products.iterrows():
+                    item_id = str(row.get('item_id', '')).strip()
+                    product_type = str(row.get('product_type', '')).strip()
+
+                    if not item_id:
+                        products_failed += 1
+                        errors.append(f"Products Row {idx + 2}: Missing item_id")
+                        continue
+
+                    try:
+                        if product_type:
+                            ProductType.objects.get_or_create(name=product_type)
+
+                        defaults = {
+                            'product_name': str(row.get('product_name', '')),
+                            'product_long_description': str(row.get('product_long_description', '')),
+                            'product_short_description': str(row.get('product_short_description', '')),
+                            'product_type': product_type,
+                            'image_path': str(row.get('image_path', '')),
+                        }
+
+                        obj, created = Product.objects.update_or_create(item_id=item_id, defaults=defaults)
+                        if created:
+                            products_created += 1
+                        else:
+                            products_updated += 1
+                    except Exception as e:
+                        products_failed += 1
+                        errors.append(f"Products Row {idx + 2} (item_id: {item_id}): {str(e)}")
+                        logger.error(f"Error processing product {item_id}: {e}")
+
+                # -------------------------------
+                # ✅ Handle attributes (optional)
+                # -------------------------------
+                if has_attributes_sheet and df_attributes is not None:
+                    item_ids_in_attrs = df_attributes['item_id'].astype(str).unique()
+                    existing_products = {p.item_id: p for p in Product.objects.filter(item_id__in=item_ids_in_attrs)}
+
+                    # 🔥 TRUE SYNC for attributes: delete attributes linked to deleted products
+                    if to_delete:
+                        deleted_attr_count, _ = ProductAttributeValue.objects.filter(product__item_id__in=to_delete).delete()
+                        attributes_deleted += deleted_attr_count
+                        logger.info(f"Deleted {deleted_attr_count} attributes linked to removed products.")
+
+                    for idx, row in df_attributes.iterrows():
+                        item_id = str(row.get('item_id', '')).strip()
+                        attribute_name = str(row.get('attribute_name', '')).strip()
+                        original_value = str(row.get('original_value', '')).strip()
+
+                        if not item_id or not attribute_name:
+                            attributes_failed += 1
+                            errors.append(f"Attribute_values Row {idx + 2}: Missing item_id or attribute_name")
+                            continue
+
+                        product = existing_products.get(item_id)
+                        if not product:
+                            attributes_failed += 1
+                            errors.append(f"Attribute_values Row {idx + 2}: Product '{item_id}' not found.")
+                            continue
+
+                        try:
+                            attr_obj, created = ProductAttributeValue.objects.update_or_create(
+                                product=product,
+                                attribute_name=attribute_name,
+                                defaults={'original_value': original_value}
+                            )
+                            if created:
+                                attributes_created += 1
+                            else:
+                                attributes_updated += 1
+                        except Exception as e:
+                            attributes_failed += 1
+                            errors.append(f"Attribute_values Row {idx + 2} (item_id: {item_id}, attribute: {attribute_name}): {str(e)}")
+                            logger.error(f"Error processing attribute {attribute_name} for {item_id}: {e}")
+
+            # -------------------------------
+            # ✅ Prepare response
+            # -------------------------------
             response_data = {
-                'message': 'Upload completed',
+                'message': 'Upload and synchronization completed',
                 'products': {
-                    'created': products_created, 'updated': products_updated, 'failed': products_failed,
-                    'total_processed': products_created + products_updated + products_failed
+                    'created': products_created,
+                    'updated': products_updated,
+                    'deleted': products_deleted,
+                    'failed': products_failed,
+                    'total_processed': products_created + products_updated + products_deleted + products_failed
                 },
                 'attribute_values': {
-                     'created': attributes_created, 'updated': attributes_updated, 'failed': attributes_failed,
-                     'total_processed': attributes_created + attributes_updated + attributes_failed
+                    'created': attributes_created,
+                    'updated': attributes_updated,
+                    'deleted': attributes_deleted,
+                    'failed': attributes_failed,
+                    'total_processed': attributes_created + attributes_updated + attributes_deleted + attributes_failed
                 } if has_attributes_sheet else {'message': 'Attribute_values sheet not found in Excel file'},
-                'generated_excel_status': 'Excel generation started in the background.'
+                'generated_excel_status': 'Excel generation skipped (true sync mode).'
             }
 
             if errors:
@@ -1437,34 +1669,16 @@ class ProductUploadExcelView(APIView):
                 if len(errors) > 50:
                     response_data['errors'].append(f"... and {len(errors) - 50} more errors")
 
-            upload_status = status.HTTP_201_CREATED if products_failed == 0 and attributes_failed == 0 else status.HTTP_207_MULTI_STATUS
-
-            # Start background thread for Excel generation if upload was successful
-            if products_failed == 0 and attributes_failed == 0:
-                logger.info("API call successful. Triggering background Excel generation thread is commented for now !!!!.")
-                # threading.Thread(target=generate_product_excel_background, daemon=True).start()
-                
-                ## FIX: Update monitoring URLs to point to the new generated_outputs subfolder
-                # response_data['generated_excel_status'] = 'Background Excel generation triggered successfully.'
-                # response_data['monitoring'] = {
-                #      'excel_file': os.path.join(OUTPUT_URL, EXCEL_FILE_NAME),
-                #      'status_file': os.path.join(OUTPUT_URL, STATUS_FILE_NAME),
-                #      'log_file': os.path.join(OUTPUT_URL, LOG_FILE_NAME),
-                #      'note': 'These files will be available once the background process completes.'
-                # }
-            else:
-                 logger.warning(f"API call finished with errors ({products_failed} products, {attributes_failed} attributes). Not triggering background excel generation.")
-                 response_data['generated_excel_status'] = 'Background Excel generation was NOT triggered due to upload errors. Fix upload errors and re-upload.'
-
-
+            upload_status = status.HTTP_201_CREATED if (products_failed == 0 and attributes_failed == 0) else status.HTTP_207_MULTI_STATUS
             return Response(response_data, status=upload_status)
 
         except pd.errors.EmptyDataError:
             logger.error('The uploaded Excel file is empty or invalid.')
             return Response({'error': 'The uploaded Excel file is empty or invalid'}, status=status.HTTP_400_BAD_REQUEST)
         except Exception as e:
-            logger.exception(f'An unexpected error occurred while processing the file.')
-            return Response({'error': f'An unexpected error occurred while processing the file: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+            logger.exception('Unexpected error while processing Excel file.')
+            return Response({'error': f'Unexpected error: {str(e)}'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
 
 
 class DownloadExcelTemplateView(APIView):
@@ -1871,7 +2085,6 @@ class ProductAttributesUploadView(APIView):
             return Response({"error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
 
 
-
 class ProductTypeAttributesView(APIView):
     """
     API to view, create, update, and delete product type attributes and their possible values.