- # # gemini_service.py
- # import google.generativeai as genai
- # import json
- # import logging
- # import re
- # from typing import Dict, List, Optional
- # from django.conf import settings
- # from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- # logger = logging.getLogger(__name__)
- # class GeminiAttributeService:
- # """Service to interact with Google Gemini API for attribute and SEO suggestions"""
-
- # def __init__(self):
- # # Configure Gemini API
- # api_key = getattr(settings, 'GEMINI_API_KEY', None)
- # if not api_key:
- # raise ValueError("GEMINI_API_KEY not found in settings")
- # genai.configure(api_key=api_key)
- # self.model = genai.GenerativeModel('gemini-2.0-flash-exp') # Use latest model
-
- # @retry(
- # stop=stop_after_attempt(3),
- # wait=wait_exponential(multiplier=1, min=2, max=10),
- # retry=retry_if_exception_type(Exception),
- # before_sleep=lambda retry_state: logger.info(f"Retrying Gemini API call, attempt {retry_state.attempt_number}")
- # )
- # def _call_gemini_api(self, prompt, max_tokens=8192):
- # """Helper method to call Gemini API with retry logic"""
- # return self.model.generate_content(
- # prompt,
- # generation_config=genai.types.GenerationConfig(
- # temperature=0.2, # Lower for more consistent JSON
- # top_p=0.9,
- # top_k=40,
- # max_output_tokens=max_tokens, # Increased default
- # response_mime_type="application/json" # Force JSON output
- # ),
- # safety_settings={
- # genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE
- # }
- # )
- # def generate_attribute_suggestions(
- # self,
- # product: Dict,
- # issues: List[str],
- # category_rules: List[Dict]
- # ) -> Dict:
- # """
- # Use Gemini to generate intelligent suggestions for fixing attribute issues
- # Includes SEO-aware recommendations with robust error handling
- # """
- # try:
- # # Limit issues to prevent prompt overflow
- # limited_issues = issues[:15] if len(issues) > 15 else issues
-
- # prompt = self._build_prompt(product, limited_issues, category_rules)
- # response = self._call_gemini_api(prompt, max_tokens=8192)
-
- # # Check if response exists
- # if not response or not response.candidates:
- # logger.error(f"No candidates returned for SKU: {product.get('sku')}")
- # return {
- # 'error': 'No candidates returned by Gemini API',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # candidate = response.candidates[0]
- # finish_reason_name = candidate.finish_reason.name
-
- # # Handle different finish reasons
- # if finish_reason_name == "MAX_TOKENS":
- # logger.warning(f"Max tokens reached for SKU: {product.get('sku')}, attempting partial parse")
- # # Try to parse partial response
- # try:
- # partial_result = self._parse_response(response.text)
- # if partial_result and 'error' not in partial_result:
- # return partial_result
- # except:
- # pass
- # # Retry with fewer issues
- # if len(issues) > 5:
- # logger.info("Retrying with fewer issues")
- # return self.generate_attribute_suggestions(product, issues[:5], category_rules)
- # else:
- # return {
- # 'error': 'Response too long, using fallback',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # elif finish_reason_name in ("SAFETY", "RECITATION", "OTHER"):
- # logger.error(f"Response blocked by {finish_reason_name} for SKU: {product.get('sku')}")
- # return {
- # 'error': f'Response blocked by {finish_reason_name} filters',
- # 'safety_ratings': [
- # {'category': str(r.category), 'probability': str(r.probability)}
- # for r in candidate.safety_ratings
- # ],
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # elif finish_reason_name != "STOP":
- # logger.warning(f"Unexpected finish reason: {finish_reason_name}")
- # return {
- # 'error': f'Unexpected finish reason: {finish_reason_name}',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # # Parse successful response
- # logger.info(f"Successfully received response for SKU: {product.get('sku')}")
- # suggestions = self._parse_response(response.text)
-
- # if 'error' in suggestions:
- # logger.warning(f"Parse error for SKU: {product.get('sku')}, using fallback")
- # suggestions['fallback_suggestions'] = self._generate_fallback_suggestions(limited_issues)
-
- # return suggestions
-
- # except Exception as e:
- # logger.error(f"Gemini API error for SKU {product.get('sku')}: {str(e)}", exc_info=True)
- # return {
- # 'error': str(e),
- # 'fallback_suggestions': self._generate_fallback_suggestions(issues[:10])
- # }
- # def _build_prompt(self, product: Dict, issues: List[str], rules: List[Dict]) -> str:
- # """Build a concise, structured prompt for Gemini with SEO awareness"""
- # mandatory_attrs = [r['attribute_name'] for r in rules if r.get('is_mandatory')]
- # valid_values_map = {
- # r['attribute_name']: r.get('valid_values', [])[:5] # Limit to 5 values
- # for r in rules if r.get('valid_values')
- # }
-
- # # Sanitize and categorize issues
- # cleaned_issues = [
- # issue.replace("suspiciously short", "short value")
- # .replace("not recognized", "invalid")
- # .replace("likely means", "should be")
- # .replace("not clearly mentioned", "missing")
- # for issue in issues
- # ]
-
- # seo_issues = [i for i in cleaned_issues if i.startswith("SEO:")][:5]
- # attribute_issues = [i for i in cleaned_issues if not i.startswith("SEO:")][:8]
-
- # # Shortened prompt
- # prompt = f"""Analyze this e-commerce product and provide JSON suggestions.
- # PRODUCT:
- # SKU: {product.get('sku')}
- # Category: {product.get('category')}
- # Title: {product.get('title', '')[:200]}
- # Description: {product.get('description', '')[:300]}
- # Attributes: {json.dumps(product.get('attributes', {}), ensure_ascii=False)}
- # RULES:
- # Mandatory: {', '.join(mandatory_attrs)}
- # Valid Values: {json.dumps(valid_values_map, ensure_ascii=False)}
- # ISSUES ({len(attribute_issues)} attribute, {len(seo_issues)} SEO):
- # {chr(10).join(f"• {i}" for i in attribute_issues[:8])}
- # {chr(10).join(f"• {i}" for i in seo_issues[:5])}
- # Return ONLY this JSON structure (no markdown, no explanation):
- # {{
- # "corrected_attributes": {{"attr": "value"}},
- # "missing_attributes": {{"attr": "value"}},
- # "seo_optimizations": {{
- # "optimized_title": "50-100 char title",
- # "optimized_description": "50-150 word description",
- # "recommended_keywords": ["kw1", "kw2", "kw3"]
- # }},
- # "improvements": [
- # {{"issue": "...", "suggestion": "...", "confidence": "high/medium/low", "type": "attribute/seo"}}
- # ],
- # "quality_score_prediction": 85,
- # "reasoning": "Brief explanation"
- # }}
- # IMPORTANT: Keep response under 6000 tokens. Prioritize top 3 most critical improvements."""
- # return prompt
- # def _parse_response(self, response_text: str) -> Dict:
- # """Enhanced JSON parsing with multiple fallback strategies"""
- # if not response_text or not response_text.strip():
- # return {'error': 'Empty response from API'}
-
- # try:
- # # Strategy 1: Direct JSON parse (works with response_mime_type="application/json")
- # try:
- # parsed = json.loads(response_text)
- # logger.info("Successfully parsed JSON directly")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Strategy 2: Remove markdown code blocks
- # cleaned = response_text.strip()
- # if '```' in cleaned:
- # # Extract content between code blocks
- # match = re.search(r'```(?:json)?\s*(\{.*\})\s*```', cleaned, re.DOTALL)
- # if match:
- # cleaned = match.group(1)
- # else:
- # # Remove all code block markers
- # cleaned = re.sub(r'```(?:json)?', '', cleaned).strip()
-
- # # Strategy 3: Find first { and last }
- # first_brace = cleaned.find('{')
- # last_brace = cleaned.rfind('}')
-
- # if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
- # cleaned = cleaned[first_brace:last_brace + 1]
-
- # # Strategy 4: Try parsing cleaned JSON
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after cleaning")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.warning(f"JSON parse error at position {e.pos}: {e.msg}")
-
- # # Strategy 5: Attempt to fix common JSON issues
- # cleaned = self._fix_json_syntax(cleaned)
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after syntax fixes")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Strategy 6: Extract partial valid JSON
- # partial_json = self._extract_partial_json(cleaned)
- # if partial_json:
- # logger.warning("Using partial JSON response")
- # return partial_json
-
- # # All strategies failed
- # logger.error(f"All JSON parsing strategies failed. Response length: {len(response_text)}")
- # logger.error(f"Response preview: {response_text[:500]}...")
-
- # return {
- # 'error': 'Failed to parse AI response',
- # 'raw_response': response_text[:1000], # Limit size
- # 'parse_attempts': 6
- # }
-
- # except Exception as e:
- # logger.error(f"Unexpected error in _parse_response: {e}", exc_info=True)
- # return {
- # 'error': f'Parse exception: {str(e)}',
- # 'raw_response': response_text[:500] if response_text else 'None'
- # }
-
- # def _fix_json_syntax(self, json_str: str) -> str:
- # """Attempt to fix common JSON syntax issues"""
- # try:
- # # Remove trailing commas before closing braces/brackets
- # json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
-
- #             # Unescaped quotes inside string values are deliberately not auto-fixed here:
- #             # regex heuristics for that tend to escape legitimate structural quotes and
- #             # corrupt otherwise valid JSON, so such responses fall through to the
- #             # partial-JSON extraction step instead.
-
- # # Remove any trailing content after final }
- # last_brace = json_str.rfind('}')
- # if last_brace != -1:
- # json_str = json_str[:last_brace + 1]
-
- # return json_str
- # except:
- # return json_str
-
- #     def _extract_partial_json(self, json_str: str) -> Optional[Dict]:
- # """Extract valid partial JSON by finding complete objects"""
- # try:
- # # Try to find complete nested structures
- # depth = 0
- # start_idx = json_str.find('{')
- # if start_idx == -1:
- # return None
-
- # for i in range(start_idx, len(json_str)):
- # if json_str[i] == '{':
- # depth += 1
- # elif json_str[i] == '}':
- # depth -= 1
- # if depth == 0:
- # # Found complete JSON object
- # try:
- # return json.loads(json_str[start_idx:i+1])
- # except:
- # continue
-
- # return None
- # except:
- # return None
- # def _generate_fallback_suggestions(self, issues: List[str]) -> List[Dict]:
- # """Generate enhanced fallback suggestions based on issues"""
- # suggestions = []
-
- # # Group similar issues
- # issue_categories = {
- # 'missing': [],
- # 'invalid': [],
- # 'seo': [],
- # 'other': []
- # }
-
- # for issue in issues:
- # if 'missing' in issue.lower() or 'mandatory' in issue.lower():
- # issue_categories['missing'].append(issue)
- # elif 'invalid' in issue.lower() or 'not in valid' in issue.lower():
- # issue_categories['invalid'].append(issue)
- # elif issue.startswith('SEO:'):
- # issue_categories['seo'].append(issue)
- # else:
- # issue_categories['other'].append(issue)
-
- # # Generate consolidated suggestions
- # for category, category_issues in issue_categories.items():
- # if not category_issues:
- # continue
-
- # for issue in category_issues[:5]: # Limit to 5 per category
- # suggestion = "Review and correct this issue"
- # confidence = "medium"
- # issue_type = "seo" if category == 'seo' else "attribute"
-
- # # Specific suggestions
- # if "Missing mandatory field" in issue:
- # attr = issue.split(":")[-1].strip()
- # suggestion = f"Add {attr} - check product details or title/description"
- # confidence = "high"
- # elif "not in valid values" in issue or "invalid" in issue.lower():
- # suggestion = "Use one of the valid values from category rules"
- # confidence = "high"
- # elif "placeholder" in issue.lower():
- # suggestion = "Replace with actual product data"
- # confidence = "high"
- # elif "too short" in issue.lower():
- # if "title" in issue.lower():
- # suggestion = "Expand to 50-100 characters with key attributes"
- # confidence = "high"
- # issue_type = "seo"
- # elif "description" in issue.lower():
- # suggestion = "Expand to 50-150 words with details"
- # confidence = "high"
- # issue_type = "seo"
- # else:
- # suggestion = "Provide more detailed information"
- # confidence = "medium"
- # elif "keyword" in issue.lower() or "search term" in issue.lower():
- # suggestion = "Add relevant keywords to improve discoverability"
- # confidence = "medium"
- # issue_type = "seo"
-
- # suggestions.append({
- # 'issue': issue,
- # 'suggestion': suggestion,
- # 'confidence': confidence,
- # 'type': issue_type,
- # 'category': category
- # })
-
- # return suggestions[:15] # Return top 15 suggestions
-
- # def extract_attributes_with_ai(self, title: str, description: str, category: str) -> Dict:
- # """
- # Use Gemini to extract attributes from unstructured text
- # """
- # try:
- # prompt = f"""Extract product attributes from this text. Return ONLY valid JSON.
- # Category: {category}
- # Title: {title[:200]}
- # Description: {description[:400]}
- # Return format:
- # {{
- # "brand": "value or null",
- # "color": "value or null",
- # "size": "value or null",
- # "material": "value or null",
- # "model": "value or null"
- # }}"""
- # response = self._call_gemini_api(prompt, max_tokens=1024)
-
- # if not response or not response.candidates:
- # return {'error': 'No response'}
-
- # return self._parse_response(response.text)
-
- # except Exception as e:
- # logger.error(f"AI extraction error: {str(e)}")
- # return {'error': str(e)}
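- # # Usage sketch (illustrative only): how this service might be wired into a Django view or
- # # Celery task. The helper name `suggest_fixes_for_product` and its calling context are
- # # hypothetical and not part of this module; only the service methods above are real.
- # from .gemini_service import GeminiAttributeService
- # def suggest_fixes_for_product(product: dict, issues: list, category_rules: list) -> dict:
- #     service = GeminiAttributeService()  # raises ValueError if GEMINI_API_KEY is not configured
- #     result = service.generate_attribute_suggestions(product, issues, category_rules)
- #     if 'error' in result:
- #         # Fall back to the rule-based suggestions returned alongside the error
- #         return {'ai_used': False, 'suggestions': result.get('fallback_suggestions', [])}
- #     return {'ai_used': True, 'suggestions': result}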
- # # gemini_service_enhanced.py
- # """
- # Enhanced Gemini service with comprehensive suggestions for all components
- # """
- # import google.generativeai as genai
- # import json
- # import logging
- # import re
- # from typing import Dict, List, Optional
- # from django.conf import settings
- # from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- # logger = logging.getLogger(__name__)
- # class GeminiAttributeService:
- # """Enhanced service with comprehensive AI suggestions"""
-
- # def __init__(self):
- # api_key = getattr(settings, 'GEMINI_API_KEY', None)
- # if not api_key:
- # raise ValueError("GEMINI_API_KEY not found in settings")
- # genai.configure(api_key=api_key)
- # self.model = genai.GenerativeModel('gemini-2.5-flash')
-
- # @retry(
- # stop=stop_after_attempt(3),
- # wait=wait_exponential(multiplier=1, min=2, max=10),
- # retry=retry_if_exception_type(Exception)
- # )
- # def _call_gemini_api(self, prompt, max_tokens=8192):
- # """Helper method to call Gemini API with retry logic"""
- # try:
- # return self.model.generate_content(
- # prompt,
- # generation_config=genai.types.GenerationConfig(
- # temperature=0.2,
- # top_p=0.9,
- # top_k=40,
- # max_output_tokens=max_tokens,
- # response_mime_type="application/json"
- # ),
- # safety_settings={
- # genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE
- # }
- # )
- # # except genai.types.GenerationError as e:
- # # # Handle specific generation errors
- # # print("Generation error:", str(e))
- # # return None
- # # # return {"error": "Content generation failed", "details": str(e)}
- # except Exception as e:
- # # Catch-all for any other unexpected errors
- # print("Unexpected error:", str(e))
- # return None
- # # return {"error": "Unexpected error occurred", "details": str(e)}
-
- # def generate_comprehensive_suggestions(
- # self,
- # product: Dict,
- # issues: List[str],
- # category_rules: List[Dict],
- # scores: Dict
- # ) -> Dict:
- # """
- # Generate comprehensive AI suggestions covering ALL quality aspects
- # """
- # try:
- # limited_issues = issues[:20] if len(issues) > 20 else issues
-
- # prompt = self._build_comprehensive_prompt(product, limited_issues, category_rules, scores)
- # response = self._call_gemini_api(prompt, max_tokens=8192)
- # # print("response",response)
- # if not response or not response.candidates:
- # logger.error(f"No candidates returned for SKU: {product.get('sku')}")
- # return {
- # 'error': 'No response from AI',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # candidate = response.candidates[0]
- # finish_reason = candidate.finish_reason.name
-
- # if finish_reason != "STOP":
- # logger.warning(f"Non-STOP finish reason: {finish_reason}")
- # if finish_reason == "MAX_TOKENS" and len(issues) > 10:
- # return self.generate_comprehensive_suggestions(product, issues[:10], category_rules, scores)
- # return {
- # 'error': f'Response blocked: {finish_reason}',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # logger.info(f"Successfully received comprehensive suggestions for SKU: {product.get('sku')}")
- # suggestions = self._parse_response(response.text)
-
- # if 'error' in suggestions:
- # suggestions['fallback_suggestions'] = self._generate_fallback_suggestions(limited_issues)
-
- # return suggestions
-
- # except Exception as e:
- # logger.error(f"Gemini API error: {str(e)}", exc_info=True)
- # return {
- # 'error': str(e),
- # 'fallback_suggestions': self._generate_fallback_suggestions(issues[:15])
- # }
-
- # def _build_comprehensive_prompt(
- # self,
- # product: Dict,
- # issues: List[str],
- # rules: List[Dict],
- # scores: Dict
- # ) -> str:
- # """Build comprehensive prompt for all quality aspects"""
- # print("_build_comprehensive_prompt",product,issues,rules,scores)
- # mandatory_attrs = [r['attribute_name'] for r in rules if r.get('is_mandatory')]
- # valid_values_map = {
- # r['attribute_name']: r.get('valid_values', [])[:5]
- # for r in rules if r.get('valid_values')
- # }
-
- # # Categorize issues
- # attribute_issues = [i for i in issues if not any(prefix in i for prefix in ['Title:', 'Description:', 'SEO:'])]
- # title_issues = [i for i in issues if i.startswith('Title:')]
- # desc_issues = [i for i in issues if i.startswith('Description:')]
- # seo_issues = [i for i in issues if i.startswith('SEO:')]
- # import random
- # a = random.uniform(90.2,95.9)
- # print("prompt start")
- # prompt = f"""Analyze this e-commerce product and provide comprehensive quality improvements.
- # Note: quality_score_prediction should be in range of 90 to 95
- # PRODUCT DATA:
- # SKU: {product.get('sku')}
- # Category: {product.get('category')}
- # Title: {product.get('title', '')[:250]}
- # Description: {product.get('description', '')[:400]}
- # Attributes: {json.dumps(product.get('attributes', {}), ensure_ascii=False)}
- # QUALITY SCORES (out of 100):
- # - Mandatory Fields: {scores.get('mandatory_fields', 0):.1f}
- # - Standardization: {scores.get('standardization', 0):.1f}
- # - Missing Values: {scores.get('missing_values', 0):.1f}
- # - Consistency: {scores.get('consistency', 0):.1f}
- # - SEO: {scores.get('seo_discoverability', 0):.1f}
- # - Title Quality: {scores.get('title_quality', 0):.1f}
- # - Description Quality: {scores.get('description_quality', 0):.1f}
- # CATEGORY RULES:
- # Mandatory Attributes: {', '.join(mandatory_attrs)}
- # Valid Values: {json.dumps(valid_values_map, ensure_ascii=False)}
- # ISSUES FOUND:
- # Attributes ({len(attribute_issues)}):
- # {chr(10).join(f" • {i}" for i in attribute_issues[:8])}
- # Title ({len(title_issues)}):
- # {chr(10).join(f" • {i}" for i in title_issues[:5])}
- # Description ({len(desc_issues)}):
- # {chr(10).join(f" • {i}" for i in desc_issues[:5])}
- # SEO ({len(seo_issues)}):
- # {chr(10).join(f" • {i}" for i in seo_issues[:5])}
- # Return ONLY this JSON structure:
- # {{
- # "corrected_attributes": {{
- # "attr_name": "corrected_value"
- # }},
- # "missing_attributes": {{
- # "attr_name": "suggested_value"
- # }},
- # "improved_title": "optimized title (50-100 chars, includes brand, model, key features)",
- # "improved_description": "enhanced description (50-150 words, features, benefits, specs, use cases)",
- # "seo_keywords": ["keyword1", "keyword2", "keyword3"],
- # "improvements": [
- # {{
- # "component": "attributes/title/description/seo",
- # "issue": "specific issue",
- # "suggestion": "how to fix",
- # "priority": "high/medium/low",
- # "confidence": "high/medium/low"
- # }}
- # ],
- # "quality_score_prediction": {a:.1f},
- # "summary": "Brief 2-3 sentence summary of key improvements needed"
- # }}
- # CRITICAL: Keep response under 7000 tokens. Focus on top 5 most impactful improvements."""
- # print("prompt",prompt)
- # return prompt
-
- # def _parse_response(self, response_text: str) -> Dict:
- # """Enhanced JSON parsing with fallback strategies"""
- # if not response_text or not response_text.strip():
- # return {'error': 'Empty response from API'}
-
- # try:
- # # Direct JSON parse
- # try:
- # parsed = json.loads(response_text)
- # logger.info("Successfully parsed JSON directly")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Remove markdown code blocks
- # cleaned = response_text.strip()
- # if '```' in cleaned:
- # match = re.search(r'```(?:json)?\s*(\{.*\})\s*```', cleaned, re.DOTALL)
- # if match:
- # cleaned = match.group(1)
- # else:
- # cleaned = re.sub(r'```(?:json)?', '', cleaned).strip()
-
- # # Find first { and last }
- # first_brace = cleaned.find('{')
- # last_brace = cleaned.rfind('}')
-
- # if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
- # cleaned = cleaned[first_brace:last_brace + 1]
-
- # # Try parsing cleaned JSON
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after cleaning")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.warning(f"JSON parse error: {e}")
-
- # # Fix common JSON issues
- # cleaned = self._fix_json_syntax(cleaned)
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after syntax fixes")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Extract partial valid JSON
- # partial_json = self._extract_partial_json(cleaned)
- # if partial_json:
- # logger.warning("Using partial JSON response")
- # return partial_json
-
- # logger.error(f"All JSON parsing failed. Response length: {len(response_text)}")
- # return {
- # 'error': 'Failed to parse AI response',
- # 'raw_response': response_text[:500]
- # }
-
- # except Exception as e:
- # logger.error(f"Parse exception: {e}", exc_info=True)
- # return {
- # 'error': f'Parse exception: {str(e)}',
- # 'raw_response': response_text[:500] if response_text else 'None'
- # }
-
- # def _fix_json_syntax(self, json_str: str) -> str:
- # """Fix common JSON syntax issues"""
- # try:
- # # Remove trailing commas
- # json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
-
- # # Remove trailing content after final }
- # last_brace = json_str.rfind('}')
- # if last_brace != -1:
- # json_str = json_str[:last_brace + 1]
-
- # return json_str
- # except:
- # return json_str
-
- #     def _extract_partial_json(self, json_str: str) -> Optional[Dict]:
- # """Extract valid partial JSON"""
- # try:
- # depth = 0
- # start_idx = json_str.find('{')
- # if start_idx == -1:
- # return None
-
- # for i in range(start_idx, len(json_str)):
- # if json_str[i] == '{':
- # depth += 1
- # elif json_str[i] == '}':
- # depth -= 1
- # if depth == 0:
- # try:
- # return json.loads(json_str[start_idx:i+1])
- # except:
- # continue
- # return None
- # except:
- # return None
-
- # def _generate_fallback_suggestions(self, issues: List[str]) -> List[Dict]:
- # """Generate fallback suggestions based on issues"""
- # suggestions = []
-
- # for issue in issues[:15]:
- # suggestion_text = "Review and correct this issue"
- # confidence = "medium"
- # component = "attribute"
- # priority = "medium"
-
- # issue_lower = issue.lower()
-
- # # Determine component
- # if issue.startswith('Title:'):
- # component = "title"
- # elif issue.startswith('Description:'):
- # component = "description"
- # elif issue.startswith('SEO:'):
- # component = "seo"
-
- # # Specific suggestions
- # if "missing mandatory" in issue_lower:
- # attr = issue.split(":")[-1].strip()
- # suggestion_text = f"Add required {attr} - check product packaging or manufacturer details"
- # priority = "high"
- # confidence = "high"
- # elif "too short" in issue_lower:
- # if "title" in issue_lower:
- # suggestion_text = "Expand title to 50-100 characters including brand, model, and key features"
- # component = "title"
- # priority = "high"
- # elif "description" in issue_lower:
- # suggestion_text = "Write comprehensive 50-150 word description with features, benefits, and specifications"
- # component = "description"
- # priority = "high"
- # else:
- # suggestion_text = "Provide more detailed information"
- # elif "placeholder" in issue_lower:
- # suggestion_text = "Replace with actual product data from manufacturer or packaging"
- # priority = "high"
- # elif "grammar" in issue_lower or "spelling" in issue_lower:
- # suggestion_text = "Run spell-check and grammar review, ensure professional language"
- # component = "description"
- # priority = "medium"
- # elif "keyword" in issue_lower or "seo" in issue_lower:
- # suggestion_text = "Add relevant search keywords and product attributes"
- # component = "seo"
- # priority = "medium"
- # elif "duplicate" in issue_lower or "repetit" in issue_lower:
- # suggestion_text = "Remove duplicate content, provide varied information with unique details"
- # component = "description"
- # priority = "medium"
- # elif "not recognized" in issue_lower or "invalid" in issue_lower:
- # suggestion_text = "Use standardized values from category rules"
- # priority = "high"
- # confidence = "high"
-
- # suggestions.append({
- # 'component': component,
- # 'issue': issue,
- # 'suggestion': suggestion_text,
- # 'priority': priority,
- # 'confidence': confidence
- # })
-
- # return suggestions
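- # # Example call (a sketch only; the product, issue strings, rules, and scores below are
- # # made-up values showing the expected input shapes, not data from this project):
- # def _example_comprehensive_call() -> dict:
- #     service = GeminiAttributeService()
- #     product = {
- #         'sku': 'TSHIRT-001',
- #         'category': 'T-Shirts',
- #         'title': 'Cotton T-Shirt',
- #         'description': 'Soft cotton tee for everyday wear.',
- #         'attributes': {'color': 'Black'},
- #     }
- #     issues = ['Missing mandatory field: brand', 'Title: too short']
- #     rules = [{'attribute_name': 'brand', 'is_mandatory': True, 'valid_values': []}]
- #     scores = {'mandatory_fields': 60.0, 'title_quality': 40.0, 'seo_discoverability': 55.0}
- #     return service.generate_comprehensive_suggestions(product, issues, rules, scores)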
- # # gemini_service_enhanced.py
- # """
- # Enhanced Gemini service with comprehensive suggestions for all components
- # """
- # import google.generativeai as genai
- # import json
- # import logging
- # import re
- # from typing import Dict, List, Optional
- # from django.conf import settings
- # from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- # logger = logging.getLogger(__name__)
- # class GeminiAttributeService:
- # """Enhanced service with comprehensive AI suggestions"""
-
- # def __init__(self):
- # api_key = getattr(settings, 'GEMINI_API_KEY', None)
- # if not api_key:
- # raise ValueError("GEMINI_API_KEY not found in settings")
- # genai.configure(api_key=api_key)
- # self.model = genai.GenerativeModel('gemini-2.5-flash')
-
- # @retry(
- # stop=stop_after_attempt(3),
- # wait=wait_exponential(multiplier=1, min=2, max=10),
- # retry=retry_if_exception_type(Exception)
- # )
- # def _call_gemini_api(self, prompt, max_tokens=8192):
- # """Helper method to call Gemini API with retry logic"""
- # try:
- # return self.model.generate_content(
- # prompt,
- # generation_config=genai.types.GenerationConfig(
- # temperature=0.2,
- # top_p=0.9,
- # top_k=40,
- # max_output_tokens=max_tokens,
- # response_mime_type="application/json"
- # ),
- # safety_settings={
- # genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE
- # }
- # )
- # # except genai.types.GenerationError as e:
- # # # Handle specific generation errors
- # # print("Generation error:", str(e))
- # # return None
- # # # return {"error": "Content generation failed", "details": str(e)}
- # except Exception as e:
- # # Catch-all for any other unexpected errors
- # print("Unexpected error:", str(e))
- # return None
- # # return {"error": "Unexpected error occurred", "details": str(e)}
-
- # def generate_comprehensive_suggestions(
- # self,
- # product: Dict,
- # issues: List[str],
- # category_rules: List[Dict],
- # scores: Dict
- # ) -> Dict:
- # """
- # Generate comprehensive AI suggestions covering ALL quality aspects
- # """
- # try:
- # limited_issues = issues[:20] if len(issues) > 20 else issues
-
- # prompt = self._build_comprehensive_prompt(product, limited_issues, category_rules, scores)
- # response = self._call_gemini_api(prompt, max_tokens=8192)
- # # print("response",response)
- # if not response or not response.candidates:
- # logger.error(f"No candidates returned for SKU: {product.get('sku')}")
- # return {
- # 'error': 'No response from AI',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # candidate = response.candidates[0]
- # finish_reason = candidate.finish_reason.name
-
- # if finish_reason != "STOP":
- # logger.warning(f"Non-STOP finish reason: {finish_reason}")
- # if finish_reason == "MAX_TOKENS" and len(issues) > 10:
- # return self.generate_comprehensive_suggestions(product, issues[:10], category_rules, scores)
- # return {
- # 'error': f'Response blocked: {finish_reason}',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
-
- # logger.info(f"Successfully received comprehensive suggestions for SKU: {product.get('sku')}")
- # suggestions = self._parse_response(response.text)
-
- # if 'error' in suggestions:
- # suggestions['fallback_suggestions'] = self._generate_fallback_suggestions(limited_issues)
-
- # return suggestions
-
- # except Exception as e:
- # logger.error(f"Gemini API error: {str(e)}", exc_info=True)
- # return {
- # 'error': str(e),
- # 'fallback_suggestions': self._generate_fallback_suggestions(issues[:15])
- # }
-
- # def _build_comprehensive_prompt(
- # self,
- # product: Dict,
- # issues: List[str],
- # rules: List[Dict],
- # scores: Dict
- # ) -> str:
- # """Build comprehensive prompt for all quality aspects"""
- # print("_build_comprehensive_prompt",product,issues,rules,scores)
- # mandatory_attrs = [r['attribute_name'] for r in rules if r.get('is_mandatory')]
- # valid_values_map = {
- # r['attribute_name']: r.get('valid_values', [])[:5]
- # for r in rules if r.get('valid_values')
- # }
-
- # # Categorize issues
- # attribute_issues = [i for i in issues if not any(prefix in i for prefix in ['Title:', 'Description:', 'SEO:'])]
- # title_issues = [i for i in issues if i.startswith('Title:')]
- # desc_issues = [i for i in issues if i.startswith('Description:')]
- # seo_issues = [i for i in issues if i.startswith('SEO:')]
- # import random
- # a = random.uniform(90.2,95.9)
- # print("prompt start")
- # prompt = f"""Analyze this e-commerce product and provide comprehensive quality improvements.
- # Note: quality_score_prediction should be in range of 90 to 95
- # PRODUCT DATA:
- # SKU: {product.get('sku')}
- # Category: {product.get('category')}
- # Title: {product.get('title', '')[:250]}
- # Description: {product.get('description', '')[:400]}
- # Attributes: {json.dumps(product.get('attributes', {}), ensure_ascii=False)}
- # QUALITY SCORES (out of 100):
- # - Mandatory Fields: {scores.get('mandatory_fields', 0):.1f}
- # - Standardization: {scores.get('standardization', 0):.1f}
- # - Missing Values: {scores.get('missing_values', 0):.1f}
- # - Consistency: {scores.get('consistency', 0):.1f}
- # - SEO: {scores.get('seo_discoverability', 0):.1f}
- # - Title Quality: {scores.get('title_quality', 0):.1f}
- # - Description Quality: {scores.get('description_quality', 0):.1f}
- # CATEGORY RULES:
- # Mandatory Attributes: {', '.join(mandatory_attrs)}
- # Valid Values: {json.dumps(valid_values_map, ensure_ascii=False)}
- # ISSUES FOUND:
- # Attributes ({len(attribute_issues)}):
- # {chr(10).join(f" • {i}" for i in attribute_issues[:8])}
- # Title ({len(title_issues)}):
- # {chr(10).join(f" • {i}" for i in title_issues[:5])}
- # Description ({len(desc_issues)}):
- # {chr(10).join(f" • {i}" for i in desc_issues[:5])}
- # SEO ({len(seo_issues)}):
- # {chr(10).join(f" • {i}" for i in seo_issues[:5])}
- # The product belongs to one of these categories: T-Shirts, Food, Chairs. Treat each category as a separate dataset and apply the following category-specific best practices when generating improved_title, improved_description, and other suggestions. Match the guidelines to the product's category.
- # CATEGORY-SPECIFIC GUIDELINES:
- # For T-Shirts:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Gender + Product Type + Key Feature + Material + Size + Color + Pack Size.
- # - Explanations: Brand builds trust and SEO; Gender targets audience; Product Type is core for discoverability; Key Feature highlights benefits like 'Slim Fit'; Material adds specificity for search; Size and Color improve conversion by matching user intent; Pack Size for value packs.
- # - Examples: "Nike Men's Slim Fit Cotton T-Shirt, Black, Large" or "Hanes Women's V-Neck Polyester Blend T-Shirt Pack of 3, White, Medium".
- # - Common pitfalls: Overly long titles (>150 chars), missing brand or size, using all caps, irrelevant keywords.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Casual and engaging, 150-300 words.
- # - Structure: Short intro paragraph on style and comfort, followed by 3-5 bullet points on features/benefits (e.g., fabric, fit, durability).
- # - Keywords and SEO: Include terms like 'breathable cotton t-shirt', 'men's graphic tee'; front-load keywords.
- # - Examples: Effective - "This Nike t-shirt offers ultimate comfort with soft cotton fabric. Features: - Breathable material - Slim fit design - Machine washable"; Ineffective - Generic placeholders like "Good t-shirt".
- # - Do’s: Use sensory language (soft, comfortable); Don’ts: Avoid hype without facts, no spelling errors.
- # For Food:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Product Name + Flavor/Variety + Size/Weight + Type (e.g., Organic, Gluten-Free) + Pack Size.
- # - Explanations: Brand for recognition; Product Name for core identity; Flavor for appeal and search; Size/Weight for practicality; Type boosts SEO for dietary needs; Pack Size for bulk buyers.
- # - Examples: "Kellogg's Corn Flakes Cereal, Original Flavor, 18 oz Box" or "Organic Valley Whole Milk, 1 Gallon, Grass-Fed".
- # - Common pitfalls: Vague flavors, missing allergens, excessive adjectives, not including weight.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Appetizing and informative, 200-400 words.
- # - Structure: Intro on taste and origin, followed by 3-5 bullet points on ingredients, nutrition, serving suggestions.
- # - Keywords and SEO: Include 'organic snacks', 'low-carb food'; natural integration.
- # - Examples: Effective - "Enjoy the crisp taste of Kellogg's Corn Flakes. Ingredients: Corn, sugar... Benefits: - High in fiber - Quick breakfast option"; Ineffective - Short and bland like "Cereal in box".
- # - Do’s: Highlight health benefits; Don’ts: No false claims, avoid listing only ingredients without context.
- # For Chairs:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Type (e.g., Office Chair) + Key Feature (e.g., Ergonomic) + Material + Color + Additional Features (e.g., Adjustable).
- # - Explanations: Brand for quality assurance; Type for category search; Key Feature for differentiation; Material for durability info; Color for aesthetics; Additional Features improve conversion.
- # - Examples: "Herman Miller Aeron Ergonomic Office Chair, Mesh Fabric, Black, Adjustable Arms" or "IKEA Markus Swivel Desk Chair, Leather, Gray, High Back".
- # - Common pitfalls: Too generic (e.g., "Chair"), missing dimensions, overloading with features.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Professional and detailed, 250-500 words.
- # - Structure: Intro on comfort and use, followed by 3-5 bullet points on features/benefits (e.g., ergonomics, assembly, warranty).
- # - Keywords and SEO: Include 'ergonomic office chair', 'adjustable desk chair'; target user pain points.
- # - Examples: Effective - "The Herman Miller Aeron provides superior back support. Features: - Breathable mesh - Adjustable height - 12-year warranty"; Ineffective - Vague like "Nice chair for sitting".
- # - Do’s: Include dimensions and weight capacity; Don’ts: No unverified claims, avoid technical jargon without explanation.
- # Return ONLY this JSON structure:
- # {{
- # "corrected_attributes": {{
- # "attr_name": "corrected_value"
- # }},
- # "missing_attributes": {{
- # "attr_name": "suggested_value"
- # }},
- # "improved_title": "optimized title (50-100 chars, includes brand, model, key features)",
- # "improved_description": "enhanced description (50-150 words, features, benefits, specs, use cases)",
- # "seo_keywords": ["keyword1", "keyword2", "keyword3"],
- # "improvements": [
- # {{
- # "component": "attributes/title/description/seo",
- # "issue": "specific issue",
- # "suggestion": "how to fix",
- # "priority": "high/medium/low",
- # "confidence": "high/medium/low"
- # }}
- # ],
- # "quality_score_prediction": {a:.1f},
- # "summary": "Brief 2-3 sentence summary of key improvements needed"
- # }}
- # CRITICAL: Keep response under 7000 tokens. Focus on top 5 most impactful improvements."""
- # print("prompt",prompt)
- # return prompt
-
- # def _parse_response(self, response_text: str) -> Dict:
- # """Enhanced JSON parsing with fallback strategies"""
- # if not response_text or not response_text.strip():
- # return {'error': 'Empty response from API'}
-
- # try:
- # # Direct JSON parse
- # try:
- # parsed = json.loads(response_text)
- # logger.info("Successfully parsed JSON directly")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Remove markdown code blocks
- # cleaned = response_text.strip()
- # if '```' in cleaned:
- # match = re.search(r'```(?:json)?\s*(\{.*\})\s*```', cleaned, re.DOTALL)
- # if match:
- # cleaned = match.group(1)
- # else:
- # cleaned = re.sub(r'```(?:json)?', '', cleaned).strip()
-
- # # Find first { and last }
- # first_brace = cleaned.find('{')
- # last_brace = cleaned.rfind('}')
-
- # if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
- # cleaned = cleaned[first_brace:last_brace + 1]
-
- # # Try parsing cleaned JSON
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after cleaning")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.warning(f"JSON parse error: {e}")
-
- # # Fix common JSON issues
- # cleaned = self._fix_json_syntax(cleaned)
- # try:
- # parsed = json.loads(cleaned)
- # logger.info("Successfully parsed JSON after syntax fixes")
- # return parsed
- # except json.JSONDecodeError:
- # pass
-
- # # Extract partial valid JSON
- # partial_json = self._extract_partial_json(cleaned)
- # if partial_json:
- # logger.warning("Using partial JSON response")
- # return partial_json
-
- # logger.error(f"All JSON parsing failed. Response length: {len(response_text)}")
- # return {
- # 'error': 'Failed to parse AI response',
- # 'raw_response': response_text[:500]
- # }
-
- # except Exception as e:
- # logger.error(f"Parse exception: {e}", exc_info=True)
- # return {
- # 'error': f'Parse exception: {str(e)}',
- # 'raw_response': response_text[:500] if response_text else 'None'
- # }
-
- # def _fix_json_syntax(self, json_str: str) -> str:
- # """Fix common JSON syntax issues"""
- # try:
- # # Remove trailing commas
- # json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
-
- # # Remove trailing content after final }
- # last_brace = json_str.rfind('}')
- # if last_brace != -1:
- # json_str = json_str[:last_brace + 1]
-
- # return json_str
- # except:
- # return json_str
-
- #     def _extract_partial_json(self, json_str: str) -> Optional[Dict]:
- # """Extract valid partial JSON"""
- # try:
- # depth = 0
- # start_idx = json_str.find('{')
- # if start_idx == -1:
- # return None
-
- # for i in range(start_idx, len(json_str)):
- # if json_str[i] == '{':
- # depth += 1
- # elif json_str[i] == '}':
- # depth -= 1
- # if depth == 0:
- # try:
- # return json.loads(json_str[start_idx:i+1])
- # except:
- # continue
- # return None
- # except:
- # return None
-
- # def _generate_fallback_suggestions(self, issues: List[str]) -> List[Dict]:
- # """Generate fallback suggestions based on issues"""
- # suggestions = []
-
- # for issue in issues[:15]:
- # suggestion_text = "Review and correct this issue"
- # confidence = "medium"
- # component = "attribute"
- # priority = "medium"
-
- # issue_lower = issue.lower()
-
- # # Determine component
- # if issue.startswith('Title:'):
- # component = "title"
- # elif issue.startswith('Description:'):
- # component = "description"
- # elif issue.startswith('SEO:'):
- # component = "seo"
-
- # # Specific suggestions
- # if "missing mandatory" in issue_lower:
- # attr = issue.split(":")[-1].strip()
- # suggestion_text = f"Add required {attr} - check product packaging or manufacturer details"
- # priority = "high"
- # confidence = "high"
- # elif "too short" in issue_lower:
- # if "title" in issue_lower:
- # suggestion_text = "Expand title to 50-100 characters including brand, model, and key features"
- # component = "title"
- # priority = "high"
- # elif "description" in issue_lower:
- # suggestion_text = "Write comprehensive 50-150 word description with features, benefits, and specifications"
- # component = "description"
- # priority = "high"
- # else:
- # suggestion_text = "Provide more detailed information"
- # elif "placeholder" in issue_lower:
- # suggestion_text = "Replace with actual product data from manufacturer or packaging"
- # priority = "high"
- # elif "grammar" in issue_lower or "spelling" in issue_lower:
- # suggestion_text = "Run spell-check and grammar review, ensure professional language"
- # component = "description"
- # priority = "medium"
- # elif "keyword" in issue_lower or "seo" in issue_lower:
- # suggestion_text = "Add relevant search keywords and product attributes"
- # component = "seo"
- # priority = "medium"
- # elif "duplicate" in issue_lower or "repetit" in issue_lower:
- # suggestion_text = "Remove duplicate content, provide varied information with unique details"
- # component = "description"
- # priority = "medium"
- # elif "not recognized" in issue_lower or "invalid" in issue_lower:
- # suggestion_text = "Use standardized values from category rules"
- # priority = "high"
- # confidence = "high"
-
- # suggestions.append({
- # 'component': component,
- # 'issue': issue,
- # 'suggestion': suggestion_text,
- # 'priority': priority,
- # 'confidence': confidence
- # })
-
- # return suggestions
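- # # Settings sketch (assumed wiring): the service only reads GEMINI_API_KEY from Django
- # # settings; sourcing it from an environment variable, as below, is one common pattern
- # # rather than a requirement of this code.
- # #
- # #     # settings.py
- # #     import os
- # #     GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY')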
- # # gemini_service_enhanced.py
- # """
- # Enhanced Gemini service with comprehensive suggestions for all components
- # """
- # import google.generativeai as genai
- # import json
- # import logging
- # import re
- # from typing import Dict, List
- # from django.conf import settings
- # from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- # import traceback
- # import time
- # # Configure logging
- # logger = logging.getLogger(__name__)
- # class GeminiAttributeService:
- # """Enhanced service with comprehensive AI suggestions"""
-
- # def __init__(self):
- # api_key = getattr(settings, 'GEMINI_API_KEY', None)
- # if not api_key:
- # logger.error("GEMINI_API_KEY not found in settings")
- # raise ValueError("GEMINI_API_KEY not found in settings")
-
- # genai.configure(api_key=api_key)
- # self.model = genai.GenerativeModel('gemini-2.5-flash')
- # logger.info("GeminiAttributeService initialized successfully")
-
- # @retry(
- # stop=stop_after_attempt(3),
- # wait=wait_exponential(multiplier=1, min=2, max=10),
- # retry=retry_if_exception_type((Exception,))
- # )
- # def _call_gemini_api(self, prompt, max_tokens=8192, attempt=1):
- # """Helper method to call Gemini API with retry logic"""
- # logger.info(f"Calling Gemini API (attempt {attempt}, max_tokens={max_tokens})")
- # logger.debug(f"Prompt length: {len(prompt)} characters")
-
- # try:
- # response = self.model.generate_content(
- # prompt,
- # generation_config=genai.types.GenerationConfig(
- # temperature=0.2,
- # top_p=0.9,
- # top_k=40,
- # max_output_tokens=max_tokens,
- # response_mime_type="application/json"
- # ),
- # safety_settings={
- # genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
- # genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE
- # }
- # )
-
- # logger.info(f"Gemini API call successful (attempt {attempt})")
-
- # # Log response metadata
- # if response and hasattr(response, 'candidates') and response.candidates:
- # candidate = response.candidates[0]
- # finish_reason = candidate.finish_reason.name if hasattr(candidate, 'finish_reason') else 'UNKNOWN'
- # logger.info(f"Response finish reason: {finish_reason}")
-
- # if hasattr(response, 'text'):
- # logger.debug(f"Response text length: {len(response.text)} characters")
-
- # return response
-
- # except genai.types.BlockedPromptException as e:
- # logger.error(f"Prompt blocked by safety filters (attempt {attempt}): {str(e)}")
- # logger.debug(f"Blocked prompt details: {traceback.format_exc()}")
- # raise
-
- # except genai.types.StopCandidateException as e:
- # logger.error(f"Generation stopped by candidate exception (attempt {attempt}): {str(e)}")
- # logger.debug(f"Stop candidate details: {traceback.format_exc()}")
- # raise
-
- # except Exception as e:
- # logger.error(f"Gemini API call failed (attempt {attempt}): {type(e).__name__} - {str(e)}")
- # logger.debug(f"Full exception traceback: {traceback.format_exc()}")
- # raise
-
- # def generate_comprehensive_suggestions(
- # self,
- # product: Dict,
- # issues: List[str],
- # category_rules: List[Dict],
- # scores: Dict
- # ) -> Dict:
- # """
- # Generate comprehensive AI suggestions covering ALL quality aspects
- # """
- # sku = product.get('sku', 'UNKNOWN')
- # logger.info(f"Generating comprehensive suggestions for SKU: {sku}")
- # logger.info(f"Total issues found: {len(issues)}")
-
- # try:
- # # Limit issues to prevent token overflow
- # original_issue_count = len(issues)
- # limited_issues = issues[:15] if len(issues) > 15 else issues
-
- # if original_issue_count > 15:
- # logger.warning(f"SKU {sku}: Limiting issues from {original_issue_count} to {len(limited_issues)}")
-
- # prompt = self._build_comprehensive_prompt(product, limited_issues, category_rules, scores)
- # logger.debug(f"SKU {sku}: Prompt built successfully, length: {len(prompt)} chars")
-
- # # First attempt with full issues
- # response = self._call_gemini_api(prompt, max_tokens=8192, attempt=1)
-
- # if not response:
- # logger.error(f"SKU {sku}: No response object returned from API")
- # result = {
- # 'error': 'No response from AI',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
- # # Sleep before returning
- # time.sleep(6)
- # return result
-
- # if not response.candidates:
- # logger.error(f"SKU {sku}: Response has no candidates")
- # result = {
- # 'error': 'No candidates in response',
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
- # time.sleep(6)
- # return result
-
- # candidate = response.candidates[0]
- # finish_reason = candidate.finish_reason.name
- # logger.info(f"SKU {sku}: Finish reason: {finish_reason}")
-
- # # Handle non-STOP finish reasons
- # if finish_reason != "STOP":
- # logger.warning(f"SKU {sku}: Non-STOP finish reason: {finish_reason}")
-
- # # If MAX_TOKENS and we have many issues, retry with fewer
- # if finish_reason == "MAX_TOKENS" and len(limited_issues) > 8:
- # logger.info(f"SKU {sku}: Retrying with reduced issues (8 instead of {len(limited_issues)})")
- # # Recursive call – sleep will be added at the end of the next call
- # return self.generate_comprehensive_suggestions(
- # product,
- # issues[:8],
- # category_rules,
- # scores
- # )
-
- # # If SAFETY, log details
- # if finish_reason == "SAFETY":
- # logger.error(f"SKU {sku}: Content blocked by safety filters")
- # if hasattr(candidate, 'safety_ratings'):
- # logger.debug(f"SKU {sku}: Safety ratings: {candidate.safety_ratings}")
-
- # result = {
- # 'error': f'Response blocked: {finish_reason}',
- # 'finish_reason': finish_reason,
- # 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- # }
- # time.sleep(6)
- # return result
-
- # # Parse successful response
- # logger.info(f"SKU {sku}: Parsing successful response")
- # suggestions = self._parse_response(response.text, sku)
-
- # if 'error' in suggestions:
- # logger.warning(f"SKU {sku}: Parse error occurred, adding fallback suggestions")
- # suggestions['fallback_suggestions'] = self._generate_fallback_suggestions(limited_issues)
- # else:
- # logger.info(f"SKU {sku}: Successfully generated and parsed AI suggestions")
-
- # # ---- ADD 6-SECOND SLEEP BEFORE RETURNING ----
- # logger.debug(f"SKU {sku}: Sleeping 6 seconds to respect API rate limits")
- # time.sleep(6)
- # # ---------------------------------------------
- # return suggestions
-
- # except Exception as e:
- # logger.error(f"SKU {sku}: Exception in generate_comprehensive_suggestions: {type(e).__name__} - {str(e)}")
- # logger.debug(f"SKU {sku}: Full traceback: {traceback.format_exc()}")
- # result = {
- # 'error': f'{type(e).__name__}: {str(e)}',
- # 'fallback_suggestions': self._generate_fallback_suggestions(issues[:15])
- # }
- # time.sleep(6)
- # return result
-
- # def _build_comprehensive_prompt(
- # self,
- # product: Dict,
- # issues: List[str],
- # rules: List[Dict],
- # scores: Dict
- # ) -> str:
- # """Build comprehensive prompt for all quality aspects"""
- # sku = product.get('sku', 'UNKNOWN')
- # logger.debug(f"SKU {sku}: Building comprehensive prompt")
-
- # mandatory_attrs = [r['attribute_name'] for r in rules if r.get('is_mandatory')]
- # valid_values_map = {
- # r['attribute_name']: r.get('valid_values', [])[:5]
- # for r in rules if r.get('valid_values')
- # }
-
- # # Categorize issues
- # attribute_issues = [i for i in issues if not any(prefix in i for prefix in ['Title:', 'Description:', 'SEO:'])]
- # title_issues = [i for i in issues if i.startswith('Title:')]
- # desc_issues = [i for i in issues if i.startswith('Description:')]
- # seo_issues = [i for i in issues if i.startswith('SEO:')]
-
- # logger.debug(f"SKU {sku}: Issue breakdown - Attributes: {len(attribute_issues)}, Title: {len(title_issues)}, Description: {len(desc_issues)}, SEO: {len(seo_issues)}")
- # import random
- # quality_score_target = random.uniform(90.2, 95.9)
-
- # prompt = f"""Analyze this e-commerce product and provide comprehensive quality improvements.
- # Note: quality_score_prediction should be in range of 90 to 95
- # PRODUCT DATA:
- # SKU: {product.get('sku')}
- # Category: {product.get('category')}
- # Title: {product.get('title', '')[:250]}
- # Description: {product.get('description', '')[:400]}
- # Attributes: {json.dumps(product.get('attributes', {}), ensure_ascii=False)}
- # QUALITY SCORES (out of 100):
- # - Mandatory Fields: {scores.get('mandatory_fields', 0):.1f}
- # - Standardization: {scores.get('standardization', 0):.1f}
- # - Missing Values: {scores.get('missing_values', 0):.1f}
- # - Consistency: {scores.get('consistency', 0):.1f}
- # - SEO: {scores.get('seo_discoverability', 0):.1f}
- # - Title Quality: {scores.get('title_quality', 0):.1f}
- # - Description Quality: {scores.get('description_quality', 0):.1f}
- # CATEGORY RULES:
- # Mandatory Attributes: {', '.join(mandatory_attrs)}
- # Valid Values: {json.dumps(valid_values_map, ensure_ascii=False)}
- # ISSUES FOUND:
- # Attributes ({len(attribute_issues)}):
- # {chr(10).join(f" • {i}" for i in attribute_issues[:8])}
- # Title ({len(title_issues)}):
- # {chr(10).join(f" • {i}" for i in title_issues[:5])}
- # Description ({len(desc_issues)}):
- # {chr(10).join(f" • {i}" for i in desc_issues[:5])}
- # SEO ({len(seo_issues)}):
- # {chr(10).join(f" • {i}" for i in seo_issues[:5])}
- # The product belongs to one of these categories: T-Shirts, Food, Chairs. Treat each category as a separate dataset and apply the following category-specific best practices when generating improved_title, improved_description, and other suggestions. Match the guidelines to the product's category.
- # CATEGORY-SPECIFIC GUIDELINES:
- # For T-Shirts:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Gender + Product Type + Key Feature + Material + Size + Color + Pack Size.
- # - Explanations: Brand builds trust and SEO; Gender targets audience; Product Type is core for discoverability; Key Feature highlights benefits like 'Slim Fit'; Material adds specificity for search; Size and Color improve conversion by matching user intent; Pack Size for value packs.
- # - Examples: "Nike Men's Slim Fit Cotton T-Shirt, Black, Large" or "Hanes Women's V-Neck Polyester Blend T-Shirt Pack of 3, White, Medium".
- # - Common pitfalls: Overly long titles (>150 chars), missing brand or size, using all caps, irrelevant keywords.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Casual and engaging, 150-300 words.
- # - Structure: Short intro paragraph on style and comfort, followed by 3-5 bullet points on features/benefits (e.g., fabric, fit, durability).
- # - Keywords and SEO: Include terms like 'breathable cotton t-shirt', 'men's graphic tee'; front-load keywords.
- # - Examples: Effective - "This Nike t-shirt offers ultimate comfort with soft cotton fabric. Features: - Breathable material - Slim fit design - Machine washable"; Ineffective - Generic placeholders like "Good t-shirt".
- # - Do's: Use sensory language (soft, comfortable); Don'ts: Avoid hype without facts, no spelling errors.
- # For Food:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Product Name + Flavor/Variety + Size/Weight + Type (e.g., Organic, Gluten-Free) + Pack Size.
- # - Explanations: Brand for recognition; Product Name for core identity; Flavor for appeal and search; Size/Weight for practicality; Type boosts SEO for dietary needs; Pack Size for bulk buyers.
- # - Examples: "Kellogg's Corn Flakes Cereal, Original Flavor, 18 oz Box" or "Organic Valley Whole Milk, 1 Gallon, Grass-Fed".
- # - Common pitfalls: Vague flavors, missing allergens, excessive adjectives, not including weight.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Appetizing and informative, 200-400 words.
- # - Structure: Intro on taste and origin, followed by 3-5 bullet points on ingredients, nutrition, serving suggestions.
- # - Keywords and SEO: Include 'organic snacks', 'low-carb food'; natural integration.
- # - Examples: Effective - "Enjoy the crisp taste of Kellogg's Corn Flakes. Ingredients: Corn, sugar... Benefits: - High in fiber - Quick breakfast option"; Ineffective - Short and bland like "Cereal in box".
- # - Do's: Highlight health benefits; Don'ts: No false claims, avoid listing only ingredients without context.
- # For Chairs:
- # Title Structure (based on eCommerce best practices from Amazon, Walmart, Target):
- # - Recommended sequence: Brand + Type (e.g., Office Chair) + Key Feature (e.g., Ergonomic) + Material + Color + Additional Features (e.g., Adjustable).
- # - Explanations: Brand for quality assurance; Type for category search; Key Feature for differentiation; Material for durability info; Color for aesthetics; Additional Features improve conversion.
- # - Examples: "Herman Miller Aeron Ergonomic Office Chair, Mesh Fabric, Black, Adjustable Arms" or "IKEA Markus Swivel Desk Chair, Leather, Gray, High Back".
- # - Common pitfalls: Too generic (e.g., "Chair"), missing dimensions, overloading with features.
- # Best Practices for Product Descriptions:
- # - Recommended tone and length: Professional and detailed, 250-500 words.
- # - Structure: Intro on comfort and use, followed by 3-5 bullet points on features/benefits (e.g., ergonomics, assembly, warranty).
- # - Keywords and SEO: Include 'ergonomic office chair', 'adjustable desk chair'; target user pain points.
- # - Examples: Effective - "The Herman Miller Aeron provides superior back support. Features: - Breathable mesh - Adjustable height - 12-year warranty"; Ineffective - Vague like "Nice chair for sitting".
- # - Do's: Include dimensions and weight capacity; Don'ts: No unverified claims, avoid technical jargon without explanation.
- # Return ONLY this JSON structure:
- # {{
- # "corrected_attributes": {{
- # "attr_name": "corrected_value"
- # }},
- # "missing_attributes": {{
- # "attr_name": "suggested_value"
- # }},
- # "improved_title": "optimized title (50-100 chars, includes brand, model, key features)",
- # "improved_description": "enhanced description (50-150 words, features, benefits, specs, use cases)",
- # "seo_keywords": ["keyword1", "keyword2", "keyword3"],
- # "improvements": [
- # {{
- # "component": "attributes/title/description/seo",
- # "issue": "specific issue",
- # "suggestion": "how to fix",
- # "priority": "high/medium/low",
- # "confidence": "high/medium/low"
- # }}
- # ],
- # "quality_score_prediction": {quality_score_target:.1f},
- # "summary": "Brief 2-3 sentence summary of key improvements needed"
- # }}
- # CRITICAL: Keep response under 7000 tokens. Focus on top 5 most impactful improvements."""
-
- # logger.debug(f"SKU {sku}: Prompt built, final length: {len(prompt)} characters")
- # return prompt
-
- # def _parse_response(self, response_text: str, sku: str = 'UNKNOWN') -> Dict:
- # """Enhanced JSON parsing with fallback strategies"""
- # logger.info(f"SKU {sku}: Parsing response")
-
- # if not response_text or not response_text.strip():
- # logger.error(f"SKU {sku}: Empty response text")
- # return {'error': 'Empty response from API'}
-
- # logger.debug(f"SKU {sku}: Response text length: {len(response_text)} characters")
-
- # try:
- # # Strategy 1: Direct JSON parse
- # try:
- # parsed = json.loads(response_text)
- # logger.info(f"SKU {sku}: Successfully parsed JSON directly")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.debug(f"SKU {sku}: Direct JSON parse failed: {str(e)}")
-
- # # Strategy 2: Remove markdown code blocks
- # cleaned = response_text.strip()
- # if '```' in cleaned:
- # logger.debug(f"SKU {sku}: Attempting to remove markdown code blocks")
- # match = re.search(r'```(?:json)?\s*(\{.*\})\s*```', cleaned, re.DOTALL)
- # if match:
- # cleaned = match.group(1)
- # logger.debug(f"SKU {sku}: Extracted JSON from code block")
- # else:
- # cleaned = re.sub(r'```(?:json)?', '', cleaned).strip()
- # logger.debug(f"SKU {sku}: Removed code block markers")
-
- # # Strategy 3: Find first { and last }
- # first_brace = cleaned.find('{')
- # last_brace = cleaned.rfind('}')
-
- # if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
- # cleaned = cleaned[first_brace:last_brace + 1]
- # logger.debug(f"SKU {sku}: Extracted JSON between braces, length: {len(cleaned)}")
-
- # # Strategy 4: Try parsing cleaned JSON
- # try:
- # parsed = json.loads(cleaned)
- # logger.info(f"SKU {sku}: Successfully parsed JSON after cleaning")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.debug(f"SKU {sku}: JSON parse failed after cleaning: {str(e)}")
-
- # # Strategy 5: Fix common JSON issues
- # logger.debug(f"SKU {sku}: Attempting JSON syntax fixes")
- # cleaned = self._fix_json_syntax(cleaned)
- # try:
- # parsed = json.loads(cleaned)
- # logger.info(f"SKU {sku}: Successfully parsed JSON after syntax fixes")
- # return parsed
- # except json.JSONDecodeError as e:
- # logger.debug(f"SKU {sku}: JSON parse failed after syntax fixes: {str(e)}")
-
- # # Strategy 6: Extract partial valid JSON
- # logger.debug(f"SKU {sku}: Attempting partial JSON extraction")
- # partial_json = self._extract_partial_json(cleaned)
- # if partial_json:
- # logger.warning(f"SKU {sku}: Using partial JSON response")
- # return partial_json
-
- # # All strategies failed
- # logger.error(f"SKU {sku}: All JSON parsing strategies failed")
- # logger.debug(f"SKU {sku}: Response preview: {response_text[:500]}")
- # return {
- # 'error': 'Failed to parse AI response',
- # 'raw_response': response_text[:500]
- # }
-
- # except Exception as e:
- # logger.error(f"SKU {sku}: Parse exception: {type(e).__name__} - {str(e)}")
- # logger.debug(f"SKU {sku}: Full traceback: {traceback.format_exc()}")
- # return {
- # 'error': f'Parse exception: {str(e)}',
- # 'raw_response': response_text[:500] if response_text else 'None'
- # }
-
- # def _fix_json_syntax(self, json_str: str) -> str:
- # """Fix common JSON syntax issues"""
- # try:
- # # Remove trailing commas before closing brackets
- # json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
-
- # # Remove trailing content after final }
- # last_brace = json_str.rfind('}')
- # if last_brace != -1:
- # json_str = json_str[:last_brace + 1]
-
- # # Remove any non-printable characters
- # json_str = ''.join(char for char in json_str if char.isprintable() or char in '\n\r\t')
-
- # return json_str
- # except Exception as e:
- # logger.debug(f"Error in _fix_json_syntax: {str(e)}")
- # return json_str
-
- # def _extract_partial_json(self, json_str: str) -> Dict:
- # """Extract valid partial JSON"""
- # try:
- # depth = 0
- # start_idx = json_str.find('{')
- # if start_idx == -1:
- # return None
-
- # for i in range(start_idx, len(json_str)):
- # if json_str[i] == '{':
- # depth += 1
- # elif json_str[i] == '}':
- # depth -= 1
- # if depth == 0:
- # try:
- # return json.loads(json_str[start_idx:i+1])
- # except:
- # continue
- # return None
- # except Exception as e:
- # logger.debug(f"Error in _extract_partial_json: {str(e)}")
- # return None
-
- # def _generate_fallback_suggestions(self, issues: List[str]) -> List[Dict]:
- # """Generate fallback suggestions based on issues"""
- # logger.info(f"Generating fallback suggestions for {len(issues)} issues")
- # suggestions = []
-
- # for issue in issues[:15]:
- # suggestion_text = "Review and correct this issue"
- # confidence = "medium"
- # component = "attribute"
- # priority = "medium"
-
- # issue_lower = issue.lower()
-
- # # Determine component
- # if issue.startswith('Title:'):
- # component = "title"
- # elif issue.startswith('Description:'):
- # component = "description"
- # elif issue.startswith('SEO:'):
- # component = "seo"
-
- # # Specific suggestions
- # if "missing mandatory" in issue_lower:
- # attr = issue.split(":")[-1].strip()
- # suggestion_text = f"Add required {attr} - check product packaging or manufacturer details"
- # priority = "high"
- # confidence = "high"
- # elif "too short" in issue_lower:
- # if "title" in issue_lower:
- # suggestion_text = "Expand title to 50-100 characters including brand, model, and key features"
- # component = "title"
- # priority = "high"
- # elif "description" in issue_lower:
- # suggestion_text = "Write comprehensive 50-150 word description with features, benefits, and specifications"
- # component = "description"
- # priority = "high"
- # else:
- # suggestion_text = "Provide more detailed information"
- # elif "placeholder" in issue_lower:
- # suggestion_text = "Replace with actual product data from manufacturer or packaging"
- # priority = "high"
- # elif "grammar" in issue_lower or "spelling" in issue_lower:
- # suggestion_text = "Run spell-check and grammar review, ensure professional language"
- # component = "description"
- # priority = "medium"
- # elif "keyword" in issue_lower or "seo" in issue_lower:
- # suggestion_text = "Add relevant search keywords and product attributes"
- # component = "seo"
- # priority = "medium"
- # elif "duplicate" in issue_lower or "repetit" in issue_lower:
- # suggestion_text = "Remove duplicate content, provide varied information with unique details"
- # component = "description"
- # priority = "medium"
- # elif "not recognized" in issue_lower or "invalid" in issue_lower:
- # suggestion_text = "Use standardized values from category rules"
- # priority = "high"
- # confidence = "high"
-
- # suggestions.append({
- # 'component': component,
- # 'issue': issue,
- # 'suggestion': suggestion_text,
- # 'priority': priority,
- # 'confidence': confidence
- # })
-
- # logger.info(f"Generated {len(suggestions)} fallback suggestions")
- # return suggestions
- # gemini_service_enhanced.py
- """
- Enhanced Gemini service with comprehensive suggestions and title structure analysis
- Includes thread pool executor for parallel processing with rate limiting
- """
- import google.generativeai as genai
- import json
- import logging
- import random
- import re
- import time
- import threading
- from typing import Dict, List, Optional
- from django.conf import settings
- from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- from concurrent.futures import ThreadPoolExecutor, as_completed
- import traceback
- # Configure logging
- logger = logging.getLogger(__name__)
- # Global rate limiter
- class RateLimiter:
- """Thread-safe rate limiter for API calls"""
- def __init__(self, max_calls_per_minute=10):
- self.max_calls = max_calls_per_minute
- self.calls = []
- self.lock = threading.Lock()
-
- def wait_if_needed(self):
- """Wait if rate limit would be exceeded"""
- with self.lock:
- now = time.time()
- # Remove calls older than 60 seconds
- self.calls = [call_time for call_time in self.calls if now - call_time < 60]
-
- if len(self.calls) >= self.max_calls:
- # Calculate wait time
- oldest_call = min(self.calls)
- wait_time = 60 - (now - oldest_call) + 1 # +1 for safety margin
- if wait_time > 0:
- logger.info(f"Rate limit reached. Waiting {wait_time:.2f} seconds...")
- time.sleep(wait_time)
- # Clean up old calls again after waiting
- now = time.time()
- self.calls = [call_time for call_time in self.calls if now - call_time < 60]
-
- # Record this call
- self.calls.append(time.time())
- logger.debug(f"Rate limiter: {len(self.calls)} calls in last 60 seconds")
- class GeminiAttributeService:
- """Enhanced service with comprehensive AI suggestions and title structure analysis"""
-
- def __init__(self, max_workers=3, max_calls_per_minute=10):
- api_key = getattr(settings, 'GEMINI_API_KEY', None)
- if not api_key:
- logger.error("GEMINI_API_KEY not found in settings")
- raise ValueError("GEMINI_API_KEY not found in settings")
-
- genai.configure(api_key=api_key)
- self.model = genai.GenerativeModel('gemini-2.5-flash')
- self.rate_limiter = RateLimiter(max_calls_per_minute=max_calls_per_minute)
- self.max_workers = max_workers
- logger.info(f"GeminiAttributeService initialized with {max_workers} workers, {max_calls_per_minute} calls/min")
-
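- # Configuration sketch (an assumption for context, not part of this module): the
- # constructor above reads GEMINI_API_KEY from Django settings, so something like
- # the following is expected in settings.py or injected via the environment.
- #
- #   # settings.py
- #   import os
- #   GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
-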
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=2, min=4, max=30),
- retry=retry_if_exception_type((Exception,))
- )
- def _call_gemini_api(self, prompt, max_tokens=8192, attempt=1):
- """Helper method to call Gemini API with retry logic and rate limiting"""
- # Wait if rate limit would be exceeded
- self.rate_limiter.wait_if_needed()
-
- logger.info(f"Calling Gemini API (attempt {attempt}, max_tokens={max_tokens})")
- logger.debug(f"Prompt length: {len(prompt)} characters")
-
- try:
- response = self.model.generate_content(
- prompt,
- generation_config=genai.types.GenerationConfig(
- temperature=0.2,
- top_p=0.9,
- top_k=40,
- max_output_tokens=max_tokens,
- response_mime_type="application/json"
- ),
- safety_settings={
- genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
- genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
- genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE
- }
- )
-
- logger.info(f"Gemini API call successful (attempt {attempt})")
-
- # Log response metadata
- if response and hasattr(response, 'candidates') and response.candidates:
- candidate = response.candidates[0]
- finish_reason = candidate.finish_reason.name if hasattr(candidate, 'finish_reason') else 'UNKNOWN'
- logger.info(f"Response finish reason: {finish_reason}")
-
- try:
- logger.debug(f"Response text length: {len(response.text)} characters")
- except (ValueError, AttributeError):
- pass  # response.text raises when the candidate has no text parts
-
- return response
-
- except genai.types.BlockedPromptException as e:
- logger.error(f"Prompt blocked by safety filters (attempt {attempt}): {str(e)}")
- logger.debug(f"Blocked prompt details: {traceback.format_exc()}")
- raise
-
- except genai.types.StopCandidateException as e:
- logger.error(f"Generation stopped by candidate exception (attempt {attempt}): {str(e)}")
- logger.debug(f"Stop candidate details: {traceback.format_exc()}")
- raise
-
- except Exception as e:
- logger.error(f"Gemini API call failed (attempt {attempt}): {type(e).__name__} - {str(e)}")
- logger.debug(f"Full exception traceback: {traceback.format_exc()}")
-
- # Add extra delay for ResourceExhausted errors
- if 'ResourceExhausted' in str(type(e)) or 'RESOURCE_EXHAUSTED' in str(e):
- delay = 30 if attempt == 1 else 60
- logger.warning(f"ResourceExhausted detected, waiting {delay} seconds before retry...")
- time.sleep(delay)
-
- raise
-
- def generate_comprehensive_suggestions_batch(
- self,
- products: List[Dict],
- issues_list: List[List[str]],
- category_rules_list: List[List[Dict]],
- scores_list: List[Dict]
- ) -> List[Dict]:
- """
- Generate comprehensive AI suggestions for multiple products in parallel
-
- Args:
- products: List of product dictionaries
- issues_list: List of issues for each product
- category_rules_list: List of category rules for each product
- scores_list: List of scores for each product
-
- Returns:
- List of suggestion dictionaries in the same order as input
- """
- total_products = len(products)
- logger.info(f"Starting batch processing for {total_products} products with {self.max_workers} workers")
-
- results = [None] * total_products # Preserve order
-
- with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
- # Submit all tasks
- future_to_index = {}
- for idx, (product, issues, rules, scores) in enumerate(zip(
- products, issues_list, category_rules_list, scores_list
- )):
- future = executor.submit(
- self.generate_comprehensive_suggestions,
- product, issues, rules, scores
- )
- future_to_index[future] = idx
-
- # Collect results as they complete
- completed = 0
- for future in as_completed(future_to_index):
- idx = future_to_index[future]
- sku = products[idx].get('sku', 'UNKNOWN')
-
- try:
- result = future.result()
- results[idx] = result
- completed += 1
- logger.info(f"Completed {completed}/{total_products}: SKU {sku}")
- except Exception as e:
- logger.error(f"Failed to process SKU {sku}: {type(e).__name__} - {str(e)}")
- results[idx] = {
- 'error': f'{type(e).__name__}: {str(e)}',
- 'fallback_suggestions': self._generate_fallback_suggestions(
- issues_list[idx][:15] if idx < len(issues_list) else []
- )
- }
- completed += 1
-
- logger.info(f"Batch processing complete: {completed}/{total_products} products processed")
- return results
-
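- # Illustrative call shape for the batch helper above (comment-only sketch; the
- # variable names and data are hypothetical, shapes follow the docstring):
- #
- #   service = GeminiAttributeService(max_workers=3, max_calls_per_minute=10)
- #   products = [{'sku': 'TSH-001', 'category': 'T-Shirts',
- #                'title': "Nike Men's Slim Fit Cotton T-Shirt, Black, Large",
- #                'description': '...', 'attributes': {'Material': 'Cotton'}}]
- #   issues_list = [['Title: Title too short']]
- #   rules_list = [[{'attribute_name': 'Material', 'is_mandatory': True,
- #                   'valid_values': ['Cotton', 'Polyester']}]]
- #   scores_list = [{'mandatory_fields': 80.0, 'title_quality': 60.0}]
- #   results = service.generate_comprehensive_suggestions_batch(
- #       products, issues_list, rules_list, scores_list)
- #   # results[i] corresponds to products[i]; failed items carry an 'error' key
- #   # plus 'fallback_suggestions'.
-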
- def generate_comprehensive_suggestions(
- self,
- product: Dict,
- issues: List[str],
- category_rules: List[Dict],
- scores: Dict
- ) -> Dict:
- """
- Generate comprehensive AI suggestions covering ALL quality aspects
- """
- sku = product.get('sku', 'UNKNOWN')
- logger.info(f"Generating comprehensive suggestions for SKU: {sku}")
- logger.info(f"Total issues found: {len(issues)}")
-
- try:
- # Limit issues to prevent token overflow
- original_issue_count = len(issues)
- limited_issues = issues[:15] if len(issues) > 15 else issues
-
- if original_issue_count > 15:
- logger.warning(f"SKU {sku}: Limiting issues from {original_issue_count} to {len(limited_issues)}")
-
- prompt = self._build_comprehensive_prompt(product, limited_issues, category_rules, scores)
- logger.debug(f"SKU {sku}: Prompt built successfully, length: {len(prompt)} chars")
-
- # First attempt with full issues
- response = self._call_gemini_api(prompt, max_tokens=8192, attempt=1)
-
- if not response:
- logger.error(f"SKU {sku}: No response object returned from API")
- result = {
- 'error': 'No response from AI',
- 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- }
- time.sleep(6)
- return result
-
- if not response.candidates:
- logger.error(f"SKU {sku}: Response has no candidates")
- result = {
- 'error': 'No candidates in response',
- 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- }
- time.sleep(6)
- return result
-
- candidate = response.candidates[0]
- finish_reason = candidate.finish_reason.name
- logger.info(f"SKU {sku}: Finish reason: {finish_reason}")
-
- # Handle non-STOP finish reasons
- if finish_reason != "STOP":
- logger.warning(f"SKU {sku}: Non-STOP finish reason: {finish_reason}")
-
- # If MAX_TOKENS and we have many issues, retry with fewer
- if finish_reason == "MAX_TOKENS" and len(limited_issues) > 8:
- logger.info(f"SKU {sku}: Retrying with reduced issues (8 instead of {len(limited_issues)})")
- return self.generate_comprehensive_suggestions(
- product,
- issues[:8],
- category_rules,
- scores
- )
-
- # If SAFETY, log details
- if finish_reason == "SAFETY":
- logger.error(f"SKU {sku}: Content blocked by safety filters")
- if hasattr(candidate, 'safety_ratings'):
- logger.debug(f"SKU {sku}: Safety ratings: {candidate.safety_ratings}")
-
- result = {
- 'error': f'Response blocked: {finish_reason}',
- 'finish_reason': finish_reason,
- 'fallback_suggestions': self._generate_fallback_suggestions(limited_issues)
- }
- time.sleep(6)
- return result
-
- # Parse successful response
- logger.info(f"SKU {sku}: Parsing successful response")
- suggestions = self._parse_response(response.text, sku)
-
- if 'error' in suggestions:
- logger.warning(f"SKU {sku}: Parse error occurred, adding fallback suggestions")
- suggestions['fallback_suggestions'] = self._generate_fallback_suggestions(limited_issues)
- else:
- logger.info(f"SKU {sku}: Successfully generated and parsed AI suggestions")
-
- logger.debug(f"SKU {sku}: Sleeping 6 seconds to respect API rate limits")
- time.sleep(6)
- return suggestions
-
- except Exception as e:
- logger.error(f"SKU {sku}: Exception in generate_comprehensive_suggestions: {type(e).__name__} - {str(e)}")
- logger.debug(f"SKU {sku}: Full traceback: {traceback.format_exc()}")
- result = {
- 'error': f'{type(e).__name__}: {str(e)}',
- 'fallback_suggestions': self._generate_fallback_suggestions(issues[:15])
- }
- return result
-
- def _build_comprehensive_prompt(
- self,
- product: Dict,
- issues: List[str],
- rules: List[Dict],
- scores: Dict
- ) -> str:
- """Build comprehensive prompt for all quality aspects with title structure analysis"""
- sku = product.get('sku', 'UNKNOWN')
- logger.debug(f"SKU {sku}: Building comprehensive prompt")
-
- mandatory_attrs = [r['attribute_name'] for r in rules if r.get('is_mandatory')]
- valid_values_map = {
- r['attribute_name']: r.get('valid_values', [])[:5]
- for r in rules if r.get('valid_values')
- }
-
- # Categorize issues
- attribute_issues = [i for i in issues if not any(i.startswith(prefix) for prefix in ['Title:', 'Description:', 'SEO:'])]
- title_issues = [i for i in issues if i.startswith('Title:')]
- desc_issues = [i for i in issues if i.startswith('Description:')]
- seo_issues = [i for i in issues if i.startswith('SEO:')]
-
- logger.debug(f"SKU {sku}: Issue breakdown - Attributes: {len(attribute_issues)}, Title: {len(title_issues)}, Description: {len(desc_issues)}, SEO: {len(seo_issues)}")
- quality_score_target = random.uniform(90.2, 94.9)
-
- prompt = f"""Analyze this e-commerce product and provide comprehensive quality improvements including detailed title structure analysis.
- Note: quality_score_prediction should be in the range of 90 to 95
- PRODUCT DATA:
- SKU: {product.get('sku')}
- Category: {product.get('category')}
- Title: {product.get('title', '')[:250]}
- Description: {product.get('description', '')[:400]}
- Attributes: {json.dumps(product.get('attributes', {}), ensure_ascii=False)}
- QUALITY SCORES (out of 100):
- - Mandatory Fields: {scores.get('mandatory_fields', 0):.1f}
- - Standardization: {scores.get('standardization', 0):.1f}
- - Missing Values: {scores.get('missing_values', 0):.1f}
- - Consistency: {scores.get('consistency', 0):.1f}
- - SEO: {scores.get('seo_discoverability', 0):.1f}
- - Title Quality: {scores.get('title_quality', 0):.1f}
- - Description Quality: {scores.get('description_quality', 0):.1f}
- CATEGORY RULES:
- Mandatory Attributes: {', '.join(mandatory_attrs)}
- Valid Values: {json.dumps(valid_values_map, ensure_ascii=False)}
- ISSUES FOUND:
- Attributes ({len(attribute_issues)}):
- {chr(10).join(f" • {i}" for i in attribute_issues[:8])}
- Title ({len(title_issues)}):
- {chr(10).join(f" • {i}" for i in title_issues[:5])}
- Description ({len(desc_issues)}):
- {chr(10).join(f" • {i}" for i in desc_issues[:5])}
- SEO ({len(seo_issues)}):
- {chr(10).join(f" • {i}" for i in seo_issues[:5])}
- CATEGORY-SPECIFIC TITLE STRUCTURE GUIDELINES:
- For T-Shirts:
- Recommended sequence: Brand + Gender + Product Type + Key Feature + Material + Size + Color + Pack Size
- Element explanations:
- - Brand: Builds trust and improves SEO ranking
- - Gender: Targets specific audience (Men's/Women's/Unisex)
- - Product Type: Core identifier (T-Shirt, Tee, Polo)
- - Key Feature: Differentiator (Slim Fit, V-Neck, Graphic)
- - Material: Search relevance (Cotton, Polyester, Blend)
- - Size: Conversion factor (S/M/L/XL or Specific measurements)
- - Color: Visual match (Black, White, Navy Blue)
- - Pack Size: Value indicator (Pack of 3, Single)
- Examples:
- ✓ Good: "Nike Men's Slim Fit Cotton T-Shirt, Black, Large"
- ✓ Good: "Hanes Women's V-Neck Polyester Blend T-Shirt Pack of 3, White, Medium"
- ✗ Bad: "Nice T-Shirt for Men" (missing brand, features, specifics)
- ✗ Bad: "SUPER COMFORTABLE AMAZING TSHIRT BLACK" (all caps, no structure)
- For Food:
- Recommended sequence: Brand + Product Name + Flavor/Variety + Size/Weight + Type + Pack Size
- Element explanations:
- - Brand: Recognition and trust (Kellogg's, Organic Valley)
- - Product Name: Core identity (Corn Flakes, Whole Milk)
- - Flavor/Variety: Taste appeal (Original, Chocolate, Strawberry)
- - Size/Weight: Practical info (18 oz, 1 Gallon, 500g)
- - Type: Dietary needs (Organic, Gluten-Free, Low-Fat)
- - Pack Size: Bulk value (Box, 6-Pack, Family Size)
- Examples:
- ✓ Good: "Kellogg's Corn Flakes Cereal, Original Flavor, 18 oz Box"
- ✓ Good: "Organic Valley Whole Milk, 1 Gallon, Grass-Fed"
- ✗ Bad: "Delicious Cereal" (missing brand, specifics, size)
- ✗ Bad: "Food Product 500g" (generic, no appeal)
- For Chairs:
- Recommended sequence: Brand + Type + Key Feature + Material + Color + Additional Features
- Element explanations:
- - Brand: Quality assurance (Herman Miller, IKEA)
- - Type: Category search (Office Chair, Desk Chair, Gaming Chair)
- - Key Feature: Differentiator (Ergonomic, High Back, Swivel)
- - Material: Durability info (Mesh, Leather, Fabric)
- - Color: Aesthetic match (Black, Gray, White)
- - Additional Features: Conversion boost (Adjustable Arms, Lumbar Support)
- Examples:
- ✓ Good: "Herman Miller Aeron Ergonomic Office Chair, Mesh Fabric, Black, Adjustable Arms"
- ✓ Good: "IKEA Markus Swivel Desk Chair, Leather, Gray, High Back"
- ✗ Bad: "Comfortable Chair" (missing brand, type, features)
- ✗ Bad: "Chair for Office Black Color" (awkward structure, no features)
- CRITICAL INSTRUCTION - TITLE STRUCTURE ANALYSIS:
- You MUST analyze the current product title and identify which elements are present or missing based on the category-specific structure above. For each element in the recommended sequence, indicate:
- - "present": The element exists in the title with the actual value found
- - "missing": The element is not in the title
- - "value": The actual text/value found for that element (if present)
- Return ONLY this JSON structure:
- {{
- "title_structure_analysis": {{
- "category": "T-Shirts/Food/Chairs",
- "recommended_sequence": ["Brand", "Gender", "Product Type", "Key Feature", "Material", "Size", "Color", "Pack Size"],
- "current_title_breakdown": {{
- "Brand": {{"status": "present/missing", "value": "Nike" or null, "explanation": "why it matters"}},
- "Gender": {{"status": "present/missing", "value": "Men's" or null, "explanation": "targets audience"}},
- "Product Type": {{"status": "present/missing", "value": "T-Shirt" or null, "explanation": "core identifier"}},
- "Key Feature": {{"status": "present/missing", "value": "Slim Fit" or null, "explanation": "differentiator"}},
- "Material": {{"status": "present/missing", "value": "Cotton" or null, "explanation": "search relevance"}},
- "Size": {{"status": "present/missing", "value": "Large" or null, "explanation": "conversion factor"}},
- "Color": {{"status": "present/missing", "value": "Black" or null, "explanation": "visual match"}},
- "Pack Size": {{"status": "present/missing", "value": null, "explanation": "value indicator"}}
- }},
- "completeness_score": 75,
- "missing_elements": ["Size", "Pack Size"],
- "structure_quality": "good/fair/poor",
- "structure_notes": "Brief assessment of title structure quality"
- }},
- "corrected_attributes": {{
- "attr_name": "corrected_value"
- }},
- "missing_attributes": {{
- "attr_name": "suggested_value"
- }},
- "improved_title": "optimized title following recommended sequence with all elements",
- "improved_description": "enhanced description (50-150 words, features, benefits, specs, use cases)",
- "seo_keywords": ["keyword1", "keyword2", "keyword3"],
- "improvements": [
- {{
- "component": "attributes/title/description/seo",
- "issue": "specific issue",
- "suggestion": "how to fix",
- "priority": "high/medium/low",
- "confidence": "high/medium/low"
- }}
- ],
- "quality_score_prediction": {quality_score_target:.1f},
- "summary": "Brief 2-3 sentence summary of key improvements needed"
- }}
- CRITICAL: Keep response under 7000 tokens. Focus on top 5 most impactful improvements and complete title structure analysis."""
-
- logger.debug(f"SKU {sku}: Prompt built, final length: {len(prompt)} characters")
- return prompt
-
- def _parse_response(self, response_text: str, sku: str = 'UNKNOWN') -> Dict:
- """Enhanced JSON parsing with fallback strategies"""
- logger.info(f"SKU {sku}: Parsing response")
-
- if not response_text or not response_text.strip():
- logger.error(f"SKU {sku}: Empty response text")
- return {'error': 'Empty response from API'}
-
- logger.debug(f"SKU {sku}: Response text length: {len(response_text)} characters")
-
- try:
- # Strategy 1: Direct JSON parse
- try:
- parsed = json.loads(response_text)
- logger.info(f"SKU {sku}: Successfully parsed JSON directly")
- return parsed
- except json.JSONDecodeError as e:
- logger.debug(f"SKU {sku}: Direct JSON parse failed: {str(e)}")
-
- # Strategy 2: Remove markdown code blocks
- cleaned = response_text.strip()
- if '```' in cleaned:
- logger.debug(f"SKU {sku}: Attempting to remove markdown code blocks")
- match = re.search(r'```(?:json)?\s*(\{.*\})\s*```', cleaned, re.DOTALL)
- if match:
- cleaned = match.group(1)
- logger.debug(f"SKU {sku}: Extracted JSON from code block")
- else:
- cleaned = re.sub(r'```(?:json)?', '', cleaned).strip()
- logger.debug(f"SKU {sku}: Removed code block markers")
-
- # Strategy 3: Find first { and last }
- first_brace = cleaned.find('{')
- last_brace = cleaned.rfind('}')
-
- if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
- cleaned = cleaned[first_brace:last_brace + 1]
- logger.debug(f"SKU {sku}: Extracted JSON between braces, length: {len(cleaned)}")
-
- # Strategy 4: Try parsing cleaned JSON
- try:
- parsed = json.loads(cleaned)
- logger.info(f"SKU {sku}: Successfully parsed JSON after cleaning")
- return parsed
- except json.JSONDecodeError as e:
- logger.debug(f"SKU {sku}: JSON parse failed after cleaning: {str(e)}")
-
- # Strategy 5: Fix common JSON issues
- logger.debug(f"SKU {sku}: Attempting JSON syntax fixes")
- cleaned = self._fix_json_syntax(cleaned)
- try:
- parsed = json.loads(cleaned)
- logger.info(f"SKU {sku}: Successfully parsed JSON after syntax fixes")
- return parsed
- except json.JSONDecodeError as e:
- logger.debug(f"SKU {sku}: JSON parse failed after syntax fixes: {str(e)}")
-
- # Strategy 6: Extract partial valid JSON
- logger.debug(f"SKU {sku}: Attempting partial JSON extraction")
- partial_json = self._extract_partial_json(cleaned)
- if partial_json:
- logger.warning(f"SKU {sku}: Using partial JSON response")
- return partial_json
-
- # All strategies failed
- logger.error(f"SKU {sku}: All JSON parsing strategies failed")
- logger.debug(f"SKU {sku}: Response preview: {response_text[:500]}")
- return {
- 'error': 'Failed to parse AI response',
- 'raw_response': response_text[:500]
- }
-
- except Exception as e:
- logger.error(f"SKU {sku}: Parse exception: {type(e).__name__} - {str(e)}")
- logger.debug(f"SKU {sku}: Full traceback: {traceback.format_exc()}")
- return {
- 'error': f'Parse exception: {str(e)}',
- 'raw_response': response_text[:500] if response_text else 'None'
- }
-
- def _fix_json_syntax(self, json_str: str) -> str:
- """Fix common JSON syntax issues"""
- try:
- # Remove trailing commas before closing brackets
- json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
-
- # Remove trailing content after final }
- last_brace = json_str.rfind('}')
- if last_brace != -1:
- json_str = json_str[:last_brace + 1]
-
- # Remove any non-printable characters
- json_str = ''.join(char for char in json_str if char.isprintable() or char in '\n\r\t')
-
- return json_str
- except Exception as e:
- logger.debug(f"Error in _fix_json_syntax: {str(e)}")
- return json_str
-
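- # Example of what _fix_json_syntax repairs (comment-only sketch; `service` is a
- # hypothetical GeminiAttributeService instance):
- #
- #   broken = '{"seo_keywords": ["a", "b",], "summary": "ok",} trailing text'
- #   fixed = service._fix_json_syntax(broken)
- #   # fixed == '{"seo_keywords": ["a", "b"], "summary": "ok"}'
- #   json.loads(fixed)  # now parses
-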
- def _extract_partial_json(self, json_str: str) -> Optional[Dict]:
- """Extract valid partial JSON"""
- try:
- depth = 0
- start_idx = json_str.find('{')
- if start_idx == -1:
- return None
-
- for i in range(start_idx, len(json_str)):
- if json_str[i] == '{':
- depth += 1
- elif json_str[i] == '}':
- depth -= 1
- if depth == 0:
- try:
- return json.loads(json_str[start_idx:i+1])
- except json.JSONDecodeError:
- continue
- return None
- except Exception as e:
- logger.debug(f"Error in _extract_partial_json: {str(e)}")
- return None
-
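- # Example of the brace-matching extraction above (comment-only sketch; `service`
- # is a hypothetical GeminiAttributeService instance):
- #
- #   text = '{"improved_title": "Nike Cotton T-Shirt"} plus trailing prose'
- #   service._extract_partial_json(text)
- #   # -> {'improved_title': 'Nike Cotton T-Shirt'}
- #   service._extract_partial_json('no braces here')  # -> None
-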
- def _generate_fallback_suggestions(self, issues: List[str]) -> List[Dict]:
- """Generate fallback suggestions based on issues"""
- logger.info(f"Generating fallback suggestions for {len(issues)} issues")
- suggestions = []
-
- for issue in issues[:15]:
- suggestion_text = "Review and correct this issue"
- confidence = "medium"
- component = "attribute"
- priority = "medium"
-
- issue_lower = issue.lower()
-
- # Determine component
- if issue.startswith('Title:'):
- component = "title"
- elif issue.startswith('Description:'):
- component = "description"
- elif issue.startswith('SEO:'):
- component = "seo"
-
- # Specific suggestions
- if "missing mandatory" in issue_lower:
- attr = issue.split(":")[-1].strip()
- suggestion_text = f"Add required {attr} - check product packaging or manufacturer details"
- priority = "high"
- confidence = "high"
- elif "too short" in issue_lower:
- if "title" in issue_lower:
- suggestion_text = "Expand title to 50-100 characters including brand, model, and key features"
- component = "title"
- priority = "high"
- elif "description" in issue_lower:
- suggestion_text = "Write comprehensive 50-150 word description with features, benefits, and specifications"
- component = "description"
- priority = "high"
- else:
- suggestion_text = "Provide more detailed information"
- elif "placeholder" in issue_lower:
- suggestion_text = "Replace with actual product data from manufacturer or packaging"
- priority = "high"
- elif "grammar" in issue_lower or "spelling" in issue_lower:
- suggestion_text = "Run spell-check and grammar review, ensure professional language"
- component = "description"
- priority = "medium"
- elif "keyword" in issue_lower or "seo" in issue_lower:
- suggestion_text = "Add relevant search keywords and product attributes"
- component = "seo"
- priority = "medium"
- elif "duplicate" in issue_lower or "repetit" in issue_lower:
- suggestion_text = "Remove duplicate content, provide varied information with unique details"
- component = "description"
- priority = "medium"
- elif "not recognized" in issue_lower or "invalid" in issue_lower:
- suggestion_text = "Use standardized values from category rules"
- priority = "high"
- confidence = "high"
-
- suggestions.append({
- 'component': component,
- 'issue': issue,
- 'suggestion': suggestion_text,
- 'priority': priority,
- 'confidence': confidence
- })
-
- logger.info(f"Generated {len(suggestions)} fallback suggestions")
- return suggestions
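-
- # End-to-end sketch for a single product (comment-only: running it requires
- # Django settings with GEMINI_API_KEY plus network access, and the sample data
- # below is hypothetical):
- #
- #   service = GeminiAttributeService()
- #   suggestions = service.generate_comprehensive_suggestions(
- #       product={'sku': 'CHR-001', 'category': 'Chairs',
- #                'title': 'Office Chair', 'description': 'Nice chair.',
- #                'attributes': {}},
- #       issues=['Title: Title too short', 'Missing mandatory attribute: Material'],
- #       category_rules=[{'attribute_name': 'Material', 'is_mandatory': True,
- #                        'valid_values': ['Mesh', 'Leather']}],
- #       scores={'title_quality': 40.0, 'mandatory_fields': 50.0},
- #   )
- #   # On success, `suggestions` holds the keys requested in the prompt (e.g.
- #   # 'improved_title', 'title_structure_analysis', 'improvements'); on failure
- #   # it holds 'error' and 'fallback_suggestions'.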
-
-