#!/usr/bin/env python3
"""
832平台爬虫 - 最终版本
"""

import asyncio
import csv
import re
from datetime import datetime
from playwright.async_api import async_playwright

# Crawler settings: output CSV path and how many list pages to fetch.
CONFIG = {
    "output_file": "henan_products_final_v3.csv",
    "max_pages": 3,
}

# Session cookies injected into the browser context before navigation.
# WARNING: this is a hard-coded bearer JWT tied to one user account; it will
# expire (see its `exp` claim) and should not be committed to source control.
COOKIES = [
    {"name": "gxyj_Sign-In-Token", "value": "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJjb2RlIjpudWxsLCJ1c2VyX25hbWUiOm51bGwsImNvbXBhbnlOYW1lIjoiIiwiY2xpZW50X2lkIjoiVU5JRklDQVRJT04iLCJhY2NvdW50U3RhdHVzIjoxLCJpc0ZyZWV6ZSI6MiwidWlkIjoiMTM5OTkwNzMxMDg5NzI4NDUyOCIsInB1cmNoYXNlclByb3BlcnR5IjowLCJjb21wYW55Tm8iOiIzMDAwMTA2MDE3IiwiZ3JhbnRfdHlwZSI6Im11bHRpX3Bhc3N3b3JkIiwic2NvcGUiOlsiYWxsIl0sImxvZ2luTmFtZSI6ImNoaW5hZGF2aWQiLCJleHAiOjE3NzMxOTgyNzEsImp0aSI6IjJmM2FiMGZhLTg2MGItNGVlNS05MTM2LThjNDU4MGIzZjRlNSIsInN0YXRpb25JZCI6IjEiLCJhZG1pblR5cGUiOjEsImN1cnJlbnRTdGF0aW9uSWQiOiIxIiwiYWNjb3VudFR5cGUiOiIzIiwiYWNjTm8iOiJBQ0MyNjAzMTAzMDAwMDAwMDAwMDAwMDcyIiwiY29tcGFueVN0YXR1cyI6MSwiYXV0aG9yaXRpZXMiOlsicm9vdCJdLCJhdWQiOlsiMSJdLCJwaG9uZSI6IjE4MTEwMDc4NzYyIiwibWFpbklkIjoiMTM5OTkwNzMxMDg5NzI4NDUzMCIsInVzZXJuYW1lIjoiY2hpbmFkYXZpZCJ9.CctYfcFiUdPUdmXEV7StkAU9PlAWgM-0v4dk6L6beV3SK93HT-b5g9tHIpO45CZTPa2pXw3GMK5X3dg6v5Y9Olxl35DAXYWhQVC2NjI-qkpOIfSSBM8i-DXR_HyIWpyeXWJBTWk4BdWZZPt8wkWzD5SGwM2-whcVQt5phsDrB8c", "domain": ".fupin832.com"},
]

# Column order of the output CSV (Chinese headers; written with a UTF-8 BOM
# later so spreadsheet software renders them correctly).
CSV_HEADERS = ["序号", "商品名称", "单价(元)", "销量", "供应商名称", "联系电话", "商品链接", "抓取时间"]

def extract_supplier(name):
    """Guess a supplier/origin prefix from a product name.

    Checks the administrative-division suffixes in the fixed priority order
    县, 市, 旗, 区. For the first suffix present in *name*, if the text before
    its first occurrence is at least two characters long, that text plus the
    suffix is returned; otherwise the remaining suffixes are tried. Returns
    '' when nothing matches.
    """
    for suffix in ('县', '市', '旗', '区'):
        if suffix not in name:
            continue
        prefix = name.partition(suffix)[0]
        if len(prefix) >= 2:
            return prefix + suffix
    return ''

async def main():
    """Crawl the 832 platform product list for Henan province (areaCode=410000).

    For each of ``CONFIG['max_pages']`` list pages this:
      1. loads the page with the auth cookies from ``COOKIES``,
      2. scrolls in steps to trigger lazy-loaded product cards,
      3. extracts (name, price) pairs in-page via JavaScript,
      4. derives a supplier prefix with :func:`extract_supplier`.

    Deduplicated results are written to ``CONFIG['output_file']`` as CSV with
    the ``CSV_HEADERS`` columns.

    Fix: the Playwright driver and the browser are now released in a
    ``finally`` block, so an exception mid-crawl (navigation timeout, etc.)
    no longer leaks the browser process or the driver subprocess.
    """
    print("🚀 启动爬虫...")

    pw = await async_playwright().start()
    browser = None
    try:
        browser = await pw.chromium.launch(headless=True, args=['--no-sandbox'])
        context = await browser.new_context(viewport={"width": 1920, "height": 1080})

        # One batch add_cookies call instead of one call per cookie.
        await context.add_cookies([
            {"name": c["name"], "value": c["value"], "domain": c["domain"], "path": "/"}
            for c in COOKIES
        ])

        page = await context.new_page()

        all_products = []

        for pg in range(1, CONFIG['max_pages'] + 1):
            print(f"\n📄 第 {pg}/{CONFIG['max_pages']} 页")

            # Page 1 is the bare listing URL; later pages append a &page= param.
            url = "https://ys.fupin832.com/product/list?areaCode=410000"
            if pg > 1:
                url += f"&page={pg}"

            await page.goto(url, timeout=30000)
            await page.wait_for_load_state("networkidle")
            await page.wait_for_timeout(3000)

            # Scroll in three steps so lazily-rendered cards appear in the DOM.
            for i in range(3):
                await page.evaluate(f"window.scrollBy(0, {(i+1)*600})")
                await page.wait_for_timeout(1000)

            # In-page extraction: scan every <div> containing a ￥ price, take
            # the first plausible product-name line, skip navigation chrome,
            # dedupe by (name prefix, price), cap at 50 hits per page.
            products = await page.evaluate("""() => {
            const results = [];
            const seen = new Set();
            
            document.querySelectorAll('div').forEach(div => {
                const text = div.innerText?.trim() || '';
                if (!text.includes('￥')) return;
                
                const priceMatch = text.match(/￥\\s*(\\d+\\.?\\d*)/);
                if (!priceMatch) return;
                const price = priceMatch[1];
                
                const lines = text.split('\\n').map(l => l.trim()).filter(l => l);
                let name = '';
                for (const line of lines) {
                    if (!line.match(/^￥/) && !line.match(/^\\d+\\.?\\d*$/) && line.length > 5) {
                        name = line;
                        break;
                    }
                }
                
                if (!name || name.length < 5) return;
                const skipWords = ['资讯', '通知', '公告', '更多', '客服', '登录', '购物车', '首页', '关于', '联系'];
                if (skipWords.some(w => name.startsWith(w))) return;
                
                const key = name.substring(0, 20) + price;
                if (seen.has(key)) return;
                seen.add(key);
                
                results.push({name: name.substring(0, 100), price: price});
            });
            
            return results.slice(0, 50);
        }""")

            print(f"  提取: {len(products)} 条")

            # Supplier is derived from the name; the list page exposes no
            # contact info, so that column stays empty.
            for p in products:
                p['supplier'] = extract_supplier(p.get('name', ''))
                p['contact'] = ''

            all_products.extend(products)
            print(f"  累计: {len(all_products)} 条")

        if all_products:
            # Cross-page dedupe keyed on (first 20 chars of name, price).
            cleaned = []
            seen = set()
            for p in all_products:
                key = p.get('name', '')[:20] + str(p.get('price', ''))
                if key not in seen:
                    seen.add(key)
                    cleaned.append(p)

            # utf-8-sig writes a BOM so Excel opens the Chinese text correctly.
            with open(CONFIG["output_file"], 'w', newline='', encoding='utf-8-sig') as f:
                w = csv.DictWriter(f, fieldnames=CSV_HEADERS)
                w.writeheader()
                for i, p in enumerate(cleaned, 1):
                    w.writerow({
                        "序号": i,
                        "商品名称": p.get('name', ''),
                        "单价(元)": p.get('price', ''),
                        "销量": '',
                        "供应商名称": p.get('supplier', ''),
                        "联系电话": p.get('contact', ''),
                        "商品链接": '',
                        "抓取时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    })

            sup_cnt = len([p for p in cleaned if p.get('supplier')])
            print(f"\n✅ 保存: {CONFIG['output_file']}")
            print(f"  总数: {len(cleaned)}")
            print(f"  有供应商: {sup_cnt}")

            print(f"\n📋 预览:")
            for i, p in enumerate(cleaned[:8], 1):
                print(f"  {i}. {p.get('name', '')[:35]}")
                print(f"     💰{p.get('price', '')}元 | 🏪{p.get('supplier', '')}")
    finally:
        # Always release the browser and the Playwright driver, even on error.
        if browser is not None:
            await browser.close()
        await pw.stop()

    print("\n🎉 完成!")

# Script entry point: run the async crawler to completion.
if __name__ == "__main__":
    asyncio.run(main())
