#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
离职日志统一处理ETL脚本
=======================================
功能：
1. 扫描MDP, HUB, POTS, WDTS系统的日志文件
2. 解析不同格式的日志
3. 输出统一格式的CSV文件供Power BI使用

输出文件：
- Unified_Termination_Logs.csv: 详细日志
- Daily_Summary.csv: 每日汇总
- Error_Details.csv: 错误明细

使用方法：
python logs_etl.py                    # 处理所有日志
python logs_etl.py --incremental      # 增量处理（只处理新文件）
python logs_etl.py --date 2026-02-02  # 只处理指定日期
"""

import os
import re
import csv
import json
import pandas as pd
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional
import argparse
import logging

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs_etl.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class LogParserBase:
    """日志解析器基类"""

    def __init__(self, system_name: str, base_dir: Path):
        self.system_name = system_name
        self.base_dir = base_dir
        self.parsed_records = []
        self.summary = {
            'system': system_name,
            'total_processed': 0,
            'success_count': 0,
            'failed_count': 0,
            'skipped_count': 0,
            'errors': []
        }

    def parse(self):
        """解析日志，子类需实现"""
        raise NotImplementedError

    def get_records(self) -> List[Dict[str, Any]]:
        return self.parsed_records

    def get_summary(self) -> Dict[str, Any]:
        return self.summary


class MDPParser(LogParserBase):
    """MDP系统日志解析器"""

    def __init__(self, base_dir: Path):
        super().__init__('MDP', base_dir)

    def parse(self):
        """解析MDP CSV报告文件"""
        logger.info(f"[{self.system_name}] 开始解析...")

        # Find all Termination_Report CSV files
        csv_files = list(self.base_dir.glob('Termination_Report_*.csv'))

        if not csv_files:
            logger.warning(f"[{self.system_name}] No log files found")
            return

        for csv_file in csv_files:
            try:
                # Extract the date from the file name
                date_match = re.search(r'(\d{8})', csv_file.stem)
                if date_match:
                    execution_date = datetime.strptime(date_match.group(1), '%Y%m%d').strftime('%Y-%m-%d')
                else:
                    execution_date = datetime.now().strftime('%Y-%m-%d')

                df = pd.read_csv(csv_file, encoding='utf-8-sig')
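                # Columns assumed present in the report, inferred from the
                # row.get calls below (hypothetical example row):
                #   Timestamp,EmployeeId,DisplayName,Status,Message,SystemUserId
                #   2026-02-02 10:55:08,12345,Jane Doe,success,Disabled,U-001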

                for _, row in df.iterrows():
                    record = {
                        'ExecutionDate': execution_date,
                        # Last whitespace token = HH:MM:SS; guards empty or space-free timestamps
                        'ExecutionTime': str(row.get('Timestamp', '')).split()[-1] if pd.notna(row.get('Timestamp')) and str(row.get('Timestamp')).strip() else '',
                        'System': self.system_name,
                        'LogFile': csv_file.name,
                        'EmployeeID': str(row.get('EmployeeId', '')) if pd.notna(row.get('EmployeeId')) else '',
                        'UserName': row.get('DisplayName', '') if pd.notna(row.get('DisplayName')) else '',
                        'Action': 'Disable User',
                        'Status': 'SUCCESS' if row.get('Status') == 'success' else 'FAILED',
                        'Message': row.get('Message', '') if pd.notna(row.get('Message')) else '',
                        'Property': '',
                        'Position': '',
                        'ExtraInfo': json.dumps({
                            'SystemUserId': row.get('SystemUserId', '')
                        }, ensure_ascii=False)
                    }
                    self.parsed_records.append(record)

                    # Update running totals
                    self.summary['total_processed'] += 1
                    if record['Status'] == 'SUCCESS':
                        self.summary['success_count'] += 1
                    else:
                        self.summary['failed_count'] += 1
                        self.summary['errors'].append({
                            'employee_id': record['EmployeeID'],
                            'error': record['Message']
                        })

            except Exception as e:
                logger.error(f"[{self.system_name}] 解析文件 {csv_file.name} 失败: {e}")

        logger.info(f"[{self.system_name}] 解析完成: {len(self.parsed_records)} 条记录")


class HUBParser(LogParserBase):
    """HUB系统日志解析器"""

    def __init__(self, base_dir: Path):
        super().__init__('HUB', base_dir)
        self.log_info = {}  # timing and stats extracted from the text logs

    def parse(self):
        """解析HUB日志和CSV结果文件"""
        logger.info(f"[{self.system_name}] 开始解析...")

        # 1. Parse the text logs first (to extract timing and summary stats)
        log_files = list(self.base_dir.glob('BatchInactive_Auto_*.log'))
        for log_file in log_files:
            self._parse_log_file(log_file)

        # 2. Then parse the CSV result files (merging in info from the logs)
        csv_files = list(self.base_dir.glob('*_Result_*.csv'))
        for csv_file in csv_files:
            self._parse_result_csv(csv_file)

        logger.info(f"[{self.system_name}] 解析完成: {len(self.parsed_records)} 条记录")

    def _parse_result_csv(self, csv_file: Path):
        """解析HUB CSV结果文件"""
        try:
            df = pd.read_csv(csv_file, encoding='utf-8-sig')

            # Extract the date from the file name
            date_match = re.search(r'(\d{8})', csv_file.stem)
            if date_match:
                execution_date = datetime.strptime(date_match.group(1), '%Y%m%d').strftime('%Y-%m-%d')
            else:
                execution_date = datetime.now().strftime('%Y-%m-%d')

            # Determine the action type
            action = 'Disable User' if 'DisableUser' in csv_file.stem else 'Disable Company Account'

            # Get the execution time captured from the text log
            log_key = (execution_date, action)
            execution_time = self.log_info.get(log_key, {}).get('start_time', '')

            for _, row in df.iterrows():
                employee_id = str(row.get('EmployeeID', '')) if pd.notna(row.get('EmployeeID')) else ''

                record = {
                    'ExecutionDate': execution_date,
                    'ExecutionTime': execution_time,
                    'System': self.system_name,
                    'LogFile': csv_file.name,
                    'EmployeeID': employee_id,
                    'UserName': row.get('SamAccountName', '') if pd.notna(row.get('SamAccountName')) else '',
                    'Action': action,
                    'Status': str(row.get('Status', '')).upper() if pd.notna(row.get('Status')) else '',  # guard against NaN
                    'Message': row.get('Message', '') if pd.notna(row.get('Message')) else '',
                    'Property': '',
                    'Position': '',
                    'ExtraInfo': ''
                }
                self.parsed_records.append(record)

                # Update running totals
                self.summary['total_processed'] += 1
                if record['Status'] == 'SUCCESS':
                    self.summary['success_count'] += 1
                elif record['Status'] == 'SKIPPED':
                    self.summary['skipped_count'] += 1
                else:
                    # PARTIAL and any other status count as failures
                    self.summary['failed_count'] += 1

        except Exception as e:
            logger.error(f"[{self.system_name}] 解析CSV {csv_file.name} 失败: {e}")

    def _parse_log_file(self, log_file: Path):
        """解析HUB文本日志"""
        try:
            with open(log_file, 'r', encoding='utf-8') as f:
                content = f.read()

            # Extract the date from the file name
            date_match = re.search(r'(\d{8})', log_file.stem)
            if date_match:
                execution_date = datetime.strptime(date_match.group(1), '%Y%m%d').strftime('%Y-%m-%d')
            else:
                execution_date = datetime.now().strftime('%Y-%m-%d')

            # Extract the start time
            start_time_match = re.search(
                r'\[(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})\].*?Complete Termination Mode',
                content
            )
            if start_time_match:
                start_time = start_time_match.group(1).split()[-1]
                # Record the start time for both action types
                self.log_info[(execution_date, 'Disable User')] = {'start_time': start_time}
                self.log_info[(execution_date, 'Disable Company Account')] = {'start_time': start_time}

            # Extract Phase 1 (Disable Company Account) statistics
            phase1_match = re.search(
                r'=== Phase 1: Disabling Company Accounts ===.*?'
                r'Success:\s+(\d+)\s+\|\s+Skipped:\s+(\d+)\s+\|\s+Failed:\s+(\d+)',
                content,
                re.DOTALL
            )
            if phase1_match:
                success = int(phase1_match.group(1))
                skipped = int(phase1_match.group(2))
                failed = int(phase1_match.group(3))
                # Store the company-account stats; setdefault guards against
                # a missing start-time entry
                self.log_info.setdefault((execution_date, 'Disable Company Account'), {})['stats'] = {
                    'success': success, 'skipped': skipped, 'failed': failed
                }

            # Extract Phase 2 (Disable HUB Users) statistics
            phase2_match = re.search(
                r'=== Phase 2: Disabling HUB Users ===.*?'
                r'Success:\s+(\d+)\s+\|\s+Failed:\s+(\d+)\s+\|\s+Total:\s+(\d+)',
                content,
                re.DOTALL
            )
            if phase2_match:
                success = int(phase2_match.group(1))
                failed = int(phase2_match.group(2))
                total = int(phase2_match.group(3))
                # Store the user stats; setdefault guards against a missing
                # start-time entry
                self.log_info.setdefault((execution_date, 'Disable User'), {})['stats'] = {
                    'success': success, 'failed': failed, 'total': total
                }

            # Extract the overall summary statistics (fallback)
            summary_match = re.search(
                r'Success:\s+(\d+)\s+\|\s+Failed:\s+(\d+)\s+\|\s+Total:\s+(\d+)',
                content
            )
            if summary_match and self.summary['total_processed'] == 0:
                # Only fall back to the log's statistics when no CSV files were parsed
                self.summary['success_count'] = int(summary_match.group(1))
                self.summary['failed_count'] = int(summary_match.group(2))
                self.summary['total_processed'] = int(summary_match.group(3))

        except Exception as e:
            logger.error(f"[{self.system_name}] 解析日志 {log_file.name} 失败: {e}")


class POTSParser(LogParserBase):
    """POTS系统日志解析器"""

    def __init__(self, base_dir: Path):
        super().__init__('POTS', base_dir)

    def parse(self):
        """解析POTS日志文件"""
        logger.info(f"[{self.system_name}] 开始解析...")

        log_files = list(self.base_dir.glob('BatchTermination_DISABLE_*.log'))

        if not log_files:
            logger.warning(f"[{self.system_name}] No log files found")
            return

        for log_file in log_files:
            self._parse_log_file(log_file)

        logger.info(f"[{self.system_name}] 解析完成: {len(self.parsed_records)} 条记录")

    def _parse_log_file(self, log_file: Path):
        """解析POTS日志文件"""
        try:
            with open(log_file, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            # Extract the date from the file name
            date_match = re.search(r'(\d{8})', log_file.stem)
            if date_match:
                execution_date = datetime.strptime(date_match.group(1), '%Y%m%d').strftime('%Y-%m-%d')
            else:
                execution_date = datetime.now().strftime('%Y-%m-%d')

            # Extract the start time
            start_time = ''
            for line in lines:
                if 'Start Time:' in line:
                    time_match = re.search(r'Start Time:\s+([\d/:]+\s+[\d:]+)', line)
                    if time_match:
                        # Convert "02/02/2026 10:55:08" -> "10:55:08"
                        start_time = time_match.group(1).split(' ')[-1]
                    break

            # State-machine parse over per-employee blocks
            current_employee_id = None
            account_details = {}

            for line in lines:
                # Match: Processing EmployeeID: xxx
                emp_match = re.search(r'Processing EmployeeID:\s+(\d+)', line)
                if emp_match:
                    current_employee_id = emp_match.group(1)
                    account_details = {}
                    continue

                # No employee block is active yet; skip
                if not current_employee_id:
                    continue

                # Capture the block's first timestamp (if not already set)
                if not account_details.get('timestamp'):
                    time_match = re.search(r'\[([\d\-:\s]+)\]', line)
                    if time_match:
                        account_details['timestamp'] = time_match.group(1)

                # Extract account detail fields
                if 'ID:' in line:
                    id_match = re.search(r'ID:\s+([\w\-]+)', line)
                    if id_match:
                        account_details['id'] = id_match.group(1)

                elif 'UserName:' in line:
                    user_match = re.search(r'UserName:\s+(\w+)', line)
                    if user_match:
                        account_details['username'] = user_match.group(1)

                elif 'EmployeeName:' in line:
                    name_match = re.search(r'EmployeeName:\s+(.+)', line)
                    if name_match:
                        account_details['employee_name'] = name_match.group(1).strip()

                elif 'StaffNumber:' in line:
                    staff_match = re.search(r'StaffNumber:\s+(\d+)', line)
                    if staff_match:
                        account_details['staff_number'] = staff_match.group(1)

                elif 'Property:' in line:
                    prop_match = re.search(r'Property:\s+(.+)', line)
                    if prop_match:
                        account_details['property'] = prop_match.group(1).strip()

                elif 'Position:' in line:
                    pos_match = re.search(r'Position:\s+(.+)', line)
                    if pos_match:
                        account_details['position'] = pos_match.group(1).strip()

                elif 'Current Status:' in line:
                    status_match = re.search(r'Current Status:\s+(\w+)', line)
                    if status_match:
                        account_details['current_status'] = status_match.group(1)

                # Check for a processing outcome
                if 'Account is already DISABLED' in line:
                    # Create a SKIPPED record
                    record = self._create_record(
                        execution_date, account_details.get('timestamp', start_time),
                        log_file.name, current_employee_id, account_details,
                        'SKIPPED', 'Account already disabled'
                    )
                    self.parsed_records.append(record)
                    self._update_stats('SKIPPED')
                    account_details = {}
                    current_employee_id = None

                elif 'Successfully DISABLED' in line:
                    # Create a SUCCESS record
                    username_match = re.search(r'Successfully DISABLED account:\s+(\w+)', line)
                    if username_match:
                        account_details['username'] = username_match.group(1)

                    record = self._create_record(
                        execution_date, account_details.get('timestamp', start_time),
                        log_file.name, current_employee_id, account_details,
                        'SUCCESS', 'Successfully DISABLED'
                    )
                    self.parsed_records.append(record)
                    self._update_stats('SUCCESS')
                    account_details = {}
                    current_employee_id = None

                elif 'No accounts found for EmployeeID' in line:
                    # Create a FAILED record (no accounts found); re-extract
                    # the employee ID from the line itself
                    emp_match = re.search(r'No accounts found for EmployeeID:\s+(\d+)', line)
                    if emp_match:
                        emp_id = emp_match.group(1)
                        record = {
                            'ExecutionDate': execution_date,
                            'ExecutionTime': start_time,
                            'System': self.system_name,
                            'LogFile': log_file.name,
                            'EmployeeID': emp_id,
                            'UserName': '',
                            'Action': 'Disable Account',
                            'Status': 'FAILED',
                            'Message': 'No accounts found',
                            'Property': '',
                            'Position': '',
                            'ExtraInfo': ''
                        }
                        self.parsed_records.append(record)
                        self._update_stats('FAILED')

            # Extract the summary statistics
            content = ''.join(lines)
            summary_match = re.search(
                r'Total EmployeeIDs Processed:\s+(\d+).*?'
                r'Total Accounts Processed \(DISABLE\):\s+(\d+).*?'
                r'Total Accounts Skipped:\s+(\d+)',
                content,
                re.DOTALL
            )
            if summary_match and self.summary['total_processed'] == 0:
                # Log the file's own summary as a cross-check
                total = int(summary_match.group(1))
                processed = int(summary_match.group(2))
                skipped = int(summary_match.group(3))
                logger.info(f"[{self.system_name}] From summary stats: {processed} succeeded, {skipped} skipped, {total - processed - skipped} not found")

        except Exception as e:
            logger.error(f"[{self.system_name}] Failed to parse log {log_file.name}: {e}")
            import traceback
            traceback.print_exc()

    def _create_record(self, execution_date, timestamp, log_file, employee_id, details, status, message):
        """创建记录字典"""
        # 从时间戳提取时分秒
        if timestamp:
            execution_time = timestamp.split(' ')[-1] if ' ' in timestamp else timestamp
        else:
            execution_time = ''

        return {
            'ExecutionDate': execution_date,
            'ExecutionTime': execution_time,
            'System': self.system_name,
            'LogFile': log_file,
            'EmployeeID': details.get('staff_number', employee_id),
            'UserName': details.get('username', ''),
            'Action': 'Disable Account',
            'Status': status,
            'Message': message,
            'Property': details.get('property', ''),
            'Position': details.get('position', ''),
            'ExtraInfo': json.dumps({
                'AccountID': details.get('id', ''),
                'EmployeeName': details.get('employee_name', ''),
                'PreviousStatus': details.get('current_status', '')
            }, ensure_ascii=False)
        }

    def _update_stats(self, status):
        """更新统计信息"""
        self.summary['total_processed'] += 1
        if status == 'SUCCESS':
            self.summary['success_count'] += 1
        elif status == 'SKIPPED':
            self.summary['skipped_count'] += 1
        else:
            self.summary['failed_count'] += 1


class WDTSParser(LogParserBase):
    """WDTS系统日志解析器"""

    def __init__(self, base_dir: Path):
        super().__init__('WDTS', base_dir)

    def parse(self):
        """解析WDTS日志文件"""
        logger.info(f"[{self.system_name}] 开始解析...")

        log_files = list((self.base_dir / 'log').glob('TerminationLog_*.log'))

        if not log_files:
            logger.warning(f"[{self.system_name}] No log files found")
            return

        for log_file in log_files:
            self._parse_log_file(log_file)

        logger.info(f"[{self.system_name}] Parse complete: {len(self.parsed_records)} records")

    def _parse_log_file(self, log_file: Path):
        """解析WDTS日志文件"""
        try:
            with open(log_file, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            # Extract the date from the file name
            date_match = re.search(r'(\d{8})', log_file.stem)
            if date_match:
                execution_date = datetime.strptime(date_match.group(1), '%Y%m%d').strftime('%Y-%m-%d')
            else:
                execution_date = datetime.now().strftime('%Y-%m-%d')

            current_employee = None
            user_id = ''  # UserId from the most recent "Found user" line

            for line in lines:
                # Match: Processing EmployeeID: xxx
                emp_match = re.search(r'Processing EmployeeID:\s+(\d+)', line)
                if emp_match:
                    current_employee = emp_match.group(1)
                    user_id = ''  # reset so a stale UserId is never carried over
                    continue

                # Match: user not found
                if 'User not found for EmployeeID' in line and current_employee:
                    record = {
                        'ExecutionDate': execution_date,
                        'ExecutionTime': '',
                        'System': self.system_name,
                        'LogFile': log_file.name,
                        'EmployeeID': current_employee,
                        'UserName': '',
                        'Action': 'Disable User',
                        'Status': 'FAILED',
                        'Message': 'User not found',
                        'Property': '',
                        'Position': '',
                        'ExtraInfo': ''
                    }
                    self.parsed_records.append(record)
                    self.summary['total_processed'] += 1
                    self.summary['failed_count'] += 1
                    current_employee = None
                    continue

                # Match: Found user: xxx (UserId: yyy)
                found_match = re.search(
                    r'Found user:\s+(\d+)\s+\(UserId:\s+(\d+)\)',
                    line
                )
                if found_match and current_employee:
                    employee_id = found_match.group(1)
                    user_id = found_match.group(2)
                    current_employee = employee_id
                    continue

                # Match: successfully suspended
                success_match = re.search(
                    r'Successfully suspended UserId:\s+\d+\s+\(EmployeeID:\s+(\d+)\)',
                    line
                )
                if success_match:
                    employee_id = success_match.group(1)

                    # Extract the timestamp
                    time_match = re.search(r'\[([\d\-:\s]+)\]', line)
                    timestamp = time_match.group(1) if time_match else ''

                    record = {
                        'ExecutionDate': execution_date,
                        'ExecutionTime': timestamp.split()[-1] if timestamp.strip() else '',
                        'System': self.system_name,
                        'LogFile': log_file.name,
                        'EmployeeID': employee_id,
                        'UserName': '',
                        'Action': 'Disable User',
                        'Status': 'SUCCESS',
                        'Message': 'Successfully suspended',
                        'Property': '',
                        'Position': '',
                        'ExtraInfo': json.dumps({'UserId': user_id}, ensure_ascii=False)
                    }
                    self.parsed_records.append(record)
                    self.summary['total_processed'] += 1
                    self.summary['success_count'] += 1
                    current_employee = None

            # Extract the summary statistics
            content = ''.join(lines)
            summary_match = re.search(
                r'Total processed:\s+(\d+).*?'
                r'Successfully suspended:\s+(\d+).*?'
                r'Failed to suspend:\s+(\d+).*?'
                r'Users not found:\s+(\d+)',
                content,
                re.DOTALL
            )
            if summary_match:
                # Fall back to the file's own summary when no detailed records were parsed
                if self.summary['total_processed'] == 0:
                    self.summary['total_processed'] = int(summary_match.group(1))
                    self.summary['success_count'] = int(summary_match.group(2))
                    self.summary['failed_count'] = int(summary_match.group(3)) + int(summary_match.group(4))

        except Exception as e:
            logger.error(f"[{self.system_name}] Failed to parse log {log_file.name}: {e}")
            import traceback
            traceback.print_exc()


class ETLPipeline:
    """ETL流程管理器"""

    def __init__(self, base_dir: Path, output_dir: Path):
        self.base_dir = base_dir
        self.output_dir = output_dir
        self.output_dir.mkdir(exist_ok=True)

        # Create subdirectories
        (self.output_dir / 'unparsed').mkdir(exist_ok=True)

        self.parsers = [
            MDPParser(base_dir / 'MDP'),
            HUBParser(base_dir / 'Hub'),
            POTSParser(base_dir / 'POTS'),
            WDTSParser(base_dir / 'wdts')
        ]
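        # To support another system, subclass LogParserBase, implement
        # parse(), and register it here (hypothetical example):
        #   self.parsers.append(MyNewParser(base_dir / 'MyNew'))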

    def run(self, target_date: Optional[str] = None):
        """执行ETL流程"""
        logger.info("="*60)
        logger.info("离职日志ETL处理开始")
        logger.info("="*60)

        all_records = []
        all_summaries = []
        all_errors = []

        # Run every parser
        for parser in self.parsers:
            try:
                parser.parse()
                records = parser.get_records()
                summary = parser.get_summary()

                all_records.extend(records)
                all_summaries.append(summary)

                # Collect error details
                for error in summary.get('errors', []):
                    error['system'] = summary['system']
                    all_errors.append(error)

            except Exception as e:
                logger.error(f"解析器 {parser.system_name} 执行失败: {e}")
                import traceback
                traceback.print_exc()

        # Filter by date (if specified)
        if target_date:
            all_records = [r for r in all_records if r['ExecutionDate'] == target_date]
            logger.info(f"Filtered to date {target_date}: {len(all_records)} records")

        # Write the output files
        self._save_unified_logs(all_records)
        self._save_daily_summary(all_summaries, all_records)
        self._save_error_details(all_errors)

        logger.info("="*60)
        logger.info(f"ETL处理完成！共处理 {len(all_records)} 条记录")
        logger.info(f"输出目录: {self.output_dir}")
        logger.info("="*60)

    def _save_unified_logs(self, records: List[Dict[str, Any]]):
        """保存统一格式日志"""
        if not records:
            logger.warning("没有记录可保存")
            return

        output_file = self.output_dir / 'Unified_Termination_Logs.csv'

        # Define the column order
        fieldnames = [
            'ExecutionDate', 'ExecutionTime', 'System', 'LogFile',
            'EmployeeID', 'UserName', 'Action', 'Status',
            'Message', 'Property', 'Position', 'ExtraInfo'
        ]

        with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
            writer.writeheader()
            writer.writerows(records)

        logger.info(f"✓ 已保存: {output_file.name} ({len(records)} 条)")

    def _save_daily_summary(self, summaries: List[Dict[str, Any]], records: List[Dict[str, Any]]):
        """保存每日汇总"""
        output_file = self.output_dir / 'Daily_Summary.csv'

        # Aggregate records by date and system
        summary_data = {}

        for record in records:
            key = (record['ExecutionDate'], record['System'])
            if key not in summary_data:
                summary_data[key] = {
                    'ExecutionDate': record['ExecutionDate'],
                    'System': record['System'],
                    'TotalProcessed': 0,
                    'SuccessCount': 0,
                    'FailedCount': 0,
                    'SkippedCount': 0
                }

            summary_data[key]['TotalProcessed'] += 1
            if record['Status'] == 'SUCCESS':
                summary_data[key]['SuccessCount'] += 1
            elif record['Status'] == 'FAILED':
                summary_data[key]['FailedCount'] += 1
            elif record['Status'] == 'SKIPPED':
                summary_data[key]['SkippedCount'] += 1

        # Compute the success rate
        for data in summary_data.values():
            if data['TotalProcessed'] > 0:
                data['SuccessRate'] = round((data['SuccessCount'] / data['TotalProcessed']) * 100, 2)
            else:
                data['SuccessRate'] = 0

        # Convert to a list and sort by date and system
        summary_list = sorted(summary_data.values(), key=lambda x: (x['ExecutionDate'], x['System']))

        with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
            fieldnames = ['ExecutionDate', 'System', 'TotalProcessed', 'SuccessCount',
                         'FailedCount', 'SkippedCount', 'SuccessRate']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(summary_list)

        logger.info(f"✓ 已保存: {output_file.name} ({len(summary_list)} 行)")

    def _save_error_details(self, errors: List[Dict[str, Any]]):
        """保存错误明细"""
        if not errors:
            logger.info("没有错误记录")
            return

        output_file = self.output_dir / 'Error_Details.csv'

        # Group and count by error type
        error_stats = {}
        for error in errors:
            error_type = error.get('error', 'Unknown')
            employee_id = error.get('employee_id', '')

            key = (error['system'], error_type)
            if key not in error_stats:
                error_stats[key] = {
                    'ExecutionDate': datetime.now().strftime('%Y-%m-%d'),
                    'System': error['system'],
                    'ErrorType': error_type,
                    'ErrorCount': 0,
                    'AffectedEmployees': []
                }

            error_stats[key]['ErrorCount'] += 1
            if employee_id and employee_id not in error_stats[key]['AffectedEmployees']:
                error_stats[key]['AffectedEmployees'].append(employee_id)

        # Convert to a list
        error_list = []
        for data in error_stats.values():
            # Per-error timestamps are not tracked, so this stores the report
            # generation time rather than a true first occurrence
            data['FirstOccurrence'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            data['EmployeeCount'] = len(data['AffectedEmployees'])
            data['TopEmployees'] = ', '.join(data['AffectedEmployees'][:5])
            del data['AffectedEmployees']
            error_list.append(data)

        # Sort by error count, descending
        error_list.sort(key=lambda x: x['ErrorCount'], reverse=True)

        with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
            fieldnames = ['ExecutionDate', 'System', 'ErrorType', 'ErrorCount',
                         'EmployeeCount', 'TopEmployees', 'FirstOccurrence']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(error_list)

        logger.info(f"✓ 已保存: {output_file.name} ({len(error_list)} 种错误类型)")


def main():
    """主函数"""
    parser = argparse.ArgumentParser(description='离职日志ETL处理脚本')
    parser.add_argument('--incremental', action='store_true',
                       help='增量处理模式（只处理上次运行后的新文件）')
    parser.add_argument('--date', type=str,
                       help='只处理指定日期的日志 (格式: YYYY-MM-DD)')

    args = parser.parse_args()

    # Set up paths
    base_dir = Path(__file__).parent
    output_dir = base_dir / 'output'

    # Build the ETL pipeline
    pipeline = ETLPipeline(base_dir, output_dir)

    # Run (argparse always defines args.date, so no hasattr check is needed)
    target_date = args.date if args.date else None
    pipeline.run(target_date=target_date)


if __name__ == '__main__':
    main()
