""" Unified Analytics Service Provides comprehensive analytics and metrics for the PX Command Center Dashboard. Consolidates data from complaints, surveys, actions, physicians, and other modules. """ from datetime import datetime, timedelta from typing import Dict, List, Optional, Any from django.db.models import Avg, Count, Q, Sum, F, ExpressionWrapper, DurationField from django.utils import timezone from django.core.cache import cache from apps.complaints.models import Complaint, Inquiry, ComplaintStatus from apps.complaints.analytics import ComplaintAnalytics from apps.px_action_center.models import PXAction from apps.surveys.models import SurveyInstance from apps.social.models import SocialMediaComment from apps.callcenter.models import CallCenterInteraction from apps.physicians.models import PhysicianMonthlyRating from apps.organizations.models import Department, Hospital from apps.ai_engine.models import SentimentResult from apps.analytics.models import KPI, KPIValue class UnifiedAnalyticsService: """ Unified service for all PX360 analytics and KPIs. Provides methods to retrieve: - All KPIs with filters - Chart data for various visualizations - Department performance metrics - Physician analytics - Sentiment analysis metrics - SLA compliance data """ # Cache timeout (in seconds) - 5 minutes for most data CACHE_TIMEOUT = 300 @staticmethod def _get_cache_key(prefix: str, **kwargs) -> str: """Generate cache key based on parameters""" parts = [prefix] for key, value in sorted(kwargs.items()): if value is not None: parts.append(f"{key}:{value}") return ":".join(parts) @staticmethod def _get_date_range(date_range: str, custom_start=None, custom_end=None) -> tuple: """ Get start and end dates based on date_range parameter. Args: date_range: '7d', '30d', '90d', 'this_month', 'last_month', 'quarter', 'year', or 'custom' custom_start: Custom start date (required if date_range='custom') custom_end: Custom end date (required if date_range='custom') Returns: tuple: (start_date, end_date) """ now = timezone.now() if date_range == 'custom' and custom_start and custom_end: return custom_start, custom_end date_ranges = { '7d': timedelta(days=7), '30d': timedelta(days=30), '90d': timedelta(days=90), } if date_range in date_ranges: end_date = now start_date = now - date_ranges[date_range] return start_date, end_date elif date_range == 'this_month': start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) end_date = now return start_date, end_date elif date_range == 'last_month': if now.month == 1: start_date = now.replace(year=now.year-1, month=12, day=1, hour=0, minute=0, second=0, microsecond=0) end_date = now.replace(year=now.year-1, month=12, day=31, hour=23, minute=59, second=59) else: start_date = now.replace(month=now.month-1, day=1, hour=0, minute=0, second=0, microsecond=0) # Get last day of previous month next_month = now.replace(day=1) last_day = (next_month - timedelta(days=1)).day end_date = now.replace(month=now.month-1, day=last_day, hour=23, minute=59, second=59) return start_date, end_date elif date_range == 'quarter': current_quarter = (now.month - 1) // 3 start_month = current_quarter * 3 + 1 start_date = now.replace(month=start_month, day=1, hour=0, minute=0, second=0, microsecond=0) end_date = now return start_date, end_date elif date_range == 'year': start_date = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) end_date = now return start_date, end_date # Default to 30 days return now - timedelta(days=30), now @staticmethod def 

    @staticmethod
    def _filter_by_role(queryset, user) -> Any:
        """
        Filter queryset based on user role and permissions.

        Args:
            queryset: Django queryset
            user: User object

        Returns:
            Filtered queryset
        """
        # Check if queryset has hospital/department fields
        if hasattr(queryset.model, 'hospital'):
            if user.is_px_admin():
                pass  # See all
            elif user.is_hospital_admin() and user.hospital:
                queryset = queryset.filter(hospital=user.hospital)
            elif user.is_department_manager() and user.department:
                queryset = queryset.filter(department=user.department)
            else:
                queryset = queryset.none()
        return queryset
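
    # Usage sketch (relies on the is_px_admin/is_hospital_admin/is_department_manager
    # helpers called above being available on the custom User model): every public
    # method below scopes its base queryset first, then applies the date window, e.g.
    #   qs = UnifiedAnalyticsService._filter_by_role(Complaint.objects.all(), user)
    #   qs = qs.filter(created_at__gte=start_date, created_at__lte=end_date)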

    @staticmethod
    def get_all_kpis(
        user,
        date_range: str = '30d',
        hospital_id: Optional[str] = None,
        department_id: Optional[str] = None,
        kpi_category: Optional[str] = None,
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Get all KPIs with applied filters.

        Args:
            user: Current user
            date_range: Date range filter
            hospital_id: Optional hospital filter
            department_id: Optional department filter
            kpi_category: Optional KPI category filter
            custom_start: Custom start date
            custom_end: Custom end date

        Returns:
            dict: All KPI values
        """
        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        cache_key = UnifiedAnalyticsService._get_cache_key(
            'all_kpis',
            user_id=user.id,
            date_range=date_range,
            hospital_id=hospital_id,
            department_id=department_id,
            kpi_category=kpi_category
        )
        cached_data = cache.get(cache_key)
        if cached_data:
            return cached_data

        # Get base querysets with role filtering
        complaints_qs = UnifiedAnalyticsService._filter_by_role(
            Complaint.objects.all(), user
        ).filter(created_at__gte=start_date, created_at__lte=end_date)

        actions_qs = UnifiedAnalyticsService._filter_by_role(
            PXAction.objects.all(), user
        ).filter(created_at__gte=start_date, created_at__lte=end_date)

        surveys_qs = UnifiedAnalyticsService._filter_by_role(
            SurveyInstance.objects.all(), user
        ).filter(
            completed_at__gte=start_date,
            completed_at__lte=end_date,
            status='completed'
        )

        # Apply additional filters
        if hospital_id:
            hospital = Hospital.objects.filter(id=hospital_id).first()
            if hospital:
                complaints_qs = complaints_qs.filter(hospital=hospital)
                actions_qs = actions_qs.filter(hospital=hospital)
                surveys_qs = surveys_qs.filter(survey_template__hospital=hospital)

        if department_id:
            department = Department.objects.filter(id=department_id).first()
            if department:
                complaints_qs = complaints_qs.filter(department=department)
                actions_qs = actions_qs.filter(department=department)
                surveys_qs = surveys_qs.filter(journey_stage_instance__department=department)

        # Calculate KPIs
        kpis = {
            # Complaints KPIs
            'total_complaints': int(complaints_qs.count()),
            'open_complaints': int(complaints_qs.filter(status__in=['open', 'in_progress']).count()),
            'overdue_complaints': int(complaints_qs.filter(is_overdue=True).count()),
            'high_severity_complaints': int(complaints_qs.filter(severity__in=['high', 'critical']).count()),
            'resolved_complaints': int(complaints_qs.filter(status__in=['resolved', 'closed']).count()),

            # Actions KPIs
            'total_actions': int(actions_qs.count()),
            'open_actions': int(actions_qs.filter(status__in=['open', 'in_progress']).count()),
            'overdue_actions': int(actions_qs.filter(is_overdue=True).count()),
            'escalated_actions': int(actions_qs.filter(escalation_level__gt=0).count()),
            'resolved_actions': int(actions_qs.filter(status='completed').count()),

            # Survey KPIs
            'total_surveys': int(surveys_qs.count()),
            'negative_surveys': int(surveys_qs.filter(is_negative=True).count()),
            'avg_survey_score': float(surveys_qs.aggregate(avg=Avg('total_score'))['avg'] or 0),

            # Social Media KPIs
            # Sentiment is stored in ai_analysis JSON field as ai_analysis.sentiment
            'negative_social_comments': int(SocialMediaComment.objects.filter(
                ai_analysis__sentiment='negative',
                published_at__gte=start_date,
                published_at__lte=end_date
            ).count()),

            # Call Center KPIs
            'low_call_ratings': int(CallCenterInteraction.objects.filter(
                is_low_rating=True,
                call_started_at__gte=start_date,
                call_started_at__lte=end_date
            ).count()),

            # Sentiment KPIs
            'total_sentiment_analyses': int(SentimentResult.objects.filter(
                created_at__gte=start_date,
                created_at__lte=end_date
            ).count()),
        }

        # Add trends (compare with the previous period of equal length).
        # The previous-period count must come from a fresh queryset; re-filtering the
        # already date-bounded complaints_qs would always return zero.
        duration = end_date - start_date
        prev_start = start_date - duration
        prev_end = end_date - duration

        prev_complaints_qs = UnifiedAnalyticsService._filter_by_role(
            Complaint.objects.all(), user
        ).filter(created_at__gte=prev_start, created_at__lte=prev_end)
        if hospital_id:
            prev_complaints_qs = prev_complaints_qs.filter(hospital_id=hospital_id)
        if department_id:
            prev_complaints_qs = prev_complaints_qs.filter(department_id=department_id)
        prev_complaints = int(prev_complaints_qs.count())

        kpis['complaints_trend'] = {
            'current': kpis['total_complaints'],
            'previous': prev_complaints,
            'percentage_change': float(
                ((kpis['total_complaints'] - prev_complaints) / prev_complaints * 100)
                if prev_complaints > 0 else 0
            )
        }

        # Cache the results
        cache.set(cache_key, kpis, UnifiedAnalyticsService.CACHE_TIMEOUT)
        return kpis
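
    # Shape sketch of the value returned (and cached) by get_all_kpis; counts are
    # illustrative only:
    #   {'total_complaints': 120, 'open_complaints': 35, 'overdue_complaints': 8, ...,
    #    'complaints_trend': {'current': 120, 'previous': 100, 'percentage_change': 20.0}}
    # percentage_change = (current - previous) / previous * 100, reported as 0 when
    # the previous period had no complaints.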

    @staticmethod
    def get_chart_data(
        user,
        chart_type: str,
        date_range: str = '30d',
        hospital_id: Optional[str] = None,
        department_id: Optional[str] = None,
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Get data for specific chart types.

        Args:
            user: Current user
            chart_type: Type of chart ('complaints_trend', 'sla_compliance', 'survey_satisfaction', etc.)
            date_range: Date range filter
            hospital_id: Optional hospital filter
            department_id: Optional department filter
            custom_start: Custom start date
            custom_end: Custom end date

        Returns:
            dict: Chart data in format suitable for ApexCharts
        """
        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        cache_key = UnifiedAnalyticsService._get_cache_key(
            f'chart_{chart_type}',
            user_id=user.id,
            date_range=date_range,
            hospital_id=hospital_id,
            department_id=department_id
        )
        cached_data = cache.get(cache_key)
        if cached_data:
            return cached_data

        # Get base complaint queryset
        complaints_qs = UnifiedAnalyticsService._filter_by_role(
            Complaint.objects.all(), user
        ).filter(created_at__gte=start_date, created_at__lte=end_date)

        surveys_qs = UnifiedAnalyticsService._filter_by_role(
            SurveyInstance.objects.all(), user
        ).filter(
            completed_at__gte=start_date,
            completed_at__lte=end_date,
            status='completed'
        )

        # Apply filters
        if hospital_id:
            complaints_qs = complaints_qs.filter(hospital_id=hospital_id)
            surveys_qs = surveys_qs.filter(survey_template__hospital_id=hospital_id)
        if department_id:
            complaints_qs = complaints_qs.filter(department_id=department_id)
            surveys_qs = surveys_qs.filter(journey_stage_instance__department_id=department_id)

        if chart_type == 'complaints_trend':
            data = UnifiedAnalyticsService._get_complaints_trend(complaints_qs, start_date, end_date)
        elif chart_type == 'complaints_by_category':
            data = UnifiedAnalyticsService._get_complaints_by_category(complaints_qs)
        elif chart_type == 'complaints_by_severity':
            data = UnifiedAnalyticsService._get_complaints_by_severity(complaints_qs)
        elif chart_type == 'sla_compliance':
            data = ComplaintAnalytics.get_sla_compliance(
                hospital_id and Hospital.objects.filter(id=hospital_id).first(),
                days=(end_date - start_date).days
            )
        elif chart_type == 'resolution_rate':
            data = ComplaintAnalytics.get_resolution_rate(
                hospital_id and Hospital.objects.filter(id=hospital_id).first(),
                days=(end_date - start_date).days
            )
        elif chart_type == 'survey_satisfaction_trend':
            data = UnifiedAnalyticsService._get_survey_satisfaction_trend(surveys_qs, start_date, end_date)
        elif chart_type == 'survey_distribution':
            data = UnifiedAnalyticsService._get_survey_distribution(surveys_qs)
        elif chart_type == 'sentiment_distribution':
            data = UnifiedAnalyticsService._get_sentiment_distribution(start_date, end_date)
        elif chart_type == 'department_performance':
            data = UnifiedAnalyticsService._get_department_performance(
                user, start_date, end_date, hospital_id
            )
        elif chart_type == 'physician_leaderboard':
            data = UnifiedAnalyticsService._get_physician_leaderboard(
                user, start_date, end_date, hospital_id, department_id, limit=10
            )
        else:
            data = {'error': f'Unknown chart type: {chart_type}'}

        cache.set(cache_key, data, UnifiedAnalyticsService.CACHE_TIMEOUT)
        return data

    @staticmethod
    def _get_complaints_trend(queryset, start_date, end_date) -> Dict[str, Any]:
        """Get complaints trend over time (grouped by day)"""
        data = []
        current_date = start_date
        while current_date <= end_date:
            next_date = current_date + timedelta(days=1)
            count = queryset.filter(
                created_at__gte=current_date,
                created_at__lt=next_date
            ).count()
            data.append({
                'date': current_date.strftime('%Y-%m-%d'),
                'count': count
            })
            current_date = next_date

        return {
            'type': 'line',
            'labels': [d['date'] for d in data],
            'series': [{'name': 'Complaints', 'data': [d['count'] for d in data]}]
        }

    @staticmethod
    def _get_complaints_by_category(queryset) -> Dict[str, Any]:
        """Get complaints breakdown by category"""
        categories = queryset.values('category').annotate(
            count=Count('id')
        ).order_by('-count')

        return {
            'type': 'donut',
            'labels': [c['category'] or 'Uncategorized' for c in categories],
            'series': [c['count'] for c in categories]
        }

    @staticmethod
    def _get_complaints_by_severity(queryset) -> Dict[str, Any]:
        """Get complaints breakdown by severity"""
        severity_counts = queryset.values('severity').annotate(
            count=Count('id')
        ).order_by('-count')

        severity_labels = {
            'low': 'Low',
            'medium': 'Medium',
            'high': 'High',
            'critical': 'Critical'
        }

        return {
            'type': 'pie',
            'labels': [severity_labels.get(s['severity'], s['severity']) for s in severity_counts],
            'series': [s['count'] for s in severity_counts]
        }

    @staticmethod
    def _get_survey_satisfaction_trend(queryset, start_date, end_date) -> Dict[str, Any]:
        """Get survey satisfaction trend over time"""
        data = []
        current_date = start_date
        while current_date <= end_date:
            next_date = current_date + timedelta(days=1)
            avg_score = queryset.filter(
                completed_at__gte=current_date,
                completed_at__lt=next_date
            ).aggregate(avg=Avg('total_score'))['avg'] or 0
            data.append({
                'date': current_date.strftime('%Y-%m-%d'),
                'score': round(avg_score, 2)
            })
            current_date = next_date

        return {
            'type': 'line',
            'labels': [d['date'] for d in data],
            'series': [{'name': 'Satisfaction', 'data': [d['score'] for d in data]}]
        }
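
    # Payload sketch for the chart helpers above (illustrative values); the shape is
    # what an ApexCharts line/donut config consumes directly:
    #   {'type': 'line',
    #    'labels': ['2024-06-01', '2024-06-02', ...],
    #    'series': [{'name': 'Complaints', 'data': [4, 7, ...]}]}
    #   {'type': 'donut', 'labels': ['Billing', 'Waiting time'], 'series': [12, 9]}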

    @staticmethod
    def _get_survey_distribution(queryset) -> Dict[str, Any]:
        """Get survey distribution by satisfaction level"""
        distribution = {
            'excellent': queryset.filter(total_score__gte=4.5).count(),
            'good': queryset.filter(total_score__gte=3.5, total_score__lt=4.5).count(),
            'average': queryset.filter(total_score__gte=2.5, total_score__lt=3.5).count(),
            'poor': queryset.filter(total_score__lt=2.5).count(),
        }

        return {
            'type': 'donut',
            'labels': ['Excellent', 'Good', 'Average', 'Poor'],
            'series': [
                distribution['excellent'],
                distribution['good'],
                distribution['average'],
                distribution['poor']
            ]
        }

    @staticmethod
    def get_staff_performance_metrics(
        user,
        date_range: str = '30d',
        hospital_id: Optional[str] = None,
        department_id: Optional[str] = None,
        staff_ids: Optional[List[str]] = None,
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Get performance metrics for staff members.

        Args:
            user: Current user
            date_range: Date range filter
            hospital_id: Optional hospital filter
            department_id: Optional department filter
            staff_ids: Optional list of specific staff IDs to evaluate
            custom_start: Custom start date
            custom_end: Custom end date

        Returns:
            dict: Staff performance metrics with complaints and inquiries data
        """
        from apps.accounts.models import User

        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        # Get staff queryset
        staff_qs = User.objects.all()

        # Filter by role
        if not user.is_px_admin() and user.hospital:
            staff_qs = staff_qs.filter(hospital=user.hospital)

        # Apply filters
        if hospital_id:
            staff_qs = staff_qs.filter(hospital_id=hospital_id)
        if department_id:
            staff_qs = staff_qs.filter(department_id=department_id)
        if staff_ids:
            staff_qs = staff_qs.filter(id__in=staff_ids)

        # Only staff with assigned complaints or inquiries
        staff_qs = staff_qs.filter(
            Q(assigned_complaints__isnull=False) |
            Q(assigned_inquiries__isnull=False)
        ).distinct().prefetch_related('assigned_complaints', 'assigned_inquiries')

        staff_metrics = []
        for staff_member in staff_qs:
            # Get complaints assigned to this staff
            complaints = Complaint.objects.filter(
                assigned_to=staff_member,
                created_at__gte=start_date,
                created_at__lte=end_date
            )

            # Get inquiries assigned to this staff
            inquiries = Inquiry.objects.filter(
                assigned_to=staff_member,
                created_at__gte=start_date,
                created_at__lte=end_date
            )

            # Calculate complaint metrics
            complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)

            # Calculate inquiry metrics
            inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)

            staff_metrics.append({
                'id': str(staff_member.id),
                'name': f"{staff_member.first_name} {staff_member.last_name}",
                'email': staff_member.email,
                'hospital': staff_member.hospital.name if staff_member.hospital else None,
                'department': staff_member.department.name if staff_member.department else None,
                'complaints': complaint_metrics,
                'inquiries': inquiry_metrics
            })

        return {
            'staff_metrics': staff_metrics,
            'start_date': start_date.isoformat(),
            'end_date': end_date.isoformat(),
            'date_range': date_range
        }
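
    # Sketch of a single entry in the returned 'staff_metrics' list (names and
    # counts are illustrative only):
    #   {'id': '…', 'name': 'Sara Ali', 'email': 's.ali@example.org',
    #    'hospital': 'Main Hospital', 'department': 'Radiology',
    #    'complaints': {'total': 14, 'status': {...}, 'activation_time': {...},
    #                   'response_time': {...}},
    #    'inquiries': {'total': 6, 'status': {...}, 'response_time': {...}}}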

    @staticmethod
    def _calculate_complaint_metrics(complaints_qs) -> Dict[str, Any]:
        """Calculate detailed metrics for complaints"""
        total = complaints_qs.count()

        if total == 0:
            return {
                'total': 0,
                'internal': 0,
                'external': 0,
                'status': {'open': 0, 'in_progress': 0, 'resolved': 0, 'closed': 0},
                'activation_time': {'within_2h': 0, 'more_than_2h': 0, 'not_assigned': 0},
                'response_time': {'within_24h': 0, 'within_48h': 0, 'within_72h': 0,
                                  'more_than_72h': 0, 'not_responded': 0}
            }

        # Source breakdown
        internal_count = complaints_qs.filter(source__name_en='staff').count()
        external_count = total - internal_count

        # Status breakdown
        status_counts = {
            'open': complaints_qs.filter(status='open').count(),
            'in_progress': complaints_qs.filter(status='in_progress').count(),
            'resolved': complaints_qs.filter(status='resolved').count(),
            'closed': complaints_qs.filter(status='closed').count()
        }

        # Activation time (assigned_at - created_at)
        activation_within_2h = 0
        activation_more_than_2h = 0
        not_assigned = 0
        for complaint in complaints_qs:
            if complaint.assigned_at:
                activation_time = (complaint.assigned_at - complaint.created_at).total_seconds()
                if activation_time <= 7200:  # 2 hours
                    activation_within_2h += 1
                else:
                    activation_more_than_2h += 1
            else:
                not_assigned += 1

        # Response time (time to first update)
        response_within_24h = 0
        response_within_48h = 0
        response_within_72h = 0
        response_more_than_72h = 0
        not_responded = 0
        for complaint in complaints_qs:
            first_update = complaint.updates.first()
            if first_update:
                response_time = (first_update.created_at - complaint.created_at).total_seconds()
                if response_time <= 86400:  # 24 hours
                    response_within_24h += 1
                elif response_time <= 172800:  # 48 hours
                    response_within_48h += 1
                elif response_time <= 259200:  # 72 hours
                    response_within_72h += 1
                else:
                    response_more_than_72h += 1
            else:
                not_responded += 1

        return {
            'total': total,
            'internal': internal_count,
            'external': external_count,
            'status': status_counts,
            'activation_time': {
                'within_2h': activation_within_2h,
                'more_than_2h': activation_more_than_2h,
                'not_assigned': not_assigned
            },
            'response_time': {
                'within_24h': response_within_24h,
                'within_48h': response_within_48h,
                'within_72h': response_within_72h,
                'more_than_72h': response_more_than_72h,
                'not_responded': not_responded
            }
        }

    @staticmethod
    def _calculate_inquiry_metrics(inquiries_qs) -> Dict[str, Any]:
        """Calculate detailed metrics for inquiries"""
        total = inquiries_qs.count()

        if total == 0:
            return {
                'total': 0,
                'status': {'open': 0, 'in_progress': 0, 'resolved': 0, 'closed': 0},
                'response_time': {'within_24h': 0, 'within_48h': 0, 'within_72h': 0,
                                  'more_than_72h': 0, 'not_responded': 0}
            }

        # Status breakdown
        status_counts = {
            'open': inquiries_qs.filter(status='open').count(),
            'in_progress': inquiries_qs.filter(status='in_progress').count(),
            'resolved': inquiries_qs.filter(status='resolved').count(),
            'closed': inquiries_qs.filter(status='closed').count()
        }

        # Response time (responded_at - created_at)
        response_within_24h = 0
        response_within_48h = 0
        response_within_72h = 0
        response_more_than_72h = 0
        not_responded = 0
        for inquiry in inquiries_qs:
            if inquiry.responded_at:
                response_time = (inquiry.responded_at - inquiry.created_at).total_seconds()
                if response_time <= 86400:  # 24 hours
                    response_within_24h += 1
                elif response_time <= 172800:  # 48 hours
                    response_within_48h += 1
                elif response_time <= 259200:  # 72 hours
                    response_within_72h += 1
                else:
                    response_more_than_72h += 1
            else:
                not_responded += 1

        return {
            'total': total,
            'status': status_counts,
            'response_time': {
                'within_24h': response_within_24h,
                'within_48h': response_within_48h,
                'within_72h': response_within_72h,
                'more_than_72h': response_more_than_72h,
                'not_responded': not_responded
            }
        }

    @staticmethod
    def _get_sentiment_distribution(start_date, end_date) -> Dict[str, Any]:
        """Get sentiment analysis distribution"""
        queryset = SentimentResult.objects.filter(
            created_at__gte=start_date,
            created_at__lte=end_date
        )
        distribution = queryset.values('sentiment').annotate(
            count=Count('id')
        )

        sentiment_labels = {
            'positive': 'Positive',
            'neutral': 'Neutral',
            'negative': 'Negative'
        }
        # Keep a stable positive/neutral/negative ordering so chart colors stay
        # consistent; any unexpected sentiment values are appended at the end.
        sentiment_order = ['positive', 'neutral', 'negative']
        counts = {d['sentiment']: d['count'] for d in distribution}
        ordered = [s for s in sentiment_order if s in counts]
        ordered += [s for s in counts if s not in sentiment_order]

        return {
            'type': 'donut',
            'labels': [sentiment_labels.get(s, s) for s in ordered],
            'series': [counts[s] for s in ordered]
        }
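
    # Bucket boundaries used above, worked out in seconds:
    #   2h = 7_200, 24h = 86_400, 48h = 172_800, 72h = 259_200.
    # Note the elif chains make the buckets disjoint ranges despite the
    # cumulative-sounding names: a first update after 30 hours lands in
    # 'within_48h' (i.e. between 24 and 48 hours), not in 'within_24h'.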

    @staticmethod
    def _get_department_performance(
        user,
        start_date,
        end_date,
        hospital_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """Get department performance rankings"""
        queryset = Department.objects.filter(status='active')

        if hospital_id:
            queryset = queryset.filter(hospital_id=hospital_id)
        elif not user.is_px_admin() and user.hospital:
            queryset = queryset.filter(hospital=user.hospital)

        # Annotate with survey data
        # SurveyInstance links to PatientJourneyInstance which has department field
        departments = queryset.annotate(
            avg_survey_score=Avg(
                'journey_instances__surveys__total_score',
                filter=Q(journey_instances__surveys__status='completed',
                         journey_instances__surveys__completed_at__gte=start_date,
                         journey_instances__surveys__completed_at__lte=end_date)
            ),
            survey_count=Count(
                'journey_instances__surveys',
                filter=Q(journey_instances__surveys__status='completed',
                         journey_instances__surveys__completed_at__gte=start_date,
                         journey_instances__surveys__completed_at__lte=end_date)
            )
        ).filter(survey_count__gt=0).order_by('-avg_survey_score')[:10]

        return {
            'type': 'bar',
            'labels': [d.name for d in departments],
            'series': [{
                'name': 'Average Score',
                'data': [round(d.avg_survey_score or 0, 2) for d in departments]
            }]
        }

    @staticmethod
    def _get_physician_leaderboard(
        user,
        start_date,
        end_date,
        hospital_id: Optional[str] = None,
        department_id: Optional[str] = None,
        limit: int = 10
    ) -> Dict[str, Any]:
        """Get physician leaderboard for the current period"""
        now = timezone.now()
        queryset = PhysicianMonthlyRating.objects.filter(
            year=now.year,
            month=now.month
        ).select_related('staff', 'staff__hospital', 'staff__department')

        # Apply RBAC filters
        if not user.is_px_admin() and user.hospital:
            queryset = queryset.filter(staff__hospital=user.hospital)
        if hospital_id:
            queryset = queryset.filter(staff__hospital_id=hospital_id)
        if department_id:
            queryset = queryset.filter(staff__department_id=department_id)

        queryset = queryset.order_by('-average_rating')[:limit]

        return {
            'type': 'bar',
            'labels': [f"{r.staff.first_name} {r.staff.last_name}" for r in queryset],
            'series': [{
                'name': 'Rating',
                'data': [float(round(r.average_rating, 2)) for r in queryset]
            }],
            'metadata': [
                {
                    'name': f"{r.staff.first_name} {r.staff.last_name}",
                    'physician_id': str(r.staff.id),
                    'specialization': r.staff.specialization,
                    'department': r.staff.department.name if r.staff.department else None,
                    'rating': float(round(r.average_rating, 2)),
                    'surveys': int(r.total_surveys) if r.total_surveys is not None else 0,
                    'positive': int(r.positive_count) if r.positive_count is not None else 0,
                    'neutral': int(r.neutral_count) if r.neutral_count is not None else 0,
                    'negative': int(r.negative_count) if r.negative_count is not None else 0
                }
                for r in queryset
            ]
        }
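
    # Note: the leaderboard above is built from PhysicianMonthlyRating rows for the
    # current calendar month, so start_date/end_date only travel with the call
    # signature; they do not narrow the ratings themselves. Each 'metadata' entry
    # mirrors one bar, e.g. (illustrative values):
    #   {'name': 'Dr. Example', 'rating': 4.7, 'surveys': 32,
    #    'positive': 25, 'neutral': 5, 'negative': 2, ...}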

    # ============================================================================
    # ENHANCED ADMIN EVALUATION - Staff Performance Analytics
    # ============================================================================

    @staticmethod
    def get_staff_detailed_performance(
        staff_id: str,
        user,
        date_range: str = '30d',
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Get detailed performance metrics for a single staff member.

        Args:
            staff_id: Staff member UUID
            user: Current user (for permission checking)
            date_range: Date range filter
            custom_start: Custom start date
            custom_end: Custom end date

        Returns:
            dict: Detailed performance metrics with timeline
        """
        from apps.accounts.models import User

        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        staff = User.objects.select_related('hospital', 'department').get(id=staff_id)

        # Check permissions
        if not user.is_px_admin():
            if user.hospital and staff.hospital != user.hospital:
                raise PermissionError("Cannot view staff from other hospitals")

        # Get complaints with timeline
        complaints = Complaint.objects.filter(
            assigned_to=staff,
            created_at__gte=start_date,
            created_at__lte=end_date
        ).order_by('created_at')

        # Get inquiries with timeline
        inquiries = Inquiry.objects.filter(
            assigned_to=staff,
            created_at__gte=start_date,
            created_at__lte=end_date
        ).order_by('created_at')

        # Calculate daily workload for trend
        daily_stats = {}
        current = start_date.date()
        end = end_date.date()
        while current <= end:
            daily_stats[current.isoformat()] = {
                'complaints_created': 0,
                'complaints_resolved': 0,
                'inquiries_created': 0,
                'inquiries_resolved': 0
            }
            current += timedelta(days=1)

        for c in complaints:
            date_key = c.created_at.date().isoformat()
            if date_key in daily_stats:
                daily_stats[date_key]['complaints_created'] += 1
            if c.status in ['resolved', 'closed'] and c.resolved_at:
                resolve_key = c.resolved_at.date().isoformat()
                if resolve_key in daily_stats:
                    daily_stats[resolve_key]['complaints_resolved'] += 1

        for i in inquiries:
            date_key = i.created_at.date().isoformat()
            if date_key in daily_stats:
                daily_stats[date_key]['inquiries_created'] += 1
            if i.status in ['resolved', 'closed'] and i.responded_at:
                respond_key = i.responded_at.date().isoformat()
                if respond_key in daily_stats:
                    daily_stats[respond_key]['inquiries_resolved'] += 1

        # Calculate performance score (0-100)
        complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
        inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)
        performance_score = UnifiedAnalyticsService._calculate_performance_score(
            complaint_metrics, inquiry_metrics
        )

        # Get recent items
        recent_complaints = complaints.select_related('patient', 'hospital').order_by('-created_at')[:10]
        recent_inquiries = inquiries.select_related('patient', 'hospital').order_by('-created_at')[:10]

        return {
            'staff': {
                'id': str(staff.id),
                'name': f"{staff.first_name} {staff.last_name}",
                'email': staff.email,
                'hospital': staff.hospital.name if staff.hospital else None,
                'department': staff.department.name if staff.department else None,
                'role': staff.get_role_names()[0] if staff.get_role_names() else 'Staff'
            },
            'performance_score': performance_score,
            'period': {
                'start': start_date.isoformat(),
                'end': end_date.isoformat(),
                'days': (end_date - start_date).days
            },
            'summary': {
                'total_complaints': complaint_metrics['total'],
                'total_inquiries': inquiry_metrics['total'],
                'complaint_resolution_rate': round(
                    (complaint_metrics['status']['resolved'] + complaint_metrics['status']['closed'])
                    / max(complaint_metrics['total'], 1) * 100, 1
                ),
                'inquiry_resolution_rate': round(
                    (inquiry_metrics['status']['resolved'] + inquiry_metrics['status']['closed'])
                    / max(inquiry_metrics['total'], 1) * 100, 1
                )
            },
            'complaint_metrics': complaint_metrics,
            'inquiry_metrics': inquiry_metrics,
            'daily_trends': daily_stats,
            'recent_complaints': [
                {
                    'id': str(c.id),
                    'title': c.title,
                    'status': c.status,
                    'severity': c.severity,
                    'created_at': c.created_at.isoformat(),
                    'patient': c.patient.get_full_name() if c.patient else None
                }
                for c in recent_complaints
            ],
            'recent_inquiries': [
                {
                    'id': str(i.id),
                    'subject': i.subject,
                    'status': i.status,
                    'created_at': i.created_at.isoformat(),
                    'patient': i.patient.get_full_name() if i.patient else None
                }
                for i in recent_inquiries
            ]
        }

    @staticmethod
    def _calculate_performance_score(complaint_metrics: Dict, inquiry_metrics: Dict) -> Dict[str, Any]:
        """
        Calculate an overall performance score (0-100) based on multiple factors.

        Returns score breakdown and overall rating.
        """
        scores = {
            'complaint_resolution': 0,
            'complaint_response_time': 0,
            'complaint_activation_time': 0,
            'inquiry_resolution': 0,
            'inquiry_response_time': 0,
            'workload': 0
        }

        total_complaints = complaint_metrics['total']
        total_inquiries = inquiry_metrics['total']

        if total_complaints > 0:
            # Resolution score (25% weight)
            resolved = complaint_metrics['status']['resolved'] + complaint_metrics['status']['closed']
            scores['complaint_resolution'] = min(100, (resolved / total_complaints) * 100)

            # Response time score (15% weight)
            response = complaint_metrics['response_time']
            on_time = response['within_24h'] + response['within_48h']
            total_with_response = on_time + response['within_72h'] + response['more_than_72h']
            if total_with_response > 0:
                scores['complaint_response_time'] = min(100, (on_time / total_with_response) * 100)

            # Activation time score (10% weight)
            activation = complaint_metrics['activation_time']
            if activation['within_2h'] + activation['more_than_2h'] > 0:
                scores['complaint_activation_time'] = min(
                    100,
                    (activation['within_2h'] / (activation['within_2h'] + activation['more_than_2h'])) * 100
                )

        if total_inquiries > 0:
            # Resolution score (20% weight)
            resolved = inquiry_metrics['status']['resolved'] + inquiry_metrics['status']['closed']
            scores['inquiry_resolution'] = min(100, (resolved / total_inquiries) * 100)

            # Response time score (15% weight)
            response = inquiry_metrics['response_time']
            on_time = response['within_24h'] + response['within_48h']
            total_with_response = on_time + response['within_72h'] + response['more_than_72h']
            if total_with_response > 0:
                scores['inquiry_response_time'] = min(100, (on_time / total_with_response) * 100)

        # Workload score based on having reasonable volume (15% weight)
        total_items = total_complaints + total_inquiries
        if total_items >= 5:
            scores['workload'] = 100
        elif total_items > 0:
            scores['workload'] = (total_items / 5) * 100

        # Calculate weighted overall score
        weights = {
            'complaint_resolution': 0.25,
            'complaint_response_time': 0.15,
            'complaint_activation_time': 0.10,
            'inquiry_resolution': 0.20,
            'inquiry_response_time': 0.15,
            'workload': 0.15
        }
        overall_score = sum(scores[k] * weights[k] for k in scores)

        # Determine rating
        if overall_score >= 90:
            rating = 'Excellent'
            rating_color = 'success'
        elif overall_score >= 75:
            rating = 'Good'
            rating_color = 'info'
        elif overall_score >= 60:
            rating = 'Average'
            rating_color = 'warning'
        elif overall_score >= 40:
            rating = 'Below Average'
            rating_color = 'danger'
        else:
            rating = 'Needs Improvement'
            rating_color = 'dark'

        return {
            'overall': round(overall_score, 1),
            'breakdown': scores,
            'rating': rating,
            'rating_color': rating_color,
            'total_items_handled': total_complaints + total_inquiries
        }

    @staticmethod
    def get_staff_performance_trends(
        staff_id: str,
        user,
        months: int = 6
    ) -> List[Dict[str, Any]]:
        """
        Get monthly performance trends for a staff member.

        Args:
            staff_id: Staff member UUID
            user: Current user
            months: Number of months to look back

        Returns:
            list: Monthly performance data
        """
        from apps.accounts.models import User

        staff = User.objects.get(id=staff_id)

        # Check permissions
        if not user.is_px_admin():
            if user.hospital and staff.hospital != user.hospital:
                raise PermissionError("Cannot view staff from other hospitals")

        trends = []
        now = timezone.now()

        for i in range(months - 1, -1, -1):
            # Step back i calendar months (avoids the drift of a 30-day approximation)
            year = now.year
            month = now.month - i
            while month <= 0:
                month += 12
                year -= 1
            month_start = now.replace(year=year, month=month, day=1,
                                      hour=0, minute=0, second=0, microsecond=0)
            if month == 12:
                month_end = month_start.replace(year=year + 1, month=1) - timedelta(seconds=1)
            else:
                month_end = month_start.replace(month=month + 1) - timedelta(seconds=1)

            # Get complaints for this month
            complaints = Complaint.objects.filter(
                assigned_to=staff,
                created_at__gte=month_start,
                created_at__lte=month_end
            )

            # Get inquiries for this month
            inquiries = Inquiry.objects.filter(
                assigned_to=staff,
                created_at__gte=month_start,
                created_at__lte=month_end
            )

            complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
            inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)
            score_data = UnifiedAnalyticsService._calculate_performance_score(
                complaint_metrics, inquiry_metrics
            )

            trends.append({
                'month': month_start.strftime('%Y-%m'),
                'month_name': month_start.strftime('%b %Y'),
                'performance_score': score_data['overall'],
                'rating': score_data['rating'],
                'complaints_total': complaint_metrics['total'],
                'complaints_resolved': complaint_metrics['status']['resolved'] + complaint_metrics['status']['closed'],
                'inquiries_total': inquiry_metrics['total'],
                'inquiries_resolved': inquiry_metrics['status']['resolved'] + inquiry_metrics['status']['closed']
            })

        return trends
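
    # Worked example of the _calculate_performance_score weighting (illustrative
    # numbers): with a breakdown of complaint resolution 80, response 100,
    # activation 50, inquiry resolution 100, inquiry response 100, workload 100,
    #   0.25*80 + 0.15*100 + 0.10*50 + 0.20*100 + 0.15*100 + 0.15*100 = 90.0
    # which just crosses the 'Excellent' threshold (>= 90).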

    @staticmethod
    def get_department_benchmarks(
        user,
        department_id: Optional[str] = None,
        date_range: str = '30d',
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Get benchmarking data comparing staff within a department.

        Args:
            user: Current user
            department_id: Optional department filter
            date_range: Date range filter
            custom_start: Custom start date
            custom_end: Custom end date

        Returns:
            dict: Benchmarking metrics
        """
        from apps.accounts.models import User
        from apps.organizations.models import Department

        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        # Get department
        if department_id:
            department = Department.objects.get(id=department_id)
        elif user.department:
            department = user.department
        else:
            return {'error': 'No department specified'}

        # Get all staff in department
        staff_qs = User.objects.filter(
            department=department,
            is_active=True
        ).filter(
            Q(assigned_complaints__isnull=False) |
            Q(assigned_inquiries__isnull=False)
        ).distinct()

        staff_scores = []
        for staff in staff_qs:
            complaints = Complaint.objects.filter(
                assigned_to=staff,
                created_at__gte=start_date,
                created_at__lte=end_date
            )
            inquiries = Inquiry.objects.filter(
                assigned_to=staff,
                created_at__gte=start_date,
                created_at__lte=end_date
            )

            complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
            inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)
            score_data = UnifiedAnalyticsService._calculate_performance_score(
                complaint_metrics, inquiry_metrics
            )

            staff_scores.append({
                'id': str(staff.id),
                'name': f"{staff.first_name} {staff.last_name}",
                'score': score_data['overall'],
                'rating': score_data['rating'],
                'total_items': score_data['total_items_handled'],
                'complaints': complaint_metrics['total'],
                'inquiries': inquiry_metrics['total']
            })

        # Sort by score
        staff_scores.sort(key=lambda x: x['score'], reverse=True)

        # Calculate averages
        if staff_scores:
            avg_score = sum(s['score'] for s in staff_scores) / len(staff_scores)
            avg_items = sum(s['total_items'] for s in staff_scores) / len(staff_scores)
        else:
            avg_score = 0
            avg_items = 0

        return {
            'department': department.name,
            'period': {
                'start': start_date.isoformat(),
                'end': end_date.isoformat()
            },
            'staff_count': len(staff_scores),
            'average_score': round(avg_score, 1),
            'average_items_per_staff': round(avg_items, 1),
            'top_performer': staff_scores[0] if staff_scores else None,
            'needs_improvement': [s for s in staff_scores if s['score'] < 60],
            'rankings': staff_scores
        }

    @staticmethod
    def export_staff_performance_report(
        staff_ids: List[str],
        user,
        date_range: str = '30d',
        custom_start: Optional[datetime] = None,
        custom_end: Optional[datetime] = None,
        format_type: str = 'csv'
    ) -> Dict[str, Any]:
        """
        Generate exportable staff performance report.

        Args:
            staff_ids: List of staff UUIDs to include
            user: Current user
            date_range: Date range filter
            custom_start: Custom start date
            custom_end: Custom end date
            format_type: Export format ('csv', 'excel', 'json')

        Returns:
            dict: Report data and metadata
        """
        start_date, end_date = UnifiedAnalyticsService._get_date_range(
            date_range, custom_start, custom_end
        )

        # Get performance data
        performance_data = UnifiedAnalyticsService.get_staff_performance_metrics(
            user=user,
            date_range=date_range,
            staff_ids=staff_ids if staff_ids else None,
            custom_start=custom_start,
            custom_end=custom_end
        )

        # Format for export
        export_rows = []
        for staff in performance_data['staff_metrics']:
            c = staff['complaints']
            i = staff['inquiries']

            # Calculate additional metrics
            complaint_resolution_rate = 0
            if c['total'] > 0:
                complaint_resolution_rate = round(
                    (c['status']['resolved'] + c['status']['closed']) / c['total'] * 100, 1
                )

            inquiry_resolution_rate = 0
            if i['total'] > 0:
                inquiry_resolution_rate = round(
                    (i['status']['resolved'] + i['status']['closed']) / i['total'] * 100, 1
                )

            export_rows.append({
                'staff_name': staff['name'],
                'email': staff['email'],
                'hospital': staff['hospital'],
                'department': staff['department'],
                'complaints_total': c['total'],
                'complaints_internal': c['internal'],
                'complaints_external': c['external'],
                'complaints_open': c['status']['open'],
                'complaints_resolved': c['status']['resolved'],
                'complaints_closed': c['status']['closed'],
                'complaint_resolution_rate': f"{complaint_resolution_rate}%",
                'complaint_activation_within_2h': c['activation_time']['within_2h'],
                'complaint_response_within_24h': c['response_time']['within_24h'],
                'inquiries_total': i['total'],
                'inquiries_open': i['status']['open'],
                'inquiries_resolved': i['status']['resolved'],
                'inquiry_resolution_rate': f"{inquiry_resolution_rate}%",
                'inquiry_response_within_24h': i['response_time']['within_24h']
            })

        return {
            'format': format_type,
            'generated_at': timezone.now().isoformat(),
            'period': {
                'start': start_date.isoformat(),
                'end': end_date.isoformat()
            },
            'total_staff': len(export_rows),
            'data': export_rows
        }
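

# Usage sketch (illustrative; assumes a request.user carrying the role helpers used
# above and a configured cache backend):
#   kpis = UnifiedAnalyticsService.get_all_kpis(request.user, date_range='30d')
#   chart = UnifiedAnalyticsService.get_chart_data(request.user, 'complaints_trend')
#   report = UnifiedAnalyticsService.export_staff_performance_report(
#       staff_ids=[], user=request.user, date_range='this_month', format_type='json'
#   )
# The export returns rows as plain dicts; serializing them to CSV/Excel is left to
# the caller, with format_type echoed back in the payload.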