"""
Unified Analytics Service

Provides comprehensive analytics and metrics for the PX Command Center Dashboard.
Consolidates data from complaints, surveys, actions, physicians, and other modules.
"""
|
|
|
|
from datetime import datetime, timedelta
|
|
from typing import Dict, List, Optional, Any
|
|
|
|
from django.db.models import Avg, Count, Q, Sum, F, ExpressionWrapper, DurationField
|
|
from django.utils import timezone
|
|
from django.core.cache import cache
|
|
|
|
from apps.complaints.models import Complaint, Inquiry, ComplaintStatus
|
|
from apps.complaints.analytics import ComplaintAnalytics
|
|
from apps.px_action_center.models import PXAction
|
|
from apps.surveys.models import SurveyInstance
|
|
from apps.social.models import SocialComment
|
|
from apps.callcenter.models import CallCenterInteraction
|
|
from apps.physicians.models import PhysicianMonthlyRating
|
|
from apps.organizations.models import Department, Hospital
|
|
from apps.ai_engine.models import SentimentResult
|
|
from apps.analytics.models import KPI, KPIValue
|
|
|
|
|
|
class UnifiedAnalyticsService:
    """
    Unified service for all PX360 analytics and KPIs.

    Provides methods to retrieve:
    - All KPIs with filters
    - Chart data for various visualizations
    - Department performance metrics
    - Physician analytics
    - Sentiment analysis metrics
    - SLA compliance data

    All public methods are static; role-based scoping is applied per call
    from the ``user`` argument rather than from instance state.
    """

    # Cache timeout (in seconds) - 5 minutes for most data
    CACHE_TIMEOUT = 300
|
|
|
|
@staticmethod
|
|
def _get_cache_key(prefix: str, **kwargs) -> str:
|
|
"""Generate cache key based on parameters"""
|
|
parts = [prefix]
|
|
for key, value in sorted(kwargs.items()):
|
|
if value is not None:
|
|
parts.append(f"{key}:{value}")
|
|
return ":".join(parts)
|
|
|
|
@staticmethod
def _get_date_range(date_range: str, custom_start=None, custom_end=None) -> tuple:
    """
    Resolve a date_range keyword to concrete (start_date, end_date) datetimes.

    Args:
        date_range: '7d', '30d', '90d', 'this_month', 'last_month',
            'quarter', 'year', or 'custom'
        custom_start: Custom start date (required if date_range='custom')
        custom_end: Custom end date (required if date_range='custom')

    Returns:
        tuple: (start_date, end_date). Unknown keywords — or 'custom' with a
        missing bound — fall back to the trailing 30 days.
    """
    now = timezone.now()

    if date_range == "custom" and custom_start and custom_end:
        return custom_start, custom_end

    # Simple rolling windows ending "now".
    rolling_windows = {
        "7d": timedelta(days=7),
        "30d": timedelta(days=30),
        "90d": timedelta(days=90),
    }
    if date_range in rolling_windows:
        return now - rolling_windows[date_range], now

    if date_range == "this_month":
        start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        return start_date, now

    if date_range == "last_month":
        if now.month == 1:
            start_date = now.replace(year=now.year - 1, month=12, day=1, hour=0, minute=0, second=0, microsecond=0)
            # Bug fix: pin microsecond to 999999 (previously it kept `now`'s
            # microsecond, excluding events at the very end of the month).
            end_date = now.replace(year=now.year - 1, month=12, day=31, hour=23, minute=59, second=59, microsecond=999999)
        else:
            start_date = now.replace(month=now.month - 1, day=1, hour=0, minute=0, second=0, microsecond=0)
            # Last day of previous month = day before the 1st of this month.
            first_of_this_month = now.replace(day=1)
            last_day = (first_of_this_month - timedelta(days=1)).day
            end_date = now.replace(month=now.month - 1, day=last_day, hour=23, minute=59, second=59, microsecond=999999)
        return start_date, end_date

    if date_range == "quarter":
        # First month of the current calendar quarter (1, 4, 7, or 10).
        start_month = ((now.month - 1) // 3) * 3 + 1
        start_date = now.replace(month=start_month, day=1, hour=0, minute=0, second=0, microsecond=0)
        return start_date, now

    if date_range == "year":
        start_date = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
        return start_date, now

    # Default to 30 days
    return now - timedelta(days=30), now
|
|
|
|
@staticmethod
def _filter_by_role(queryset, user) -> Any:
    """
    Restrict a queryset to the rows the user's role allows.

    PX admins see everything; hospital admins see their hospital's rows;
    department managers see their department's rows; any other role sees
    nothing. Querysets whose model has no ``hospital`` field pass through
    unchanged.

    Args:
        queryset: Django queryset
        user: User object

    Returns:
        Filtered queryset
    """
    # Models without a hospital field are not scoped.
    if not hasattr(queryset.model, "hospital"):
        return queryset
    if user.is_px_admin():
        return queryset
    if user.is_hospital_admin() and user.hospital:
        return queryset.filter(hospital=user.hospital)
    if user.is_department_manager() and user.department:
        return queryset.filter(department=user.department)
    return queryset.none()
|
|
|
|
@staticmethod
def get_all_kpis(
    user,
    date_range: str = "30d",
    hospital_id: Optional[str] = None,
    department_id: Optional[str] = None,
    kpi_category: Optional[str] = None,
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get all KPIs with applied filters.

    Args:
        user: Current user (drives role-based scoping)
        date_range: Date range filter keyword
        hospital_id: Optional hospital filter
        department_id: Optional department filter
        kpi_category: Optional KPI category filter (currently only part of
            the cache key — no filtering is applied from it here)
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: All KPI values, plus ``complaints_trend`` comparing the current
        window against the immediately preceding window of equal length.
    """
    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    cache_key = UnifiedAnalyticsService._get_cache_key(
        "all_kpis",
        user_id=user.id,
        date_range=date_range,
        hospital_id=hospital_id,
        department_id=department_id,
        kpi_category=kpi_category,
    )

    # Compare against None so a legitimately-falsy cached payload still hits.
    cached_data = cache.get(cache_key)
    if cached_data is not None:
        return cached_data

    # Role-scoped base querysets WITHOUT the date window applied yet, so the
    # previous-period trend below can reuse them.
    complaints_base = UnifiedAnalyticsService._filter_by_role(Complaint.objects.all(), user)
    actions_base = UnifiedAnalyticsService._filter_by_role(PXAction.objects.all(), user)
    surveys_base = UnifiedAnalyticsService._filter_by_role(SurveyInstance.objects.all(), user)

    # Apply additional filters
    if hospital_id:
        hospital = Hospital.objects.filter(id=hospital_id).first()
        if hospital:
            complaints_base = complaints_base.filter(hospital=hospital)
            actions_base = actions_base.filter(hospital=hospital)
            surveys_base = surveys_base.filter(hospital=hospital)

    if department_id:
        department = Department.objects.filter(id=department_id).first()
        if department:
            complaints_base = complaints_base.filter(department=department)
            actions_base = actions_base.filter(department=department)
            surveys_base = surveys_base.filter(journey_stage_instance__department=department)

    # Current reporting window
    complaints_qs = complaints_base.filter(created_at__gte=start_date, created_at__lte=end_date)
    actions_qs = actions_base.filter(created_at__gte=start_date, created_at__lte=end_date)
    surveys_qs = surveys_base.filter(completed_at__gte=start_date, completed_at__lte=end_date, status="completed")

    # Calculate KPIs
    kpis = {
        # Complaints KPIs
        "total_complaints": int(complaints_qs.count()),
        "open_complaints": int(complaints_qs.filter(status__in=["open", "in_progress"]).count()),
        "overdue_complaints": int(complaints_qs.filter(is_overdue=True).count()),
        "high_severity_complaints": int(complaints_qs.filter(severity__in=["high", "critical"]).count()),
        "resolved_complaints": int(complaints_qs.filter(status__in=["resolved", "closed"]).count()),
        # Actions KPIs
        "total_actions": int(actions_qs.count()),
        "open_actions": int(actions_qs.filter(status__in=["open", "in_progress"]).count()),
        "overdue_actions": int(actions_qs.filter(is_overdue=True).count()),
        "escalated_actions": int(actions_qs.filter(escalation_level__gt=0).count()),
        "resolved_actions": int(actions_qs.filter(status="completed").count()),
        # Survey KPIs
        "total_surveys": int(surveys_qs.count()),
        "negative_surveys": int(surveys_qs.filter(is_negative=True).count()),
        "avg_survey_score": float(surveys_qs.aggregate(avg=Avg("total_score"))["avg"] or 0),
        # Social Media KPIs
        # Sentiment is stored in ai_analysis JSON field as ai_analysis.sentiment
        'negative_social_comments': int(SocialComment.objects.filter(
            ai_analysis__sentiment='negative',
            published_at__gte=start_date,
            published_at__lte=end_date
        ).count()),
        # Call Center KPIs
        "low_call_ratings": int(
            CallCenterInteraction.objects.filter(
                is_low_rating=True, call_started_at__gte=start_date, call_started_at__lte=end_date
            ).count()
        ),
        # Sentiment KPIs
        "total_sentiment_analyses": int(
            SentimentResult.objects.filter(created_at__gte=start_date, created_at__lte=end_date).count()
        ),
    }

    # Trend: compare with the immediately preceding period of equal length.
    duration = end_date - start_date
    prev_start = start_date - duration
    prev_end = end_date - duration

    # Bug fix: count from the undated base queryset. The previous code
    # filtered complaints_qs — already limited to the current window — by the
    # previous window, so "previous" was always (near) zero.
    prev_complaints = int(complaints_base.filter(created_at__gte=prev_start, created_at__lte=prev_end).count())

    kpis["complaints_trend"] = {
        "current": kpis["total_complaints"],
        "previous": prev_complaints,
        "percentage_change": float(
            ((kpis["total_complaints"] - prev_complaints) / prev_complaints * 100) if prev_complaints > 0 else 0
        ),
    }

    # Cache the results
    cache.set(cache_key, kpis, UnifiedAnalyticsService.CACHE_TIMEOUT)

    return kpis
|
|
|
|
@staticmethod
def get_chart_data(
    user,
    chart_type: str,
    date_range: str = "30d",
    hospital_id: Optional[str] = None,
    department_id: Optional[str] = None,
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get data for specific chart types.

    Args:
        user: Current user (drives role-based scoping)
        chart_type: Type of chart ('complaints_trend', 'sla_compliance',
            'survey_satisfaction_trend', etc.)
        date_range: Date range filter
        hospital_id: Optional hospital filter
        department_id: Optional department filter
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: Chart data in format suitable for ApexCharts, or
        ``{"error": ...}`` for an unknown chart_type. Error payloads are
        NOT cached.
    """
    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    cache_key = UnifiedAnalyticsService._get_cache_key(
        f"chart_{chart_type}",
        user_id=user.id,
        date_range=date_range,
        hospital_id=hospital_id,
        department_id=department_id,
    )

    # Bug fix: compare against None so a falsy cached payload still hits.
    cached_data = cache.get(cache_key)
    if cached_data is not None:
        return cached_data

    # Role-scoped querysets restricted to the reporting window.
    complaints_qs = UnifiedAnalyticsService._filter_by_role(Complaint.objects.all(), user).filter(
        created_at__gte=start_date, created_at__lte=end_date
    )

    surveys_qs = UnifiedAnalyticsService._filter_by_role(SurveyInstance.objects.all(), user).filter(
        completed_at__gte=start_date, completed_at__lte=end_date, status="completed"
    )

    # Apply filters
    if hospital_id:
        complaints_qs = complaints_qs.filter(hospital_id=hospital_id)
        surveys_qs = surveys_qs.filter(hospital_id=hospital_id)

    if department_id:
        complaints_qs = complaints_qs.filter(department_id=department_id)
        surveys_qs = surveys_qs.filter(journey_stage_instance__department_id=department_id)

    # SLA/resolution analytics expect a Hospital instance (or None for all).
    hospital = Hospital.objects.filter(id=hospital_id).first() if hospital_id else None

    if chart_type == "complaints_trend":
        data = UnifiedAnalyticsService._get_complaints_trend(complaints_qs, start_date, end_date)

    elif chart_type == "complaints_by_category":
        data = UnifiedAnalyticsService._get_complaints_by_category(complaints_qs)

    elif chart_type == "complaints_by_severity":
        data = UnifiedAnalyticsService._get_complaints_by_severity(complaints_qs)

    elif chart_type == "sla_compliance":
        data = ComplaintAnalytics.get_sla_compliance(hospital, days=(end_date - start_date).days)

    elif chart_type == "resolution_rate":
        data = ComplaintAnalytics.get_resolution_rate(hospital, days=(end_date - start_date).days)

    elif chart_type == "survey_satisfaction_trend":
        data = UnifiedAnalyticsService._get_survey_satisfaction_trend(surveys_qs, start_date, end_date)

    elif chart_type == "survey_distribution":
        data = UnifiedAnalyticsService._get_survey_distribution(surveys_qs)

    elif chart_type == "sentiment_distribution":
        data = UnifiedAnalyticsService._get_sentiment_distribution(start_date, end_date)

    elif chart_type == "department_performance":
        data = UnifiedAnalyticsService._get_department_performance(user, start_date, end_date, hospital_id)

    elif chart_type == "physician_leaderboard":
        data = UnifiedAnalyticsService._get_physician_leaderboard(
            user, start_date, end_date, hospital_id, department_id, limit=10
        )

    else:
        # Bug fix: return immediately instead of caching the error payload
        # for CACHE_TIMEOUT seconds.
        return {"error": f"Unknown chart type: {chart_type}"}

    cache.set(cache_key, data, UnifiedAnalyticsService.CACHE_TIMEOUT)

    return data
|
|
|
|
@staticmethod
|
|
def _get_complaints_trend(queryset, start_date, end_date) -> Dict[str, Any]:
|
|
"""Get complaints trend over time (grouped by day)"""
|
|
data = []
|
|
current_date = start_date
|
|
while current_date <= end_date:
|
|
next_date = current_date + timedelta(days=1)
|
|
count = queryset.filter(created_at__gte=current_date, created_at__lt=next_date).count()
|
|
data.append({"date": current_date.strftime("%Y-%m-%d"), "count": count})
|
|
current_date = next_date
|
|
|
|
return {
|
|
"type": "line",
|
|
"labels": [d["date"] for d in data],
|
|
"series": [{"name": "Complaints", "data": [d["count"] for d in data]}],
|
|
}
|
|
|
|
@staticmethod
def _get_complaints_by_category(queryset) -> Dict[str, Any]:
    """Donut-chart breakdown of complaints per category, busiest first."""
    rows = queryset.values("category").annotate(count=Count("id")).order_by("-count")

    labels = []
    series = []
    for row in rows:
        labels.append(row["category"] or "Uncategorized")
        series.append(row["count"])

    return {"type": "donut", "labels": labels, "series": series}
|
|
|
|
@staticmethod
def _get_complaints_by_severity(queryset) -> Dict[str, Any]:
    """Pie-chart breakdown of complaints per severity, most common first."""
    display_names = {"low": "Low", "medium": "Medium", "high": "High", "critical": "Critical"}
    rows = queryset.values("severity").annotate(count=Count("id")).order_by("-count")

    labels = []
    series = []
    for row in rows:
        # Fall back to the raw value for severities outside the known set.
        labels.append(display_names.get(row["severity"], row["severity"]))
        series.append(row["count"])

    return {"type": "pie", "labels": labels, "series": series}
|
|
|
|
@staticmethod
def _get_survey_satisfaction_trend(queryset, start_date, end_date) -> Dict[str, Any]:
    """Line-chart series of the daily average survey score between the dates."""
    labels = []
    scores = []
    day = start_date
    while day <= end_date:
        day_after = day + timedelta(days=1)
        day_avg = queryset.filter(completed_at__gte=day, completed_at__lt=day_after).aggregate(
            avg=Avg("total_score")
        )["avg"]
        labels.append(day.strftime("%Y-%m-%d"))
        # Days with no completed surveys chart as 0.
        scores.append(round(day_avg or 0, 2))
        day = day_after

    return {
        "type": "line",
        "labels": labels,
        "series": [{"name": "Satisfaction", "data": scores}],
    }
|
|
|
|
@staticmethod
|
|
def _get_survey_distribution(queryset) -> Dict[str, Any]:
|
|
"""Get survey distribution by satisfaction level"""
|
|
distribution = {
|
|
"excellent": queryset.filter(total_score__gte=4.5).count(),
|
|
"good": queryset.filter(total_score__gte=3.5, total_score__lt=4.5).count(),
|
|
"average": queryset.filter(total_score__gte=2.5, total_score__lt=3.5).count(),
|
|
"poor": queryset.filter(total_score__lt=2.5).count(),
|
|
}
|
|
|
|
return {
|
|
"type": "donut",
|
|
"labels": ["Excellent", "Good", "Average", "Poor"],
|
|
"series": [distribution["excellent"], distribution["good"], distribution["average"], distribution["poor"]],
|
|
}
|
|
|
|
@staticmethod
def get_staff_performance_metrics(
    user,
    date_range: str = "30d",
    hospital_id: Optional[str] = None,
    department_id: Optional[str] = None,
    staff_ids: Optional[List[str]] = None,
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get performance metrics for staff members.

    Args:
        user: Current user (non-PX-admins are limited to their own hospital)
        date_range: Date range filter
        hospital_id: Optional hospital filter
        department_id: Optional department filter
        staff_ids: Optional list of specific staff IDs to evaluate
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: Per-staff complaint and inquiry metrics plus the resolved
        reporting window.
    """
    from apps.accounts.models import User

    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    staff_qs = User.objects.all()

    # Role scoping: non-admins only see staff from their own hospital.
    if not user.is_px_admin() and user.hospital:
        staff_qs = staff_qs.filter(hospital=user.hospital)

    # Apply filters
    if hospital_id:
        staff_qs = staff_qs.filter(hospital_id=hospital_id)

    if department_id:
        staff_qs = staff_qs.filter(department_id=department_id)

    if staff_ids:
        staff_qs = staff_qs.filter(id__in=staff_ids)

    # Only staff with assigned complaints or inquiries.
    # Perf fix: the prefetch_related("assigned_complaints", "assigned_inquiries")
    # that used to be here was dead weight — the loop below issues its own
    # date-windowed Complaint/Inquiry queries and never reads the prefetched
    # relation caches.
    staff_qs = staff_qs.filter(
        Q(assigned_complaints__isnull=False) | Q(assigned_inquiries__isnull=False)
    ).distinct()

    staff_metrics = []

    for staff_member in staff_qs:
        # Complaints assigned to this staff member within the window
        complaints = Complaint.objects.filter(
            assigned_to=staff_member, created_at__gte=start_date, created_at__lte=end_date
        )

        # Inquiries assigned to this staff member within the window
        inquiries = Inquiry.objects.filter(
            assigned_to=staff_member, created_at__gte=start_date, created_at__lte=end_date
        )

        # Bucketed timing/status metrics per workload type
        complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
        inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)

        staff_metrics.append(
            {
                "id": str(staff_member.id),
                "name": f"{staff_member.first_name} {staff_member.last_name}",
                "email": staff_member.email,
                "hospital": staff_member.hospital.name if staff_member.hospital else None,
                "department": staff_member.department.name if staff_member.department else None,
                "complaints": complaint_metrics,
                "inquiries": inquiry_metrics,
            }
        )

    return {
        "staff_metrics": staff_metrics,
        "start_date": start_date.isoformat(),
        "end_date": end_date.isoformat(),
        "date_range": date_range,
    }
|
|
|
|
@staticmethod
|
|
def _calculate_complaint_metrics(complaints_qs) -> Dict[str, Any]:
|
|
"""Calculate detailed metrics for complaints"""
|
|
total = complaints_qs.count()
|
|
|
|
if total == 0:
|
|
return {
|
|
"total": 0,
|
|
"internal": 0,
|
|
"external": 0,
|
|
"status": {"open": 0, "in_progress": 0, "resolved": 0, "closed": 0},
|
|
"activation_time": {"within_2h": 0, "more_than_2h": 0, "not_assigned": 0},
|
|
"response_time": {
|
|
"within_24h": 0,
|
|
"within_48h": 0,
|
|
"within_72h": 0,
|
|
"more_than_72h": 0,
|
|
"not_responded": 0,
|
|
},
|
|
}
|
|
|
|
# Source breakdown
|
|
internal_count = complaints_qs.filter(source__name_en="staff").count()
|
|
external_count = total - internal_count
|
|
|
|
# Status breakdown
|
|
status_counts = {
|
|
"open": complaints_qs.filter(status="open").count(),
|
|
"in_progress": complaints_qs.filter(status="in_progress").count(),
|
|
"resolved": complaints_qs.filter(status="resolved").count(),
|
|
"closed": complaints_qs.filter(status="closed").count(),
|
|
}
|
|
|
|
# Activation time (assigned_at - created_at)
|
|
activation_within_2h = 0
|
|
activation_more_than_2h = 0
|
|
not_assigned = 0
|
|
|
|
for complaint in complaints_qs:
|
|
if complaint.assigned_at:
|
|
activation_time = (complaint.assigned_at - complaint.created_at).total_seconds()
|
|
if activation_time <= 7200: # 2 hours
|
|
activation_within_2h += 1
|
|
else:
|
|
activation_more_than_2h += 1
|
|
else:
|
|
not_assigned += 1
|
|
|
|
# Response time (time to first update)
|
|
response_within_24h = 0
|
|
response_within_48h = 0
|
|
response_within_72h = 0
|
|
response_more_than_72h = 0
|
|
not_responded = 0
|
|
|
|
for complaint in complaints_qs:
|
|
first_update = complaint.updates.first()
|
|
if first_update:
|
|
response_time = (first_update.created_at - complaint.created_at).total_seconds()
|
|
if response_time <= 86400: # 24 hours
|
|
response_within_24h += 1
|
|
elif response_time <= 172800: # 48 hours
|
|
response_within_48h += 1
|
|
elif response_time <= 259200: # 72 hours
|
|
response_within_72h += 1
|
|
else:
|
|
response_more_than_72h += 1
|
|
else:
|
|
not_responded += 1
|
|
|
|
return {
|
|
"total": total,
|
|
"internal": internal_count,
|
|
"external": external_count,
|
|
"status": status_counts,
|
|
"activation_time": {
|
|
"within_2h": activation_within_2h,
|
|
"more_than_2h": activation_more_than_2h,
|
|
"not_assigned": not_assigned,
|
|
},
|
|
"response_time": {
|
|
"within_24h": response_within_24h,
|
|
"within_48h": response_within_48h,
|
|
"within_72h": response_within_72h,
|
|
"more_than_72h": response_more_than_72h,
|
|
"not_responded": not_responded,
|
|
},
|
|
}
|
|
|
|
@staticmethod
|
|
def _calculate_inquiry_metrics(inquiries_qs) -> Dict[str, Any]:
|
|
"""Calculate detailed metrics for inquiries"""
|
|
total = inquiries_qs.count()
|
|
|
|
if total == 0:
|
|
return {
|
|
"total": 0,
|
|
"status": {"open": 0, "in_progress": 0, "resolved": 0, "closed": 0},
|
|
"response_time": {
|
|
"within_24h": 0,
|
|
"within_48h": 0,
|
|
"within_72h": 0,
|
|
"more_than_72h": 0,
|
|
"not_responded": 0,
|
|
},
|
|
}
|
|
|
|
# Status breakdown
|
|
status_counts = {
|
|
"open": inquiries_qs.filter(status="open").count(),
|
|
"in_progress": inquiries_qs.filter(status="in_progress").count(),
|
|
"resolved": inquiries_qs.filter(status="resolved").count(),
|
|
"closed": inquiries_qs.filter(status="closed").count(),
|
|
}
|
|
|
|
# Response time (responded_at - created_at)
|
|
response_within_24h = 0
|
|
response_within_48h = 0
|
|
response_within_72h = 0
|
|
response_more_than_72h = 0
|
|
not_responded = 0
|
|
|
|
for inquiry in inquiries_qs:
|
|
if inquiry.responded_at:
|
|
response_time = (inquiry.responded_at - inquiry.created_at).total_seconds()
|
|
if response_time <= 86400: # 24 hours
|
|
response_within_24h += 1
|
|
elif response_time <= 172800: # 48 hours
|
|
response_within_48h += 1
|
|
elif response_time <= 259200: # 72 hours
|
|
response_within_72h += 1
|
|
else:
|
|
response_more_than_72h += 1
|
|
else:
|
|
not_responded += 1
|
|
|
|
return {
|
|
"total": total,
|
|
"status": status_counts,
|
|
"response_time": {
|
|
"within_24h": response_within_24h,
|
|
"within_48h": response_within_48h,
|
|
"within_72h": response_within_72h,
|
|
"more_than_72h": response_more_than_72h,
|
|
"not_responded": not_responded,
|
|
},
|
|
}
|
|
|
|
@staticmethod
def _get_sentiment_distribution(start_date, end_date) -> Dict[str, Any]:
    """
    Donut-chart breakdown of sentiment analyses in [start_date, end_date].

    Segments are presented in a fixed positive/neutral/negative order; any
    unexpected sentiment values are appended after the known ones.
    """
    queryset = SentimentResult.objects.filter(created_at__gte=start_date, created_at__lte=end_date)

    counts = {
        row["sentiment"]: row["count"]
        for row in queryset.values("sentiment").annotate(count=Count("id"))
    }

    sentiment_labels = {"positive": "Positive", "neutral": "Neutral", "negative": "Negative"}
    sentiment_order = ["positive", "neutral", "negative"]

    # Bug fix: sentiment_order was previously defined but never applied, so
    # segment order depended on the database's GROUP BY ordering.
    ordered = [s for s in sentiment_order if s in counts]
    ordered += [s for s in counts if s not in sentiment_order]

    return {
        "type": "donut",
        "labels": [sentiment_labels.get(s, s) for s in ordered],
        "series": [counts[s] for s in ordered],
    }
|
|
|
|
@staticmethod
def _get_department_performance(user, start_date, end_date, hospital_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Get department performance rankings.

    Returns a bar chart of the top 10 active departments by average
    completed-survey score within [start_date, end_date]. Departments with
    no completed surveys in the window are excluded.

    Args:
        user: Current user; non-PX-admins fall back to their own hospital
            when no explicit hospital_id is given.
        start_date: Window start (inclusive).
        end_date: Window end (inclusive).
        hospital_id: Optional hospital filter; takes precedence over the
            user's hospital.
    """
    queryset = Department.objects.filter(status="active")

    # Explicit hospital filter wins; otherwise non-admins are scoped to
    # their own hospital.
    if hospital_id:
        queryset = queryset.filter(hospital_id=hospital_id)
    elif not user.is_px_admin() and user.hospital:
        queryset = queryset.filter(hospital=user.hospital)

    # Annotate with survey data
    # SurveyInstance links to PatientJourneyInstance which has department field
    departments = (
        queryset.annotate(
            # Filtered aggregates: only completed surveys inside the window
            # contribute to the average and the count.
            avg_survey_score=Avg(
                "journey_instances__surveys__total_score",
                filter=Q(
                    journey_instances__surveys__status="completed",
                    journey_instances__surveys__completed_at__gte=start_date,
                    journey_instances__surveys__completed_at__lte=end_date,
                ),
            ),
            survey_count=Count(
                "journey_instances__surveys",
                filter=Q(
                    journey_instances__surveys__status="completed",
                    journey_instances__surveys__completed_at__gte=start_date,
                    journey_instances__surveys__completed_at__lte=end_date,
                ),
            ),
        )
        .filter(survey_count__gt=0)
        .order_by("-avg_survey_score")[:10]
    )

    return {
        "type": "bar",
        "labels": [d.name for d in departments],
        "series": [{"name": "Average Score", "data": [round(d.avg_survey_score or 0, 2) for d in departments]}],
    }
|
|
|
|
@staticmethod
def _get_physician_leaderboard(
    user,
    start_date,
    end_date,
    hospital_id: Optional[str] = None,
    department_id: Optional[str] = None,
    limit: int = 10,
) -> Dict[str, Any]:
    """
    Bar-chart leaderboard of the top physicians for the current period.

    Rankings come from PhysicianMonthlyRating rows for the current calendar
    month (start_date/end_date are accepted for signature parity with the
    other chart helpers but are not used to filter here).
    """
    now = timezone.now()
    ratings = PhysicianMonthlyRating.objects.filter(year=now.year, month=now.month).select_related(
        "staff", "staff__hospital", "staff__department"
    )

    # Apply RBAC filters: non-admins only see their own hospital.
    if not user.is_px_admin() and user.hospital:
        ratings = ratings.filter(staff__hospital=user.hospital)

    if hospital_id:
        ratings = ratings.filter(staff__hospital_id=hospital_id)

    if department_id:
        ratings = ratings.filter(staff__department_id=department_id)

    # Materialize once so labels, series, and metadata share a single fetch.
    top_ratings = list(ratings.order_by("-average_rating")[:limit])

    labels = []
    values = []
    metadata = []
    for rating in top_ratings:
        staff = rating.staff
        full_name = f"{staff.first_name} {staff.last_name}"
        score = float(round(rating.average_rating, 2))
        labels.append(full_name)
        values.append(score)
        metadata.append(
            {
                "name": full_name,
                "physician_id": str(staff.id),
                "specialization": staff.specialization,
                "department": staff.department.name if staff.department else None,
                "rating": score,
                "surveys": int(rating.total_surveys) if rating.total_surveys is not None else 0,
                "positive": int(rating.positive_count) if rating.positive_count is not None else 0,
                "neutral": int(rating.neutral_count) if rating.neutral_count is not None else 0,
                "negative": int(rating.negative_count) if rating.negative_count is not None else 0,
            }
        )

    return {
        "type": "bar",
        "labels": labels,
        "series": [{"name": "Rating", "data": values}],
        "metadata": metadata,
    }
|
|
|
|
# ============================================================================
|
|
# ENHANCED ADMIN EVALUATION - Staff Performance Analytics
|
|
# ============================================================================
|
|
|
|
@staticmethod
def get_staff_detailed_performance(
    staff_id: str,
    user,
    date_range: str = "30d",
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get detailed performance metrics for a single staff member.

    Args:
        staff_id: Staff member UUID
        user: Current user (for permission checking)
        date_range: Date range filter
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: Detailed performance metrics with daily timeline, score
        breakdown, and the 10 most recent complaints/inquiries.

    Raises:
        PermissionError: If a non-PX-admin requests staff from another hospital.
        User.DoesNotExist: If staff_id matches no user (propagated from get()).
    """
    from apps.accounts.models import User

    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    staff = User.objects.select_related("hospital", "department").get(id=staff_id)

    # Check permissions: non-admins may only view staff in their own hospital.
    if not user.is_px_admin():
        if user.hospital and staff.hospital != user.hospital:
            raise PermissionError("Cannot view staff from other hospitals")

    # Get complaints with timeline (window is by creation date)
    complaints = Complaint.objects.filter(
        assigned_to=staff, created_at__gte=start_date, created_at__lte=end_date
    ).order_by("created_at")

    # Get inquiries with timeline
    inquiries = Inquiry.objects.filter(
        assigned_to=staff, created_at__gte=start_date, created_at__lte=end_date
    ).order_by("created_at")

    # Calculate daily workload for trend: pre-seed a zeroed entry for every
    # day in the window so charts get a continuous date axis.
    daily_stats = {}
    current = start_date.date()
    end = end_date.date()

    while current <= end:
        daily_stats[current.isoformat()] = {
            "complaints_created": 0,
            "complaints_resolved": 0,
            "inquiries_created": 0,
            "inquiries_resolved": 0,
        }
        current += timedelta(days=1)

    # Tally created/resolved per day. A resolution may fall on a different
    # day than the creation, and is only counted when that day lies inside
    # the pre-seeded window.
    for c in complaints:
        date_key = c.created_at.date().isoformat()
        if date_key in daily_stats:
            daily_stats[date_key]["complaints_created"] += 1
        if c.status in ["resolved", "closed"] and c.resolved_at:
            resolve_key = c.resolved_at.date().isoformat()
            if resolve_key in daily_stats:
                daily_stats[resolve_key]["complaints_resolved"] += 1

    for i in inquiries:
        date_key = i.created_at.date().isoformat()
        if date_key in daily_stats:
            daily_stats[date_key]["inquiries_created"] += 1
        if i.status in ["resolved", "closed"] and i.responded_at:
            respond_key = i.responded_at.date().isoformat()
            if respond_key in daily_stats:
                daily_stats[respond_key]["inquiries_resolved"] += 1

    # Calculate performance score (0-100) from the bucketed metrics
    complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
    inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)

    performance_score = UnifiedAnalyticsService._calculate_performance_score(complaint_metrics, inquiry_metrics)

    # Get recent items (newest first)
    recent_complaints = complaints.select_related("patient", "hospital").order_by("-created_at")[:10]
    recent_inquiries = inquiries.select_related("patient", "hospital").order_by("-created_at")[:10]

    return {
        "staff": {
            "id": str(staff.id),
            "name": f"{staff.first_name} {staff.last_name}",
            "email": staff.email,
            "hospital": staff.hospital.name if staff.hospital else None,
            "department": staff.department.name if staff.department else None,
            # First role name is treated as the primary role.
            "role": staff.get_role_names()[0] if staff.get_role_names() else "Staff",
        },
        "performance_score": performance_score,
        "period": {
            "start": start_date.isoformat(),
            "end": end_date.isoformat(),
            "days": (end_date - start_date).days,
        },
        "summary": {
            "total_complaints": complaint_metrics["total"],
            "total_inquiries": inquiry_metrics["total"],
            # max(..., 1) guards the zero-total case against division by zero.
            "complaint_resolution_rate": round(
                (complaint_metrics["status"]["resolved"] + complaint_metrics["status"]["closed"])
                / max(complaint_metrics["total"], 1)
                * 100,
                1,
            ),
            "inquiry_resolution_rate": round(
                (inquiry_metrics["status"]["resolved"] + inquiry_metrics["status"]["closed"])
                / max(inquiry_metrics["total"], 1)
                * 100,
                1,
            ),
        },
        "complaint_metrics": complaint_metrics,
        "inquiry_metrics": inquiry_metrics,
        "daily_trends": daily_stats,
        "recent_complaints": [
            {
                "id": str(c.id),
                "title": c.title,
                "status": c.status,
                "severity": c.severity,
                "created_at": c.created_at.isoformat(),
                "patient": c.patient.get_full_name() if c.patient else None,
            }
            for c in recent_complaints
        ],
        "recent_inquiries": [
            {
                "id": str(i.id),
                "subject": i.subject,
                "status": i.status,
                "created_at": i.created_at.isoformat(),
                "patient": i.patient.get_full_name() if i.patient else None,
            }
            for i in recent_inquiries
        ],
    }
|
|
|
|
@staticmethod
|
|
def _calculate_performance_score(complaint_metrics: Dict, inquiry_metrics: Dict) -> Dict[str, Any]:
|
|
"""
|
|
Calculate an overall performance score (0-100) based on multiple factors.
|
|
|
|
Returns score breakdown and overall rating.
|
|
"""
|
|
scores = {
|
|
"complaint_resolution": 0,
|
|
"complaint_response_time": 0,
|
|
"complaint_activation_time": 0,
|
|
"inquiry_resolution": 0,
|
|
"inquiry_response_time": 0,
|
|
"workload": 0,
|
|
}
|
|
|
|
total_complaints = complaint_metrics["total"]
|
|
total_inquiries = inquiry_metrics["total"]
|
|
|
|
if total_complaints > 0:
|
|
# Resolution score (40% weight)
|
|
resolved = complaint_metrics["status"]["resolved"] + complaint_metrics["status"]["closed"]
|
|
scores["complaint_resolution"] = min(100, (resolved / total_complaints) * 100)
|
|
|
|
# Response time score (20% weight)
|
|
response = complaint_metrics["response_time"]
|
|
on_time = response["within_24h"] + response["within_48h"]
|
|
total_with_response = on_time + response["within_72h"] + response["more_than_72h"]
|
|
if total_with_response > 0:
|
|
scores["complaint_response_time"] = min(100, (on_time / total_with_response) * 100)
|
|
|
|
# Activation time score (10% weight)
|
|
activation = complaint_metrics["activation_time"]
|
|
if activation["within_2h"] + activation["more_than_2h"] > 0:
|
|
scores["complaint_activation_time"] = min(
|
|
100, (activation["within_2h"] / (activation["within_2h"] + activation["more_than_2h"])) * 100
|
|
)
|
|
|
|
if total_inquiries > 0:
|
|
# Resolution score (15% weight)
|
|
resolved = inquiry_metrics["status"]["resolved"] + inquiry_metrics["status"]["closed"]
|
|
scores["inquiry_resolution"] = min(100, (resolved / total_inquiries) * 100)
|
|
|
|
# Response time score (10% weight)
|
|
response = inquiry_metrics["response_time"]
|
|
on_time = response["within_24h"] + response["within_48h"]
|
|
total_with_response = on_time + response["within_72h"] + response["more_than_72h"]
|
|
if total_with_response > 0:
|
|
scores["inquiry_response_time"] = min(100, (on_time / total_with_response) * 100)
|
|
|
|
# Workload score based on having reasonable volume (5% weight)
|
|
total_items = total_complaints + total_inquiries
|
|
if total_items >= 5:
|
|
scores["workload"] = 100
|
|
elif total_items > 0:
|
|
scores["workload"] = (total_items / 5) * 100
|
|
|
|
# Calculate weighted overall score
|
|
weights = {
|
|
"complaint_resolution": 0.25,
|
|
"complaint_response_time": 0.15,
|
|
"complaint_activation_time": 0.10,
|
|
"inquiry_resolution": 0.20,
|
|
"inquiry_response_time": 0.15,
|
|
"workload": 0.15,
|
|
}
|
|
|
|
overall_score = sum(scores[k] * weights[k] for k in scores)
|
|
|
|
# Determine rating
|
|
if overall_score >= 90:
|
|
rating = "Excellent"
|
|
rating_color = "success"
|
|
elif overall_score >= 75:
|
|
rating = "Good"
|
|
rating_color = "info"
|
|
elif overall_score >= 60:
|
|
rating = "Average"
|
|
rating_color = "warning"
|
|
elif overall_score >= 40:
|
|
rating = "Below Average"
|
|
rating_color = "danger"
|
|
else:
|
|
rating = "Needs Improvement"
|
|
rating_color = "dark"
|
|
|
|
return {
|
|
"overall": round(overall_score, 1),
|
|
"breakdown": scores,
|
|
"rating": rating,
|
|
"rating_color": rating_color,
|
|
"total_items_handled": total_complaints + total_inquiries,
|
|
}
|
|
|
|
@staticmethod
def get_staff_performance_trends(staff_id: str, user, months: int = 6) -> List[Dict[str, Any]]:
    """
    Get monthly performance trends for a staff member.

    Args:
        staff_id: Staff member UUID
        user: Current user (used for the cross-hospital permission check)
        months: Number of months to look back (including the current one)

    Returns:
        list: One entry per calendar month, oldest first, with the
        performance score, rating, and complaint/inquiry totals.

    Raises:
        PermissionError: If a non-admin tries to view staff of another hospital.
    """
    from apps.accounts.models import User

    staff = User.objects.get(id=staff_id)

    # Permission check: non-admins may only inspect staff in their own hospital.
    if not user.is_px_admin():
        if user.hospital and staff.hospital != user.hospital:
            raise PermissionError("Cannot view staff from other hospitals")

    trends = []
    now = timezone.now()
    current_month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)

    for i in range(months - 1, -1, -1):
        # Exact calendar-month arithmetic. (The previous implementation
        # stepped back in 30-day chunks, which drifted over time and could
        # skip or duplicate months — e.g. from Mar 1, "one month back"
        # landed on Jan 30, skipping February entirely.)
        year = current_month_start.year
        month = current_month_start.month - i
        while month <= 0:
            month += 12
            year -= 1
        month_start = current_month_start.replace(year=year, month=month)

        if month == 12:
            next_month_start = month_start.replace(year=year + 1, month=1)
        else:
            next_month_start = month_start.replace(month=month + 1)
        month_end = next_month_start - timedelta(seconds=1)

        # Get complaints for this month
        complaints = Complaint.objects.filter(
            assigned_to=staff, created_at__gte=month_start, created_at__lte=month_end
        )

        # Get inquiries for this month
        inquiries = Inquiry.objects.filter(
            assigned_to=staff, created_at__gte=month_start, created_at__lte=month_end
        )

        complaint_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(complaints)
        inquiry_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(inquiries)

        score_data = UnifiedAnalyticsService._calculate_performance_score(complaint_metrics, inquiry_metrics)

        trends.append(
            {
                "month": month_start.strftime("%Y-%m"),
                "month_name": month_start.strftime("%b %Y"),
                "performance_score": score_data["overall"],
                "rating": score_data["rating"],
                "complaints_total": complaint_metrics["total"],
                "complaints_resolved": complaint_metrics["status"]["resolved"]
                + complaint_metrics["status"]["closed"],
                "inquiries_total": inquiry_metrics["total"],
                "inquiries_resolved": inquiry_metrics["status"]["resolved"] + inquiry_metrics["status"]["closed"],
            }
        )

    return trends
|
|
|
|
@staticmethod
def get_department_benchmarks(
    user,
    department_id: Optional[str] = None,
    date_range: str = "30d",
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Compare the performance of all staff within a single department.

    The department is resolved from ``department_id`` when given,
    otherwise from the current user's own department. Staff members are
    ranked by their weighted performance score over the selected period.

    Args:
        user: Current user (fallback source for the department)
        department_id: Optional department filter
        date_range: Date range filter
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: Ranked staff scores plus department-level averages.
    """
    from apps.accounts.models import User
    from apps.organizations.models import Department

    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    # Resolve the target department, falling back to the caller's own.
    if department_id:
        department = Department.objects.get(id=department_id)
    elif user.department:
        department = user.department
    else:
        return {"error": "No department specified"}

    # Active staff in the department who have ever been assigned work.
    members = (
        User.objects.filter(department=department, is_active=True)
        .filter(Q(assigned_complaints__isnull=False) | Q(assigned_inquiries__isnull=False))
        .distinct()
    )

    rankings = []
    for member in members:
        member_complaints = Complaint.objects.filter(
            assigned_to=member, created_at__gte=start_date, created_at__lte=end_date
        )
        member_inquiries = Inquiry.objects.filter(
            assigned_to=member, created_at__gte=start_date, created_at__lte=end_date
        )

        c_metrics = UnifiedAnalyticsService._calculate_complaint_metrics(member_complaints)
        i_metrics = UnifiedAnalyticsService._calculate_inquiry_metrics(member_inquiries)
        score_data = UnifiedAnalyticsService._calculate_performance_score(c_metrics, i_metrics)

        rankings.append(
            {
                "id": str(member.id),
                "name": f"{member.first_name} {member.last_name}",
                "score": score_data["overall"],
                "rating": score_data["rating"],
                "total_items": score_data["total_items_handled"],
                "complaints": c_metrics["total"],
                "inquiries": i_metrics["total"],
            }
        )

    # Highest score first.
    rankings.sort(key=lambda entry: entry["score"], reverse=True)

    staff_count = len(rankings)
    avg_score = sum(entry["score"] for entry in rankings) / staff_count if staff_count else 0
    avg_items = sum(entry["total_items"] for entry in rankings) / staff_count if staff_count else 0

    return {
        "department": department.name,
        "period": {"start": start_date.isoformat(), "end": end_date.isoformat()},
        "staff_count": staff_count,
        "average_score": round(avg_score, 1),
        "average_items_per_staff": round(avg_items, 1),
        "top_performer": rankings[0] if rankings else None,
        "needs_improvement": [entry for entry in rankings if entry["score"] < 60],
        "rankings": rankings,
    }
|
|
|
|
@staticmethod
def export_staff_performance_report(
    staff_ids: List[str],
    user,
    date_range: str = "30d",
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
    format_type: str = "csv",
) -> Dict[str, Any]:
    """
    Build a flat, exportable staff performance report.

    Args:
        staff_ids: Staff UUIDs to include (empty means all visible staff)
        user: Current user
        date_range: Date range filter
        custom_start: Custom start date
        custom_end: Custom end date
        format_type: Requested export format ('csv', 'excel', 'json');
            only recorded in the payload — rendering happens elsewhere.

    Returns:
        dict: Report rows plus generation metadata.
    """
    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    # Reuse the standard per-staff metrics as the data source.
    performance_data = UnifiedAnalyticsService.get_staff_performance_metrics(
        user=user,
        date_range=date_range,
        staff_ids=staff_ids if staff_ids else None,
        custom_start=custom_start,
        custom_end=custom_end,
    )

    def resolution_rate(status_counts, total):
        """Resolved+closed as a percentage of total (0 when nothing assigned)."""
        if total > 0:
            return round((status_counts["resolved"] + status_counts["closed"]) / total * 100, 1)
        return 0

    rows = []
    for entry in performance_data["staff_metrics"]:
        complaints = entry["complaints"]
        inquiries = entry["inquiries"]

        rows.append(
            {
                "staff_name": entry["name"],
                "email": entry["email"],
                "hospital": entry["hospital"],
                "department": entry["department"],
                "complaints_total": complaints["total"],
                "complaints_internal": complaints["internal"],
                "complaints_external": complaints["external"],
                "complaints_open": complaints["status"]["open"],
                "complaints_resolved": complaints["status"]["resolved"],
                "complaints_closed": complaints["status"]["closed"],
                "complaint_resolution_rate": f"{resolution_rate(complaints['status'], complaints['total'])}%",
                "complaint_activation_within_2h": complaints["activation_time"]["within_2h"],
                "complaint_response_within_24h": complaints["response_time"]["within_24h"],
                "inquiries_total": inquiries["total"],
                "inquiries_open": inquiries["status"]["open"],
                "inquiries_resolved": inquiries["status"]["resolved"],
                "inquiry_resolution_rate": f"{resolution_rate(inquiries['status'], inquiries['total'])}%",
                "inquiry_response_within_24h": inquiries["response_time"]["within_24h"],
            }
        )

    return {
        "format": format_type,
        "generated_at": timezone.now().isoformat(),
        "period": {"start": start_date.isoformat(), "end": end_date.isoformat()},
        "total_staff": len(rows),
        "data": rows,
    }
|
|
|
|
# ============================================================================
|
|
# EMPLOYEE EVALUATION DASHBOARD METHODS
|
|
# ============================================================================
|
|
|
|
@staticmethod
def get_employee_evaluation_metrics(
    user,
    date_range: str = "7d",
    hospital_id: Optional[str] = None,
    department_id: Optional[str] = None,
    staff_ids: Optional[List[str]] = None,
    custom_start: Optional[datetime] = None,
    custom_end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get comprehensive employee evaluation metrics for the PAD Department Weekly Dashboard.

    Returns metrics for all 11 sections of the evaluation dashboard:
    1. Complaints by Response Time
    2. Complaint Source Breakdown
    3. Response Time by Source (CHI vs MOH)
    4. Patient Type Breakdown
    5. Department Type Breakdown
    6. Delays and Activation
    7. Escalated Complaints
    8. Inquiries
    9. Notes
    10. Complaint Request & Filling Details
    11. Report Completion Tracker

    Args:
        user: Current user
        date_range: Date range filter
        hospital_id: Optional hospital filter
        department_id: Optional department filter
        staff_ids: Optional list of specific staff IDs to evaluate
        custom_start: Custom start date
        custom_end: Custom end date

    Returns:
        dict: Employee evaluation metrics with all 11 sections per staff member
    """
    # NOTE: the dashboard models (EvaluationNote, ComplaintRequest, ...) are
    # imported by the per-section helpers themselves; importing them here as
    # well was dead code and has been removed.
    from apps.accounts.models import User

    start_date, end_date = UnifiedAnalyticsService._get_date_range(date_range, custom_start, custom_end)

    staff_qs = User.objects.all()

    # Non-admins only see staff from their own hospital.
    if not user.is_px_admin() and user.hospital:
        staff_qs = staff_qs.filter(hospital=user.hospital)

    # Apply optional filters.
    if hospital_id:
        staff_qs = staff_qs.filter(hospital_id=hospital_id)
    if department_id:
        staff_qs = staff_qs.filter(department_id=department_id)
    if staff_ids:
        staff_qs = staff_qs.filter(id__in=staff_ids)

    # Only staff with assigned complaints or inquiries are evaluated.
    staff_qs = (
        staff_qs.filter(Q(assigned_complaints__isnull=False) | Q(assigned_inquiries__isnull=False))
        .distinct()
        .prefetch_related("assigned_complaints", "assigned_inquiries")
    )

    # Section key -> helper that builds it; all share the
    # (staff_id, start_date, end_date) signature.
    section_builders = {
        "complaints_response_time": UnifiedAnalyticsService._get_complaint_response_time_breakdown,
        "complaint_sources": UnifiedAnalyticsService._get_complaint_source_breakdown,
        "response_time_by_source": UnifiedAnalyticsService._get_response_time_by_source,
        "patient_type_breakdown": UnifiedAnalyticsService._get_patient_type_breakdown,
        "department_type_breakdown": UnifiedAnalyticsService._get_department_type_breakdown,
        "delays_activation": UnifiedAnalyticsService._get_delays_and_activation,
        "escalated_complaints": UnifiedAnalyticsService._get_escalated_complaints_breakdown,
        "inquiries": UnifiedAnalyticsService._get_inquiries_breakdown,
        "notes": UnifiedAnalyticsService._get_notes_breakdown,
        "complaint_requests": UnifiedAnalyticsService._get_complaint_request_details,
    }

    staff_metrics = []

    for staff_member in staff_qs:
        staff_data = {
            "id": str(staff_member.id),
            "name": f"{staff_member.first_name} {staff_member.last_name}",
            "email": staff_member.email,
            "hospital": staff_member.hospital.name if staff_member.hospital else None,
            "department": staff_member.department.name if staff_member.department else None,
        }

        for section_key, builder in section_builders.items():
            staff_data[section_key] = builder(staff_member.id, start_date, end_date)

        # Report completion is keyed on the week start date only.
        staff_data["report_completion"] = UnifiedAnalyticsService._get_report_completion_tracker(
            staff_member.id, start_date
        )

        staff_metrics.append(staff_data)

    # Calculate summary totals across all staff
    summary = UnifiedAnalyticsService._get_evaluation_summary_totals(staff_metrics)

    return {
        "staff_metrics": staff_metrics,
        "summary": summary,
        "start_date": start_date.isoformat(),
        "end_date": end_date.isoformat(),
        "date_range": date_range,
    }
|
|
|
|
@staticmethod
def _get_complaint_response_time_breakdown(staff_id, start_date, end_date):
    """
    Bucket a staff member's assigned complaints by first-response time.

    Buckets: <=24h, <=48h, <=72h, >72h. Complaints with no update yet are
    included in ``total`` but fall into no bucket, so bucket counts may
    sum to less than ``total``.
    """
    from apps.complaints.models import Complaint

    assigned = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    )

    total = assigned.count()

    if not total:
        return {
            "24h": 0,
            "48h": 0,
            "72h": 0,
            "more_than_72h": 0,
            "total": 0,
            "percentages": {"24h": 0, "48h": 0, "72h": 0, "more_than_72h": 0},
        }

    buckets = {"24h": 0, "48h": 0, "72h": 0, "more_than_72h": 0}

    for item in assigned:
        update = item.updates.first()
        if update is None:
            continue  # no first response recorded yet
        elapsed_hours = (update.created_at - item.created_at).total_seconds() / 3600
        if elapsed_hours <= 24:
            buckets["24h"] += 1
        elif elapsed_hours <= 48:
            buckets["48h"] += 1
        elif elapsed_hours <= 72:
            buckets["72h"] += 1
        else:
            buckets["more_than_72h"] += 1

    result = dict(buckets)
    result["total"] = total
    result["percentages"] = {key: round((value / total) * 100, 1) for key, value in buckets.items()}
    return result
|
|
|
|
@staticmethod
def _get_complaint_source_breakdown(staff_id, start_date, end_date):
    """
    Count a staff member's complaints by originating source.

    Sources are matched by keywords in the PXSource English name; a
    complaint whose source matches none of the known keywords is not
    counted in any bucket.
    """
    from apps.complaints.models import Complaint

    qs = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    ).select_related("source")

    counts = {"MOH": 0, "CCHI": 0, "Patients": 0, "Patient_relatives": 0, "Insurance_company": 0}

    for item in qs:
        if not item.source:
            continue
        name = item.source.name_en.upper()
        # Keyword order matters: MOH wins over CCHI/CHI, and PATIENT wins
        # over FAMILY/RELATIVE when a name matches several keywords.
        if "MOH" in name or "MINISTRY" in name:
            counts["MOH"] += 1
        elif "CCHI" in name or "CHI" in name or "COUNCIL" in name:
            counts["CCHI"] += 1
        elif "PATIENT" in name:
            counts["Patients"] += 1
        elif "FAMILY" in name or "RELATIVE" in name:
            counts["Patient_relatives"] += 1
        elif "INSURANCE" in name:
            counts["Insurance_company"] += 1

    total = sum(counts.values())
    percentages = (
        {key: round((value / total) * 100, 1) for key, value in counts.items()}
        if total > 0
        else {key: 0 for key in counts}
    )

    return {"counts": counts, "total": total, "percentages": percentages}
|
|
|
|
@staticmethod
def _get_response_time_by_source(staff_id, start_date, end_date):
    """
    Cross-tabulate first-response time against source (CHI vs MOH).

    Only complaints sourced from CHI/CCHI or MOH that have at least one
    update are counted.
    """
    from apps.complaints.models import Complaint

    qs = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    ).select_related("source")

    categories = ("24h", "48h", "72h", "more_than_72h")
    matrix = {"CHI": {c: 0 for c in categories}, "MOH": {c: 0 for c in categories}}

    for item in qs:
        if not item.source:
            continue

        name = item.source.name_en.upper()
        chi = "CCHI" in name or "CHI" in name
        moh = "MOH" in name or "MINISTRY" in name
        if not (chi or moh):
            continue

        update = item.updates.first()
        if not update:
            continue

        hours = (update.created_at - item.created_at).total_seconds() / 3600
        if hours <= 24:
            bucket = "24h"
        elif hours <= 48:
            bucket = "48h"
        elif hours <= 72:
            bucket = "72h"
        else:
            bucket = "more_than_72h"

        # CHI takes precedence when a name matches both keyword sets.
        if chi:
            matrix["CHI"][bucket] += 1
        elif moh:
            matrix["MOH"][bucket] += 1

    return {
        "24h": {"CHI": matrix["CHI"]["24h"], "MOH": matrix["MOH"]["24h"]},
        "48h": {"CHI": matrix["CHI"]["48h"], "MOH": matrix["MOH"]["48h"]},
        "72h": {"CHI": matrix["CHI"]["72h"], "MOH": matrix["MOH"]["72h"]},
        "more_than_72h": {
            "CHI": matrix["CHI"]["more_than_72h"],
            "MOH": matrix["MOH"]["more_than_72h"],
        },
        "totals": {
            "CHI": sum(matrix["CHI"].values()),
            "MOH": sum(matrix["MOH"].values()),
        },
    }
|
|
|
|
@staticmethod
def _get_patient_type_breakdown(staff_id, start_date, end_date):
    """
    Count complaints by patient type (In-Patient / Out-Patient / ER).

    The type is read from ``complaint.metadata["patient_type"]`` when
    present (unknown values are dropped); complaints without the key
    default to Out-Patient.
    NOTE(review): a real implementation should derive this from the
    patient's admission status rather than complaint metadata — confirm.
    """
    from apps.complaints.models import Complaint

    qs = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    )

    tallies = {"In_Patient": 0, "Out_Patient": 0, "ER": 0}

    for item in qs:
        if item.metadata and "patient_type" in item.metadata:
            patient_type = item.metadata["patient_type"]
            # Unrecognised values are deliberately ignored, matching the
            # original behavior for malformed metadata.
            if patient_type in tallies:
                tallies[patient_type] += 1
        else:
            # No patient-type info available: assume Out-Patient.
            tallies["Out_Patient"] += 1

    total = sum(tallies.values())
    percentages = (
        {key: round((value / total) * 100, 1) for key, value in tallies.items()}
        if total > 0
        else {key: 0 for key in tallies}
    )

    return {"counts": tallies, "total": total, "percentages": percentages}
|
|
|
|
@staticmethod
def _get_department_type_breakdown(staff_id, start_date, end_date):
    """
    Count complaints by department type (Medical / Admin / Nursing /
    Support Services), matched by keywords in the department name.

    Complaints with no department, or whose department name matches no
    keyword, default to the Medical bucket.
    """
    from apps.complaints.models import Complaint

    qs = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    ).select_related("department")

    tallies = {"Medical": 0, "Admin": 0, "Nursing": 0, "Support_Services": 0}

    for item in qs:
        if not item.department:
            tallies["Medical"] += 1
            continue

        name = item.department.name.upper()
        if any(kw in name for kw in ("MEDICAL", "CLINIC")):
            tallies["Medical"] += 1
        elif any(kw in name for kw in ("ADMIN", "HR", "FINANCE")):
            tallies["Admin"] += 1
        elif "NURS" in name:
            tallies["Nursing"] += 1
        elif any(kw in name for kw in ("SUPPORT", "MAINT", "HOUSEKEEP")):
            tallies["Support_Services"] += 1
        else:
            # Default bucket when no keyword matches.
            tallies["Medical"] += 1

    total = sum(tallies.values())
    percentages = (
        {key: round((value / total) * 100, 1) for key, value in tallies.items()}
        if total > 0
        else {key: 0 for key in tallies}
    )

    return {"counts": tallies, "total": total, "percentages": percentages}
|
|
|
|
@staticmethod
def _get_delays_and_activation(staff_id, start_date, end_date):
    """
    Split a staff member's complaints into on-time activations vs delays.

    A complaint counts as activated on time only when ``activated_at`` is
    set and falls within 2 hours of creation; everything else (late or
    never activated) is a delay.
    """
    from apps.complaints.models import Complaint

    qs = Complaint.objects.filter(
        assigned_to_id=staff_id, created_at__gte=start_date, created_at__lte=end_date
    )

    total = qs.count()
    on_time = 0
    delayed = 0

    for item in qs:
        if item.activated_at and (item.activated_at - item.created_at).total_seconds() <= 2 * 3600:
            on_time += 1
        else:
            # Never activated, or activated after the 2-hour target.
            delayed += 1

    return {
        "delays": delayed,
        "activated_within_2h": on_time,
        "total": total,
        "percentages": {
            "delays": round((delayed / total) * 100, 1) if total > 0 else 0,
            "activated": round((on_time / total) * 100, 1) if total > 0 else 0,
        },
    }
|
|
|
|
@staticmethod
def _get_escalated_complaints_breakdown(staff_id, start_date, end_date):
    """
    Summarise escalated-complaint log entries for a staff member.

    Buckets escalations by timing relative to the 72h mark and counts
    resolved entries separately (a log row can contribute to both).
    """
    from apps.dashboard.models import EscalatedComplaintLog

    entries = EscalatedComplaintLog.objects.filter(
        staff_id=staff_id, week_start_date__gte=start_date.date(), week_start_date__lte=end_date.date()
    )

    timing_buckets = ("before_72h", "exactly_72h", "after_72h")
    breakdown = {"before_72h": 0, "exactly_72h": 0, "after_72h": 0, "resolved": 0}

    for entry in entries:
        if entry.escalation_timing in timing_buckets:
            breakdown[entry.escalation_timing] += 1
        if entry.is_resolved:
            breakdown["resolved"] += 1

    breakdown["total_escalated"] = sum(breakdown[key] for key in timing_buckets)

    return breakdown
|
|
|
|
@staticmethod
def _get_inquiries_breakdown(staff_id, start_date, end_date):
    """
    Break down a staff member's inquiry log by direction, response time,
    status, and inquiry type.

    Returns:
        dict: ``incoming`` and ``outgoing`` sections (each with a total,
        time buckets, status buckets and per-type counts) plus the grand
        total of inquiry detail rows.
    """
    from apps.dashboard.models import InquiryDetail

    inquiry_details = InquiryDetail.objects.filter(
        staff_id=staff_id, inquiry_date__gte=start_date.date(), inquiry_date__lte=end_date.date()
    )

    incoming = inquiry_details.filter(is_outgoing=False)
    outgoing = inquiry_details.filter(is_outgoing=True)

    def get_inquiry_metrics(queryset):
        """Count rows in *queryset* by response-time category and status."""
        time_counts = {"24h": 0, "48h": 0, "72h": 0, "more_than_72h": 0}
        status_counts = {"in_progress": 0, "contacted": 0, "contacted_no_response": 0}

        for detail in queryset:
            if detail.response_time_category:
                time_counts[detail.response_time_category] += 1
            if detail.inquiry_status:
                status_counts[detail.inquiry_status] += 1

        return {"total": queryset.count(), "by_time": time_counts, "by_status": status_counts}

    # Per-type counts for each direction. (An unused ``inquiry_types``
    # local that duplicated the choices mapping was removed.)
    incoming_types = {t[0]: incoming.filter(inquiry_type=t[0]).count() for t in InquiryDetail.INQUIRY_TYPE_CHOICES}
    outgoing_types = {t[0]: outgoing.filter(inquiry_type=t[0]).count() for t in InquiryDetail.INQUIRY_TYPE_CHOICES}

    return {
        "incoming": {**get_inquiry_metrics(incoming), "by_type": incoming_types},
        "outgoing": {**get_inquiry_metrics(outgoing), "by_type": outgoing_types},
        "total": inquiry_details.count(),
    }
|
|
|
|
@staticmethod
def _get_notes_breakdown(staff_id, start_date, end_date):
    """
    Aggregate evaluation notes by category and sub-category.

    Each note row carries a ``count``; totals are sums of those counts,
    not row counts.
    """
    from apps.dashboard.models import EvaluationNote

    notes = EvaluationNote.objects.filter(
        staff_id=staff_id, note_date__gte=start_date.date(), note_date__lte=end_date.date()
    )

    grand_total = notes.aggregate(total=Sum("count"))["total"] or 0

    category_labels = dict(EvaluationNote.CATEGORY_CHOICES)
    subcategory_labels = dict(EvaluationNote.SUBCATEGORY_CHOICES)

    by_category = {}

    for note in notes:
        category_entry = by_category.setdefault(
            note.category,
            {"name": category_labels.get(note.category, note.category), "total": 0, "subcategories": {}},
        )
        category_entry["total"] += note.count

        sub_entry = category_entry["subcategories"].setdefault(
            note.sub_category,
            {"name": subcategory_labels.get(note.sub_category, note.sub_category), "count": 0},
        )
        sub_entry["count"] += note.count

    return {"total": grand_total, "by_category": by_category}
|
|
|
|
@staticmethod
def _get_complaint_request_details(staff_id, start_date, end_date):
    """
    Summarise complaint-request filling activity for a staff member.

    Returns counts of filled / not-filled / on-hold / barcode-sourced
    requests, a breakdown by filling-time category, and percentages over
    the total request count.
    """
    from apps.dashboard.models import ComplaintRequest

    requests = ComplaintRequest.objects.filter(
        staff_id=staff_id, request_date__gte=start_date.date(), request_date__lte=end_date.date()
    )

    total = requests.count()
    filled = requests.filter(filled=True).count()
    not_filled = requests.filter(not_filled=True).count()
    on_hold = requests.filter(on_hold=True).count()
    from_barcode = requests.filter(from_barcode=True).count()

    # Per filling-time-category counts. (An unused ``filling_times`` local
    # that duplicated the choices mapping was removed.)
    time_breakdown = {
        t[0]: requests.filter(filling_time_category=t[0]).count() for t in ComplaintRequest.FILLING_TIME_CHOICES
    }

    def pct(part):
        """Percentage of *part* over total, 0 when there are no requests."""
        return round((part / total) * 100, 1) if total > 0 else 0

    return {
        "total": total,
        "filled": filled,
        "not_filled": not_filled,
        "on_hold": on_hold,
        "from_barcode": from_barcode,
        "filling_time_breakdown": time_breakdown,
        "percentages": {
            "filled": pct(filled),
            "not_filled": pct(not_filled),
            "on_hold": pct(on_hold),
        },
    }
|
|
|
|
@staticmethod
def _get_report_completion_tracker(staff_id, week_start_date):
    """
    Report-completion checklist for a staff member for one week.

    Every report type defined on ReportCompletion appears in the output,
    marked completed only when a matching completion row says so.
    """
    from apps.dashboard.models import ReportCompletion

    # Accept either a datetime (take its date) or a plain date.
    week = week_start_date.date() if hasattr(week_start_date, "date") else week_start_date
    completions = ReportCompletion.objects.filter(staff_id=staff_id, week_start_date=week)

    report_rows = []
    done = 0

    for type_code, type_label in ReportCompletion.REPORT_TYPE_CHOICES:
        record = completions.filter(report_type=type_code).first()
        completed = record.is_completed if record else False

        if completed:
            done += 1

        report_rows.append(
            {
                "type": type_code,
                "name": type_label,
                "completed": completed,
                "completed_at": record.completed_at.isoformat() if record and record.completed_at else None,
            }
        )

    expected = len(ReportCompletion.REPORT_TYPE_CHOICES)

    return {
        "reports": report_rows,
        "completed_count": done,
        "total_reports": expected,
        "completion_percentage": round((done / expected) * 100, 1) if expected > 0 else 0,
    }
|
|
|
|
@staticmethod
|
|
def _get_evaluation_summary_totals(staff_metrics):
|
|
"""
|
|
Calculate summary totals across all staff members.
|
|
"""
|
|
summary = {
|
|
"total_complaints": 0,
|
|
"total_inquiries": 0,
|
|
"total_notes": 0,
|
|
"total_escalated": 0,
|
|
"complaints_by_response_time": {"24h": 0, "48h": 0, "72h": 0, "more_than_72h": 0},
|
|
"complaints_by_source": {
|
|
"MOH": 0,
|
|
"CCHI": 0,
|
|
"Patients": 0,
|
|
"Patient_relatives": 0,
|
|
"Insurance_company": 0,
|
|
},
|
|
}
|
|
|
|
for staff in staff_metrics:
|
|
# Total complaints
|
|
summary["total_complaints"] += staff["complaints_response_time"]["total"]
|
|
|
|
# Total inquiries
|
|
summary["total_inquiries"] += staff["inquiries"]["total"]
|
|
|
|
# Total notes
|
|
summary["total_notes"] += staff["notes"]["total"]
|
|
|
|
# Total escalated
|
|
summary["total_escalated"] += staff["escalated_complaints"].get("total_escalated", 0)
|
|
|
|
# Response time totals
|
|
for key in ["24h", "48h", "72h", "more_than_72h"]:
|
|
summary["complaints_by_response_time"][key] += staff["complaints_response_time"].get(key, 0)
|
|
|
|
# Source totals
|
|
for key in summary["complaints_by_source"]:
|
|
summary["complaints_by_source"][key] += staff["complaint_sources"]["counts"].get(key, 0)
|
|
|
|
return summary
|