# HH/apps/analytics/services/analytics_service.py
"""
Unified Analytics Service
Provides comprehensive analytics and metrics for the PX Command Center Dashboard.
Consolidates data from complaints, surveys, actions, physicians, and other modules.
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from django.db.models import Avg, Count, Q, Sum, F, ExpressionWrapper, DurationField
from django.utils import timezone
from django.core.cache import cache
from apps.complaints.models import Complaint, ComplaintStatus
from apps.complaints.analytics import ComplaintAnalytics
from apps.px_action_center.models import PXAction
from apps.surveys.models import SurveyInstance
from apps.social.models import SocialMediaComment
from apps.callcenter.models import CallCenterInteraction
from apps.physicians.models import PhysicianMonthlyRating
from apps.organizations.models import Department, Hospital
from apps.ai_engine.models import SentimentResult
from apps.analytics.models import KPI, KPIValue
class UnifiedAnalyticsService:
"""
Unified service for all PX360 analytics and KPIs.
Provides methods to retrieve:
- All KPIs with filters
- Chart data for various visualizations
- Department performance metrics
- Physician analytics
- Sentiment analysis metrics
- SLA compliance data
"""
# Cache timeout (in seconds) - 5 minutes for most data
CACHE_TIMEOUT = 300
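    # Typical dashboard usage (illustrative sketch; assumes the caller passes the
    # authenticated request.user, which must expose the role helpers used below,
    # e.g. is_px_admin()):
    #
    #   kpis = UnifiedAnalyticsService.get_all_kpis(request.user, date_range='30d')
    #   trend = UnifiedAnalyticsService.get_chart_data(request.user, 'complaints_trend')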
@staticmethod
def _get_cache_key(prefix: str, **kwargs) -> str:
"""Generate cache key based on parameters"""
parts = [prefix]
for key, value in sorted(kwargs.items()):
if value is not None:
parts.append(f"{key}:{value}")
return ":".join(parts)
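    # Example of the generated key (None-valued parameters are dropped, keys are sorted):
    #   _get_cache_key('all_kpis', user_id=7, date_range='30d', hospital_id=None)
    #   -> 'all_kpis:date_range:30d:user_id:7'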
@staticmethod
def _get_date_range(date_range: str, custom_start=None, custom_end=None) -> tuple:
"""
Get start and end dates based on date_range parameter.
Args:
date_range: '7d', '30d', '90d', 'this_month', 'last_month', 'quarter', 'year', or 'custom'
custom_start: Custom start date (required if date_range='custom')
custom_end: Custom end date (required if date_range='custom')
Returns:
tuple: (start_date, end_date)
"""
now = timezone.now()
if date_range == 'custom' and custom_start and custom_end:
return custom_start, custom_end
date_ranges = {
'7d': timedelta(days=7),
'30d': timedelta(days=30),
'90d': timedelta(days=90),
}
if date_range in date_ranges:
end_date = now
start_date = now - date_ranges[date_range]
return start_date, end_date
elif date_range == 'this_month':
start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
end_date = now
return start_date, end_date
elif date_range == 'last_month':
if now.month == 1:
start_date = now.replace(year=now.year-1, month=12, day=1, hour=0, minute=0, second=0, microsecond=0)
end_date = now.replace(year=now.year-1, month=12, day=31, hour=23, minute=59, second=59)
else:
start_date = now.replace(month=now.month-1, day=1, hour=0, minute=0, second=0, microsecond=0)
# Get last day of previous month
next_month = now.replace(day=1)
last_day = (next_month - timedelta(days=1)).day
end_date = now.replace(month=now.month-1, day=last_day, hour=23, minute=59, second=59)
return start_date, end_date
elif date_range == 'quarter':
current_quarter = (now.month - 1) // 3
start_month = current_quarter * 3 + 1
start_date = now.replace(month=start_month, day=1, hour=0, minute=0, second=0, microsecond=0)
end_date = now
return start_date, end_date
elif date_range == 'year':
start_date = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
end_date = now
return start_date, end_date
# Default to 30 days
return now - timedelta(days=30), now
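    # For example, _get_date_range('7d') returns (now - 7 days, now), and
    # _get_date_range('this_month') returns (first day of the current month at 00:00, now).
    # Unrecognized values fall back to the trailing 30 days.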
@staticmethod
def _filter_by_role(queryset, user) -> Any:
"""
Filter queryset based on user role and permissions.
Args:
queryset: Django queryset
user: User object
Returns:
Filtered queryset
"""
        # Scope only when the model exposes the relevant hospital/department fields
        if hasattr(queryset.model, 'hospital'):
            if user.is_px_admin():
                pass  # PX admins see all records
            elif user.is_hospital_admin() and user.hospital:
                queryset = queryset.filter(hospital=user.hospital)
            elif (user.is_department_manager() and user.department
                  and hasattr(queryset.model, 'department')):
                queryset = queryset.filter(department=user.department)
            else:
                queryset = queryset.none()
        return queryset
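    # For example, a hospital admin calling _filter_by_role(Complaint.objects.all(), user)
    # gets only complaints for user.hospital, while a PX admin gets the unfiltered queryset.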
@staticmethod
def get_all_kpis(
user,
date_range: str = '30d',
hospital_id: Optional[str] = None,
department_id: Optional[str] = None,
kpi_category: Optional[str] = None,
custom_start: Optional[datetime] = None,
custom_end: Optional[datetime] = None
) -> Dict[str, Any]:
"""
Get all KPIs with applied filters.
Args:
user: Current user
date_range: Date range filter
hospital_id: Optional hospital filter
department_id: Optional department filter
kpi_category: Optional KPI category filter
custom_start: Custom start date
custom_end: Custom end date
Returns:
dict: All KPI values
"""
start_date, end_date = UnifiedAnalyticsService._get_date_range(
date_range, custom_start, custom_end
)
cache_key = UnifiedAnalyticsService._get_cache_key(
'all_kpis',
user_id=user.id,
date_range=date_range,
hospital_id=hospital_id,
department_id=department_id,
kpi_category=kpi_category
)
        cached_data = cache.get(cache_key)
        if cached_data is not None:
            return cached_data
# Get base querysets with role filtering
complaints_qs = UnifiedAnalyticsService._filter_by_role(
Complaint.objects.all(), user
).filter(created_at__gte=start_date, created_at__lte=end_date)
actions_qs = UnifiedAnalyticsService._filter_by_role(
PXAction.objects.all(), user
).filter(created_at__gte=start_date, created_at__lte=end_date)
surveys_qs = UnifiedAnalyticsService._filter_by_role(
SurveyInstance.objects.all(), user
).filter(
completed_at__gte=start_date,
completed_at__lte=end_date,
status='completed'
)
# Apply additional filters
if hospital_id:
hospital = Hospital.objects.filter(id=hospital_id).first()
if hospital:
complaints_qs = complaints_qs.filter(hospital=hospital)
actions_qs = actions_qs.filter(hospital=hospital)
surveys_qs = surveys_qs.filter(survey_template__hospital=hospital)
if department_id:
department = Department.objects.filter(id=department_id).first()
if department:
complaints_qs = complaints_qs.filter(department=department)
actions_qs = actions_qs.filter(department=department)
surveys_qs = surveys_qs.filter(journey_stage_instance__department=department)
# Calculate KPIs
kpis = {
# Complaints KPIs
'total_complaints': int(complaints_qs.count()),
'open_complaints': int(complaints_qs.filter(status__in=['open', 'in_progress']).count()),
'overdue_complaints': int(complaints_qs.filter(is_overdue=True).count()),
'high_severity_complaints': int(complaints_qs.filter(severity__in=['high', 'critical']).count()),
'resolved_complaints': int(complaints_qs.filter(status__in=['resolved', 'closed']).count()),
# Actions KPIs
'total_actions': int(actions_qs.count()),
'open_actions': int(actions_qs.filter(status__in=['open', 'in_progress']).count()),
'overdue_actions': int(actions_qs.filter(is_overdue=True).count()),
'escalated_actions': int(actions_qs.filter(escalation_level__gt=0).count()),
'resolved_actions': int(actions_qs.filter(status='completed').count()),
# Survey KPIs
'total_surveys': int(surveys_qs.count()),
'negative_surveys': int(surveys_qs.filter(is_negative=True).count()),
'avg_survey_score': float(surveys_qs.aggregate(avg=Avg('total_score'))['avg'] or 0),
# Social Media KPIs
'negative_social_comments': int(SocialMediaComment.objects.filter(
sentiment='negative',
published_at__gte=start_date,
published_at__lte=end_date
).count()),
# Call Center KPIs
'low_call_ratings': int(CallCenterInteraction.objects.filter(
is_low_rating=True,
call_started_at__gte=start_date,
call_started_at__lte=end_date
).count()),
# Sentiment KPIs
'total_sentiment_analyses': int(SentimentResult.objects.filter(
created_at__gte=start_date,
created_at__lte=end_date
).count()),
}
        # Add trends (compare with the previous period of equal length)
        duration = end_date - start_date
        prev_start = start_date - duration
        prev_end = start_date
        # complaints_qs is already limited to the current period, so the previous
        # window must be counted from a fresh role-filtered queryset.
        prev_complaints = int(
            UnifiedAnalyticsService._filter_by_role(Complaint.objects.all(), user)
            .filter(created_at__gte=prev_start, created_at__lte=prev_end)
            .count()
        )
kpis['complaints_trend'] = {
'current': kpis['total_complaints'],
'previous': prev_complaints,
'percentage_change': float(
((kpis['total_complaints'] - prev_complaints) / prev_complaints * 100)
if prev_complaints > 0 else 0
)
}
# Cache the results
cache.set(cache_key, kpis, UnifiedAnalyticsService.CACHE_TIMEOUT)
return kpis
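    # Illustrative view-layer call (sketch; filter values would normally come from
    # request query parameters):
    #
    #   kpis = UnifiedAnalyticsService.get_all_kpis(
    #       request.user,
    #       date_range='this_month',
    #       hospital_id=request.GET.get('hospital'),
    #       department_id=request.GET.get('department'),
    #   )
    #   kpis['complaints_trend']['percentage_change']  # e.g. 12.5 (% vs. previous period)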
@staticmethod
def get_chart_data(
user,
chart_type: str,
date_range: str = '30d',
hospital_id: Optional[str] = None,
department_id: Optional[str] = None,
custom_start: Optional[datetime] = None,
custom_end: Optional[datetime] = None
) -> Dict[str, Any]:
"""
Get data for specific chart types.
Args:
user: Current user
chart_type: Type of chart ('complaints_trend', 'sla_compliance', 'survey_satisfaction', etc.)
date_range: Date range filter
hospital_id: Optional hospital filter
department_id: Optional department filter
custom_start: Custom start date
custom_end: Custom end date
Returns:
dict: Chart data in format suitable for ApexCharts
"""
start_date, end_date = UnifiedAnalyticsService._get_date_range(
date_range, custom_start, custom_end
)
cache_key = UnifiedAnalyticsService._get_cache_key(
f'chart_{chart_type}',
user_id=user.id,
date_range=date_range,
hospital_id=hospital_id,
department_id=department_id
)
        cached_data = cache.get(cache_key)
        if cached_data is not None:
            return cached_data
# Get base complaint queryset
complaints_qs = UnifiedAnalyticsService._filter_by_role(
Complaint.objects.all(), user
).filter(created_at__gte=start_date, created_at__lte=end_date)
surveys_qs = UnifiedAnalyticsService._filter_by_role(
SurveyInstance.objects.all(), user
).filter(
completed_at__gte=start_date,
completed_at__lte=end_date,
status='completed'
)
# Apply filters
if hospital_id:
complaints_qs = complaints_qs.filter(hospital_id=hospital_id)
surveys_qs = surveys_qs.filter(survey_template__hospital_id=hospital_id)
if department_id:
complaints_qs = complaints_qs.filter(department_id=department_id)
surveys_qs = surveys_qs.filter(journey_stage_instance__department_id=department_id)
if chart_type == 'complaints_trend':
data = UnifiedAnalyticsService._get_complaints_trend(complaints_qs, start_date, end_date)
elif chart_type == 'complaints_by_category':
data = UnifiedAnalyticsService._get_complaints_by_category(complaints_qs)
elif chart_type == 'complaints_by_severity':
data = UnifiedAnalyticsService._get_complaints_by_severity(complaints_qs)
        elif chart_type == 'sla_compliance':
            data = ComplaintAnalytics.get_sla_compliance(
                Hospital.objects.filter(id=hospital_id).first() if hospital_id else None,
                days=(end_date - start_date).days
            )
        elif chart_type == 'resolution_rate':
            data = ComplaintAnalytics.get_resolution_rate(
                Hospital.objects.filter(id=hospital_id).first() if hospital_id else None,
                days=(end_date - start_date).days
            )
elif chart_type == 'survey_satisfaction_trend':
data = UnifiedAnalyticsService._get_survey_satisfaction_trend(surveys_qs, start_date, end_date)
elif chart_type == 'survey_distribution':
data = UnifiedAnalyticsService._get_survey_distribution(surveys_qs)
elif chart_type == 'sentiment_distribution':
data = UnifiedAnalyticsService._get_sentiment_distribution(start_date, end_date)
elif chart_type == 'department_performance':
data = UnifiedAnalyticsService._get_department_performance(
user, start_date, end_date, hospital_id
)
elif chart_type == 'physician_leaderboard':
data = UnifiedAnalyticsService._get_physician_leaderboard(
user, start_date, end_date, hospital_id, department_id, limit=10
)
else:
data = {'error': f'Unknown chart type: {chart_type}'}
cache.set(cache_key, data, UnifiedAnalyticsService.CACHE_TIMEOUT)
return data
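    # Illustrative wiring of the returned payload into ApexCharts (sketch; the
    # client-side option names shown are standard ApexCharts keys):
    #
    #   data = UnifiedAnalyticsService.get_chart_data(request.user, 'complaints_by_severity')
    #   # data -> {'type': 'pie', 'labels': ['High', 'Medium', ...], 'series': [12, 30, ...]}
    #   # On the client: new ApexCharts(el, {chart: {type: data.type},
    #   #                                    labels: data.labels, series: data.series})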
@staticmethod
def _get_complaints_trend(queryset, start_date, end_date) -> Dict[str, Any]:
"""Get complaints trend over time (grouped by day)"""
data = []
current_date = start_date
while current_date <= end_date:
next_date = current_date + timedelta(days=1)
count = queryset.filter(
created_at__gte=current_date,
created_at__lt=next_date
).count()
data.append({
'date': current_date.strftime('%Y-%m-%d'),
'count': count
})
current_date = next_date
return {
'type': 'line',
'labels': [d['date'] for d in data],
'series': [{'name': 'Complaints', 'data': [d['count'] for d in data]}]
}
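    # Example output shape for a 3-day window (illustrative values):
    #   {'type': 'line',
    #    'labels': ['2026-01-01', '2026-01-02', '2026-01-03'],
    #    'series': [{'name': 'Complaints', 'data': [4, 0, 7]}]}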
@staticmethod
def _get_complaints_by_category(queryset) -> Dict[str, Any]:
"""Get complaints breakdown by category"""
categories = queryset.values('category').annotate(
count=Count('id')
).order_by('-count')
return {
'type': 'donut',
'labels': [c['category'] or 'Uncategorized' for c in categories],
'series': [c['count'] for c in categories]
}
@staticmethod
def _get_complaints_by_severity(queryset) -> Dict[str, Any]:
"""Get complaints breakdown by severity"""
severity_counts = queryset.values('severity').annotate(
count=Count('id')
).order_by('-count')
severity_labels = {
'low': 'Low',
'medium': 'Medium',
'high': 'High',
'critical': 'Critical'
}
return {
'type': 'pie',
'labels': [severity_labels.get(s['severity'], s['severity']) for s in severity_counts],
'series': [s['count'] for s in severity_counts]
}
@staticmethod
def _get_survey_satisfaction_trend(queryset, start_date, end_date) -> Dict[str, Any]:
"""Get survey satisfaction trend over time"""
data = []
current_date = start_date
while current_date <= end_date:
next_date = current_date + timedelta(days=1)
avg_score = queryset.filter(
completed_at__gte=current_date,
completed_at__lt=next_date
).aggregate(avg=Avg('total_score'))['avg'] or 0
data.append({
'date': current_date.strftime('%Y-%m-%d'),
'score': round(avg_score, 2)
})
current_date = next_date
return {
'type': 'line',
'labels': [d['date'] for d in data],
'series': [{'name': 'Satisfaction', 'data': [d['score'] for d in data]}]
}
@staticmethod
def _get_survey_distribution(queryset) -> Dict[str, Any]:
"""Get survey distribution by satisfaction level"""
distribution = {
'excellent': queryset.filter(total_score__gte=4.5).count(),
'good': queryset.filter(total_score__gte=3.5, total_score__lt=4.5).count(),
'average': queryset.filter(total_score__gte=2.5, total_score__lt=3.5).count(),
'poor': queryset.filter(total_score__lt=2.5).count(),
}
return {
'type': 'donut',
'labels': ['Excellent', 'Good', 'Average', 'Poor'],
'series': [
distribution['excellent'],
distribution['good'],
distribution['average'],
distribution['poor']
]
}
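    # Example output (illustrative counts):
    #   {'type': 'donut', 'labels': ['Excellent', 'Good', 'Average', 'Poor'],
    #    'series': [18, 42, 11, 5]}
    # The bands above assume total_score is on a 1-5 scale.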
@staticmethod
def _get_sentiment_distribution(start_date, end_date) -> Dict[str, Any]:
"""Get sentiment analysis distribution"""
queryset = SentimentResult.objects.filter(
created_at__gte=start_date,
created_at__lte=end_date
)
distribution = queryset.values('sentiment').annotate(
count=Count('id')
)
sentiment_labels = {
'positive': 'Positive',
'neutral': 'Neutral',
'negative': 'Negative'
}
        sentiment_order = ['positive', 'neutral', 'negative']
        # Present sentiments in a fixed order; any unexpected values sort last
        rows = sorted(
            distribution,
            key=lambda r: (sentiment_order.index(r['sentiment'])
                           if r['sentiment'] in sentiment_order
                           else len(sentiment_order))
        )
        return {
            'type': 'donut',
            'labels': [sentiment_labels.get(r['sentiment'], r['sentiment']) for r in rows],
            'series': [r['count'] for r in rows]
        }
@staticmethod
def _get_department_performance(
user, start_date, end_date, hospital_id: Optional[str] = None
) -> Dict[str, Any]:
"""Get department performance rankings"""
queryset = Department.objects.filter(status='active')
if hospital_id:
queryset = queryset.filter(hospital_id=hospital_id)
elif not user.is_px_admin() and user.hospital:
queryset = queryset.filter(hospital=user.hospital)
# Annotate with survey data
departments = queryset.annotate(
avg_survey_score=Avg('journey_stages__survey_instance__total_score'),
survey_count=Count('journey_stages__survey_instance')
).filter(survey_count__gt=0).order_by('-avg_survey_score')[:10]
return {
'type': 'bar',
'labels': [d.name for d in departments],
'series': [{
'name': 'Average Score',
'data': [round(d.avg_survey_score or 0, 2) for d in departments]
}]
}
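    # Example output (top departments by average survey score, illustrative values):
    #   {'type': 'bar', 'labels': ['Cardiology', 'Oncology'],
    #    'series': [{'name': 'Average Score', 'data': [4.6, 4.4]}]}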
@staticmethod
def _get_physician_leaderboard(
user, start_date, end_date, hospital_id: Optional[str] = None,
department_id: Optional[str] = None, limit: int = 10
) -> Dict[str, Any]:
"""Get physician leaderboard for the current period"""
        # Note: rankings come from the pre-aggregated PhysicianMonthlyRating rows for
        # the current calendar month; start_date/end_date are accepted to match the
        # other chart helpers but are not applied here.
        now = timezone.now()
        queryset = PhysicianMonthlyRating.objects.filter(
            year=now.year,
            month=now.month
        ).select_related('staff', 'staff__hospital', 'staff__department')
# Apply RBAC filters
if not user.is_px_admin() and user.hospital:
queryset = queryset.filter(staff__hospital=user.hospital)
if hospital_id:
queryset = queryset.filter(staff__hospital_id=hospital_id)
if department_id:
queryset = queryset.filter(staff__department_id=department_id)
queryset = queryset.order_by('-average_rating')[:limit]
return {
'type': 'bar',
'labels': [f"{r.staff.first_name} {r.staff.last_name}" for r in queryset],
'series': [{
'name': 'Rating',
'data': [float(round(r.average_rating, 2)) for r in queryset]
}],
'metadata': [
{
'name': f"{r.staff.first_name} {r.staff.last_name}",
'physician_id': str(r.staff.id),
'specialization': r.staff.specialization,
'department': r.staff.department.name if r.staff.department else None,
'rating': float(round(r.average_rating, 2)),
'surveys': int(r.total_surveys) if r.total_surveys is not None else 0,
'positive': int(r.positive_count) if r.positive_count is not None else 0,
'neutral': int(r.neutral_count) if r.neutral_count is not None else 0,
'negative': int(r.negative_count) if r.negative_count is not None else 0
}
for r in queryset
]
}
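    # Example metadata entry in the returned payload (illustrative values):
    #   {'name': 'Aisha Rahman', 'physician_id': '<uuid>', 'specialization': 'Cardiology',
    #    'department': 'Cardiology', 'rating': 4.72, 'surveys': 38,
    #    'positive': 30, 'neutral': 6, 'negative': 2}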