HH/apps/complaints/tasks.py

"""
Complaints Celery tasks
This module contains tasks for:
- Checking overdue complaints
- Sending SLA reminders
- Triggering resolution satisfaction surveys
- Creating PX actions from complaints
- AI-powered complaint analysis
"""
import logging
from typing import Optional, Dict, Any, Tuple
from celery import shared_task
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
logger = logging.getLogger(__name__)
def match_staff_from_name(staff_name: str, hospital_id: str, department_name: Optional[str] = None, return_all: bool = False, fuzzy_threshold: float = 0.65) -> Tuple[list, float, str]:
"""
Enhanced staff matching with fuzzy matching and improved accuracy.
This function uses fuzzy string matching (Levenshtein distance) to find staff members
with improved handling of:
- Name variations (with/without hyphens, different spellings)
- Typos and minor errors
- Matching against original full name field
- Better confidence scoring
Args:
staff_name: Name extracted from complaint (without titles)
hospital_id: Hospital ID to search within
department_name: Optional department name to prioritize matching
return_all: If True, return all matching staff. If False, return single best match.
fuzzy_threshold: Minimum similarity ratio for fuzzy matches (0.0 to 1.0)
Returns:
If return_all=True: Tuple of (matches_list, confidence_score, matching_method)
- matches_list: List of dicts with matched staff details
- confidence_score: Float from 0.0 to 1.0 (best match confidence)
- matching_method: Description of how staff was matched
If return_all=False: Tuple of (staff_id, confidence_score, matching_method)
- staff_id: UUID of matched staff or None
- confidence_score: Float from 0.0 to 1.0
- matching_method: Description of how staff was matched
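    Example (illustrative only; the UUID, names, and department below are hypothetical):
        staff_id, confidence, method = match_staff_from_name(
            staff_name="Ahmed Al-Shammari",
            hospital_id="2b6c1a9e-1111-2222-3333-444455556666",
            department_name="Radiology",
        )
        matches, confidence, method = match_staff_from_name(
            staff_name="Ahmed Al-Shammari",
            hospital_id="2b6c1a9e-1111-2222-3333-444455556666",
            return_all=True,
        )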
"""
from apps.organizations.models import Staff, Department
if not staff_name or not staff_name.strip():
        return ([] if return_all else None), 0.0, "No staff name provided"
staff_name = staff_name.strip()
normalized_input = _normalize_name(staff_name)
matches = []
# Build base query - staff from this hospital, active status
base_query = Staff.objects.filter(
hospital_id=hospital_id,
status='active'
)
# Get department if specified
dept_id = None
if department_name:
department = Department.objects.filter(
hospital_id=hospital_id,
name__iexact=department_name,
status='active'
).first()
if department:
            dept_id = str(department.id)  # keep as str so comparisons with str(staff.department.id) below work
# Fetch all staff to perform fuzzy matching
all_staff = list(base_query)
# If department specified, filter
if dept_id:
        dept_staff = [s for s in all_staff if s.department and str(s.department.id) == dept_id]
else:
dept_staff = []
# ========================================
# LAYER 1: EXACT MATCHES
# ========================================
# 1a. Exact match on first_name + last_name (English)
words = staff_name.split()
if len(words) >= 2:
first_name = words[0]
last_name = ' '.join(words[1:])
for staff in all_staff:
if staff.first_name.lower() == first_name.lower() and \
staff.last_name.lower() == last_name.lower():
confidence = 0.95 if (dept_id and staff.department and str(staff.department.id) == dept_id) else 0.90
method = f"Exact English match in {'correct' if (dept_id and staff.department and str(staff.department.id) == dept_id) else 'any'} department"
if not any(m['id'] == str(staff.id) for m in matches):
matches.append(_create_match_dict(staff, confidence, method, staff_name))
logger.info(f"EXACT MATCH (EN): {staff.first_name} {staff.last_name} == {first_name} {last_name}")
# 1b. Exact match on full Arabic name
for staff in all_staff:
full_arabic = f"{staff.first_name_ar} {staff.last_name_ar}".strip()
if full_arabic == staff_name:
confidence = 0.95 if (dept_id and staff.department and str(staff.department.id) == dept_id) else 0.90
method = f"Exact Arabic match in {'correct' if (dept_id and staff.department and str(staff.department.id) == dept_id) else 'any'} department"
if not any(m['id'] == str(staff.id) for m in matches):
matches.append(_create_match_dict(staff, confidence, method, staff_name))
logger.info(f"EXACT MATCH (AR): {full_arabic} == {staff_name}")
# 1c. Exact match on 'name' field (original full name)
for staff in all_staff:
if staff.name and staff.name.lower() == staff_name.lower():
confidence = 0.93
method = "Exact match on original name field"
if not any(m['id'] == str(staff.id) for m in matches):
matches.append(_create_match_dict(staff, confidence, method, staff_name))
logger.info(f"EXACT MATCH (name field): {staff.name} == {staff_name}")
# ========================================
# LAYER 2: FUZZY MATCHES (if no exact)
# ========================================
if not matches:
logger.info(f"No exact matches found, trying fuzzy matching for: {staff_name}")
for staff in all_staff:
# Try different name combinations
name_combinations = [
f"{staff.first_name} {staff.last_name}",
f"{staff.first_name_ar} {staff.last_name_ar}",
staff.name or "",
staff.first_name,
staff.last_name,
staff.first_name_ar,
staff.last_name_ar
]
# Check if any combination matches fuzzily
best_ratio = 0.0
best_match_name = ""
for combo in name_combinations:
if not combo:
continue
ratio = _fuzzy_match_ratio(staff_name, combo)
if ratio > best_ratio:
best_ratio = ratio
best_match_name = combo
# If good fuzzy match found
if best_ratio >= fuzzy_threshold:
# Adjust confidence based on match quality and department
dept_bonus = 0.05 if (dept_id and staff.department and str(staff.department.id) == dept_id) else 0.0
confidence = best_ratio * 0.85 + dept_bonus # Scale down slightly for fuzzy
method = f"Fuzzy match ({best_ratio:.2f}) on '{best_match_name}'"
if not any(m['id'] == str(staff.id) for m in matches):
matches.append(_create_match_dict(staff, confidence, method, staff_name))
logger.info(f"FUZZY MATCH ({best_ratio:.2f}): {best_match_name} ~ {staff_name}")
# ========================================
# LAYER 3: PARTIAL/WORD MATCHES
# ========================================
if not matches:
logger.info(f"No fuzzy matches found, trying partial/word matching for: {staff_name}")
# Split input name into words
input_words = [_normalize_name(w) for w in staff_name.split() if _normalize_name(w)]
for staff in all_staff:
# Build list of all name fields
staff_names = [
staff.first_name,
staff.last_name,
staff.first_name_ar,
staff.last_name_ar,
staff.name or ""
]
# Count word matches
match_count = 0
total_words = len(input_words)
for word in input_words:
word_matched = False
for staff_name_field in staff_names:
if _normalize_name(staff_name_field) == word or \
word in _normalize_name(staff_name_field):
word_matched = True
break
if word_matched:
match_count += 1
            # Require at least 2 matching words
            if match_count >= 2:
confidence = 0.60 + (match_count / total_words) * 0.15
dept_bonus = 0.05 if (dept_id and staff.department and str(staff.department.id) == dept_id) else 0.0
confidence += dept_bonus
method = f"Partial match ({match_count}/{total_words} words)"
if not any(m['id'] == str(staff.id) for m in matches):
matches.append(_create_match_dict(staff, confidence, method, staff_name))
logger.info(f"PARTIAL MATCH ({match_count}/{total_words}): {staff.first_name} {staff.last_name}")
# ========================================
# FINAL: SORT AND RETURN
# ========================================
if matches:
# Sort by confidence (descending)
matches.sort(key=lambda x: x['confidence'], reverse=True)
best_confidence = matches[0]['confidence']
best_method = matches[0]['matching_method']
logger.info(
f"Returning {len(matches)} match(es) for '{staff_name}'. "
f"Best: {matches[0]['name_en']} (confidence: {best_confidence:.2f}, method: {best_method})"
)
if not return_all:
return str(matches[0]['id']), best_confidence, best_method
else:
return matches, best_confidence, best_method
else:
logger.warning(f"No staff match found for name: '{staff_name}'")
return [], 0.0, "No match found"
def _fuzzy_match_ratio(str1: str, str2: str) -> float:
"""
Calculate fuzzy match ratio using difflib.
Args:
str1: First string
str2: Second string
Returns:
Float from 0.0 to 1.0 representing similarity
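    Example (approximate values, shown for intuition only):
        _fuzzy_match_ratio("Al-Shammari", "Alshammari")  # ~0.95
        _fuzzy_match_ratio("Ahmed", "Ahmad")             # ~0.80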
"""
try:
from difflib import SequenceMatcher
return SequenceMatcher(None, str1.lower(), str2.lower()).ratio()
except Exception:
return 0.0
def _normalize_name(name: str) -> str:
"""
Normalize name for better matching.
- Remove extra spaces
- Remove hyphens (Al-Shammari -> AlShammari)
- Convert to lowercase
- Remove common titles
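    Example (illustrative):
        _normalize_name("Dr. Ahmed Al-Shammari")  ->  "ahmed alshammari"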
"""
if not name:
return ""
name = name.strip().lower()
# Remove common titles (both English and Arabic)
titles = ['dr.', 'dr', 'mr.', 'mr', 'mrs.', 'mrs', 'ms.', 'ms',
'د.', 'السيد', 'السيدة', 'الدكتور']
for title in titles:
        # Only strip the title when it is a whole token, so names like "Drew" are not mangled
        if name.startswith(title) and (len(name) == len(title) or not name[len(title)].isalpha()):
            name = name[len(title):].strip()
# Remove hyphens for better matching (Al-Shammari -> AlShammari)
name = name.replace('-', '')
    # Collapse repeated whitespace into single spaces
    while '  ' in name:
        name = name.replace('  ', ' ')
return name.strip()
def _create_match_dict(staff, confidence: float, method: str, source_name: str) -> Dict[str, Any]:
"""
Create a match dictionary for a staff member.
Args:
staff: Staff model instance
confidence: Confidence score (0.0 to 1.0)
method: Description of matching method
source_name: Original input name that was matched
Returns:
Dictionary with match details
"""
return {
'id': str(staff.id),
'name_en': f"{staff.first_name} {staff.last_name}",
'name_ar': f"{staff.first_name_ar} {staff.last_name_ar}" if staff.first_name_ar and staff.last_name_ar else "",
'original_name': staff.name or "",
'job_title': staff.job_title,
'specialization': staff.specialization,
'department': staff.department.name if staff.department else None,
'department_id': str(staff.department.id) if staff.department else None,
'confidence': confidence,
'matching_method': method,
'source_name': source_name
}
@shared_task
def check_overdue_complaints():
"""
Periodic task to check for overdue complaints.
Runs every 15 minutes (configured in config/celery.py).
Updates is_overdue flag for complaints past their SLA deadline.
Triggers automatic escalation based on escalation rules.
"""
from apps.complaints.models import Complaint, ComplaintStatus
# Get active complaints (not closed or cancelled)
active_complaints = Complaint.objects.filter(
status__in=[ComplaintStatus.OPEN, ComplaintStatus.IN_PROGRESS, ComplaintStatus.RESOLVED]
).select_related('hospital', 'patient', 'department')
overdue_count = 0
escalated_count = 0
for complaint in active_complaints:
if complaint.check_overdue():
overdue_count += 1
logger.warning(
f"Complaint {complaint.id} is overdue: {complaint.title} "
f"(due: {complaint.due_at})"
)
            # Queue automatic escalation (runs asynchronously; success is determined inside the task)
            escalate_complaint_auto.delay(str(complaint.id))
            escalated_count += 1
if overdue_count > 0:
logger.info(f"Found {overdue_count} overdue complaints, triggered {escalated_count} escalations")
return {
'overdue_count': overdue_count,
'escalated_count': escalated_count
}
@shared_task
def send_complaint_resolution_survey(complaint_id):
"""
Send resolution satisfaction survey when complaint is closed.
This task is triggered when a complaint status changes to CLOSED.
Args:
complaint_id: UUID of the Complaint
Returns:
dict: Result with survey_instance_id
"""
from apps.complaints.models import Complaint
from apps.core.services import create_audit_log
from apps.surveys.models import SurveyInstance, SurveyTemplate
try:
complaint = Complaint.objects.select_related(
'patient', 'hospital'
).get(id=complaint_id)
# Check if survey already sent
if complaint.resolution_survey:
logger.info(f"Resolution survey already sent for complaint {complaint_id}")
return {'status': 'skipped', 'reason': 'already_sent'}
# Get resolution satisfaction survey template
try:
survey_template = SurveyTemplate.objects.get(
hospital=complaint.hospital,
survey_type='complaint_resolution',
is_active=True
)
except SurveyTemplate.DoesNotExist:
logger.warning(
f"No resolution satisfaction survey template found for hospital {complaint.hospital.name}"
)
return {'status': 'skipped', 'reason': 'no_template'}
# Create survey instance
with transaction.atomic():
survey_instance = SurveyInstance.objects.create(
survey_template=survey_template,
patient=complaint.patient,
encounter_id=complaint.encounter_id,
delivery_channel='sms', # Default
recipient_phone=complaint.patient.phone,
recipient_email=complaint.patient.email,
metadata={
'complaint_id': str(complaint.id),
'complaint_title': complaint.title
}
)
# Link survey to complaint
complaint.resolution_survey = survey_instance
complaint.resolution_survey_sent_at = timezone.now()
complaint.save(update_fields=['resolution_survey', 'resolution_survey_sent_at'])
# Send survey
from apps.notifications.services import NotificationService
notification_log = NotificationService.send_survey_invitation(
survey_instance=survey_instance,
language='en' # TODO: Get from patient preference
)
# Update survey status
survey_instance.status = 'active'
survey_instance.sent_at = timezone.now()
survey_instance.save(update_fields=['status', 'sent_at'])
# Log audit event
create_audit_log(
event_type='survey_sent',
description=f"Resolution satisfaction survey sent for complaint: {complaint.title}",
content_object=survey_instance,
metadata={
'complaint_id': str(complaint.id),
'survey_template': survey_template.name
}
)
logger.info(
f"Resolution satisfaction survey sent for complaint {complaint.id}"
)
return {
'status': 'sent',
'survey_instance_id': str(survey_instance.id),
'notification_log_id': str(notification_log.id)
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error sending resolution survey: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def check_resolution_survey_threshold(survey_instance_id, complaint_id):
"""
Check if resolution survey score breaches threshold and create PX Action if needed.
This task is triggered when a complaint resolution survey is completed.
Args:
survey_instance_id: UUID of the SurveyInstance
complaint_id: UUID of the Complaint
Returns:
dict: Result with action status
"""
from apps.complaints.models import Complaint, ComplaintThreshold
from apps.surveys.models import SurveyInstance
from apps.px_action_center.models import PXAction
from django.contrib.contenttypes.models import ContentType
try:
survey = SurveyInstance.objects.get(id=survey_instance_id)
complaint = Complaint.objects.select_related('hospital', 'patient').get(id=complaint_id)
# Get threshold for this hospital
try:
threshold = ComplaintThreshold.objects.get(
hospital=complaint.hospital,
threshold_type='resolution_survey_score',
is_active=True
)
except ComplaintThreshold.DoesNotExist:
logger.info(f"No resolution survey threshold configured for hospital {complaint.hospital.name_en}")
return {'status': 'no_threshold'}
# Check if threshold is breached
if threshold.check_threshold(survey.score):
logger.warning(
f"Resolution survey score {survey.score} breaches threshold {threshold.threshold_value} "
f"for complaint {complaint_id}"
)
# Create PX Action
complaint_ct = ContentType.objects.get_for_model(Complaint)
action = PXAction.objects.create(
title=f"Low Resolution Satisfaction: {complaint.title[:100]}",
description=(
f"Complaint resolution survey scored {survey.score}% "
f"(threshold: {threshold.threshold_value}%). "
f"Original complaint: {complaint.description[:200]}"
),
source='complaint_resolution_survey',
priority='high' if survey.score < 30 else 'medium',
hospital=complaint.hospital,
department=complaint.department,
patient=complaint.patient,
content_type=complaint_ct,
object_id=complaint.id,
metadata={
'complaint_id': str(complaint.id),
'survey_id': str(survey.id),
'survey_score': survey.score,
'threshold_value': threshold.threshold_value,
}
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='px_action_created',
description=f"PX Action created from low resolution survey score",
content_object=action,
metadata={
'complaint_id': str(complaint.id),
'survey_score': survey.score,
'trigger': 'resolution_survey_threshold'
}
)
logger.info(f"Created PX Action {action.id} from low resolution survey score")
return {
'status': 'action_created',
'action_id': str(action.id),
'survey_score': survey.score,
'threshold': threshold.threshold_value
}
else:
logger.info(f"Resolution survey score {survey.score} is above threshold {threshold.threshold_value}")
return {'status': 'threshold_not_breached', 'survey_score': survey.score}
except SurveyInstance.DoesNotExist:
error_msg = f"SurveyInstance {survey_instance_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error checking resolution survey threshold: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def create_action_from_complaint(complaint_id):
"""
Create PX Action from complaint (if configured).
This task is triggered when a complaint is created,
if the hospital configuration requires automatic action creation.
Args:
complaint_id: UUID of the Complaint
Returns:
dict: Result with action_id
"""
from apps.complaints.models import Complaint
from apps.organizations.models import Hospital
from apps.px_action_center.models import PXAction
from django.contrib.contenttypes.models import ContentType
try:
complaint = Complaint.objects.select_related('hospital', 'patient', 'department').get(id=complaint_id)
# Check if hospital has auto-create enabled
# For now, we'll check metadata on hospital or use a simple rule
# In production, you'd have a HospitalComplaintConfig model
# Handle case where metadata field might not exist (legacy data)
hospital_metadata = getattr(complaint.hospital, 'metadata', None)
if hospital_metadata is None:
hospital_metadata = {}
auto_create = hospital_metadata.get('auto_create_action_on_complaint', False)
if not auto_create:
logger.info(f"Auto-create PX Action disabled for hospital {complaint.hospital.name}")
return {'status': 'disabled'}
# Use JSON-serializable values instead of model objects
category_name = complaint.category.name_en if complaint.category else None
category_id = str(complaint.category.id) if complaint.category else None
# Create PX Action
complaint_ct = ContentType.objects.get_for_model(Complaint)
action = PXAction.objects.create(
title=f"New Complaint: {complaint.title[:100]}",
description=complaint.description[:500],
source='complaint',
priority=complaint.priority,
hospital=complaint.hospital,
department=complaint.department,
patient=complaint.patient,
content_type=complaint_ct,
object_id=complaint.id,
metadata={
'complaint_id': str(complaint.id),
'complaint_category': category_name,
'complaint_category_id': category_id,
'complaint_severity': complaint.severity,
}
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='px_action_created',
description=f"PX Action created from complaint",
content_object=action,
metadata={
'complaint_id': str(complaint.id),
'trigger': 'complaint_creation'
}
)
logger.info(f"Created PX Action {action.id} from complaint {complaint_id}")
return {
'status': 'action_created',
'action_id': str(action.id)
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error creating action from complaint: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def escalate_complaint_auto(complaint_id):
"""
Automatically escalate complaint based on escalation rules.
This task is triggered when a complaint becomes overdue.
It finds matching escalation rules and reassigns the complaint.
Supports multi-level escalation with tracking.
Args:
complaint_id: UUID of the Complaint
Returns:
dict: Result with escalation status
"""
from apps.complaints.models import Complaint, ComplaintUpdate, EscalationRule
from apps.accounts.models import User
try:
complaint = Complaint.objects.select_related(
'hospital', 'department', 'assigned_to'
).get(id=complaint_id)
# Get current escalation level from metadata
current_level = complaint.metadata.get('escalation_level', 0)
# Calculate hours overdue
hours_overdue = (timezone.now() - complaint.due_at).total_seconds() / 3600
# Get applicable escalation rules for this hospital, ordered by escalation_level
rules = EscalationRule.objects.filter(
hospital=complaint.hospital,
is_active=True,
trigger_on_overdue=True
).order_by('escalation_level', 'order')
# Filter rules by severity and priority if specified
if complaint.severity:
rules = rules.filter(
Q(severity_filter='') | Q(severity_filter=complaint.severity)
)
if complaint.priority:
rules = rules.filter(
Q(priority_filter='') | Q(priority_filter=complaint.priority)
)
# Find matching rule for next escalation level
matching_rule = None
for rule in rules:
# Check if this is the next escalation level
if rule.escalation_level == current_level + 1:
# Check if we've exceeded trigger hours
if hours_overdue >= rule.trigger_hours_overdue:
# Check if we've exceeded max level
max_level = rule.max_escalation_level
if current_level >= max_level:
logger.info(
f"Complaint {complaint_id} has reached max escalation level {max_level}"
)
return {
'status': 'max_level_reached',
'max_level': max_level,
'current_level': current_level
}
matching_rule = rule
break
if not matching_rule:
logger.info(
f"No matching escalation rule found for complaint {complaint_id} "
f"(current level: {current_level}, hours overdue: {hours_overdue:.1f})"
)
return {'status': 'no_matching_rule', 'current_level': current_level}
# Determine escalation target
escalation_target = None
if matching_rule.escalate_to_role == 'department_manager':
if complaint.department and complaint.department.manager:
escalation_target = complaint.department.manager
elif matching_rule.escalate_to_role == 'hospital_admin':
# Find hospital admin for this hospital
escalation_target = User.objects.filter(
hospital=complaint.hospital,
groups__name='Hospital Admin',
is_active=True
).first()
elif matching_rule.escalate_to_role == 'px_admin':
# Find PX admin
escalation_target = User.objects.filter(
groups__name='PX Admin',
is_active=True
).first()
elif matching_rule.escalate_to_role == 'ceo':
# Find CEO for this hospital
escalation_target = User.objects.filter(
hospital=complaint.hospital,
groups__name='CEO',
is_active=True
).first()
elif matching_rule.escalate_to_role == 'specific_user':
escalation_target = matching_rule.escalate_to_user
if not escalation_target:
logger.warning(
f"Could not find escalation target for rule {matching_rule.name} "
f"({matching_rule.escalate_to_role}) on complaint {complaint_id}"
)
return {
'status': 'no_target_found',
'rule': matching_rule.name,
'role': matching_rule.escalate_to_role
}
# Check if already assigned to this person to avoid redundant escalation
if complaint.assigned_to and complaint.assigned_to.id == escalation_target.id:
logger.info(
f"Complaint {complaint_id} already assigned to {escalation_target.get_full_name()}, "
f"skipping escalation to same person"
)
return {
'status': 'already_assigned',
'escalated_to': escalation_target.get_full_name()
}
# Perform escalation
old_assignee = complaint.assigned_to
complaint.assigned_to = escalation_target
complaint.escalated_at = timezone.now()
# Update metadata with escalation level
complaint.metadata['escalation_level'] = matching_rule.escalation_level
complaint.metadata['last_escalation_rule'] = {
'id': str(matching_rule.id),
'name': matching_rule.name,
'level': matching_rule.escalation_level,
'timestamp': timezone.now().isoformat()
}
complaint.save(update_fields=['assigned_to', 'escalated_at', 'metadata'])
# Create update
ComplaintUpdate.objects.create(
complaint=complaint,
update_type='escalation',
message=(
f"Automatically escalated to {escalation_target.get_full_name()} "
f"(Level {matching_rule.escalation_level}, Rule: {matching_rule.name}). "
f"Complaint is {hours_overdue:.1f} hours overdue."
),
created_by=None, # System action
metadata={
'rule_id': str(matching_rule.id),
'rule_name': matching_rule.name,
'escalation_level': matching_rule.escalation_level,
'hours_overdue': hours_overdue,
'old_assignee_id': str(old_assignee.id) if old_assignee else None,
'new_assignee_id': str(escalation_target.id)
}
)
# Send notifications
send_complaint_notification.delay(
complaint_id=str(complaint.id),
event_type='escalated'
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='complaint_escalated',
description=f"Complaint automatically escalated to {escalation_target.get_full_name()} (Level {matching_rule.escalation_level})",
content_object=complaint,
metadata={
'rule': matching_rule.name,
'level': matching_rule.escalation_level,
'hours_overdue': hours_overdue,
'escalated_to': escalation_target.get_full_name()
}
)
logger.info(
f"Escalated complaint {complaint_id} to {escalation_target.get_full_name()} "
f"(Level {matching_rule.escalation_level}) using rule '{matching_rule.name}'"
)
return {
'status': 'escalated',
'rule': matching_rule.name,
'level': matching_rule.escalation_level,
'escalated_to': escalation_target.get_full_name(),
'hours_overdue': round(hours_overdue, 2)
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error escalating complaint: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def escalate_after_reminder(complaint_id):
"""
Escalate complaint after reminder if no action taken.
This task is triggered by the SLA reminder task for rules with
reminder_escalation_enabled. It checks if the complaint has had any
activity since the reminder was sent, and escalates if not.
Args:
complaint_id: UUID of the Complaint
Returns:
dict: Result with escalation status
"""
from apps.complaints.models import Complaint, ComplaintUpdate, EscalationRule
try:
complaint = Complaint.objects.select_related(
'hospital', 'department', 'assigned_to'
).get(id=complaint_id)
# Check if reminder was sent
if not complaint.reminder_sent_at:
logger.info(f"No reminder sent for complaint {complaint_id}, skipping escalation")
return {'status': 'no_reminder_sent'}
# Get SLA config to check reminder-based escalation
from apps.complaints.models import ComplaintSLAConfig
try:
sla_config = ComplaintSLAConfig.objects.get(
hospital=complaint.hospital,
severity=complaint.severity,
priority=complaint.priority,
is_active=True
)
except ComplaintSLAConfig.DoesNotExist:
logger.info(f"No SLA config for complaint {complaint_id}, skipping reminder escalation")
return {'status': 'no_sla_config'}
# Check if reminder escalation is enabled for this hospital
rules = EscalationRule.objects.filter(
hospital=complaint.hospital,
is_active=True,
reminder_escalation_enabled=True
).order_by('escalation_level')
# Filter by severity/priority
if complaint.severity:
rules = rules.filter(
Q(severity_filter='') | Q(severity_filter=complaint.severity)
)
if complaint.priority:
rules = rules.filter(
Q(priority_filter='') | Q(priority_filter=complaint.priority)
)
if not rules.exists():
logger.info(f"No reminder escalation rules for complaint {complaint_id}")
return {'status': 'no_rules'}
# Get current escalation level
current_level = complaint.metadata.get('escalation_level', 0)
# Find matching rule for next level
matching_rule = None
for rule in rules:
if rule.escalation_level == current_level + 1:
# Calculate time since reminder
hours_since_reminder = (timezone.now() - complaint.reminder_sent_at).total_seconds() / 3600
# Check if enough time has passed since reminder
if hours_since_reminder >= rule.reminder_escalation_hours:
matching_rule = rule
break
if not matching_rule:
logger.info(
f"Reminder escalation not yet triggered for complaint {complaint_id} "
f"(hours since reminder: {(timezone.now() - complaint.reminder_sent_at).total_seconds() / 3600:.1f})"
)
return {
'status': 'not_yet_triggered',
'hours_since_reminder': (timezone.now() - complaint.reminder_sent_at).total_seconds() / 3600
}
# Trigger the regular escalation task
result = escalate_complaint_auto.delay(complaint_id)
        # Record that this escalation was reminder-based
        if not complaint.metadata:
            complaint.metadata = {}
        complaint.metadata['reminder_escalation'] = {
            'rule_id': str(matching_rule.id),
            'rule_name': matching_rule.name,
            'hours_since_reminder': (timezone.now() - complaint.reminder_sent_at).total_seconds() / 3600,
            'timestamp': timezone.now().isoformat()
        }
        complaint.save(update_fields=['metadata'])
logger.info(
f"Reminder-based escalation triggered for complaint {complaint_id} "
f"using rule '{matching_rule.name}'"
)
return {
'status': 'reminder_escalation_triggered',
'rule': matching_rule.name,
            'escalation_task_id': result.id  # AsyncResult itself is not JSON-serializable
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error in reminder escalation: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def analyze_complaint_with_ai(complaint_id):
"""
    Analyze a complaint using AI to determine its severity, priority, and category.
This task is triggered when a complaint is created.
It uses the AI service to analyze the complaint content and classify it.
Args:
complaint_id: UUID of the Complaint
Returns:
dict: Result with severity, priority, category, and reasoning
"""
from apps.complaints.models import Complaint
from apps.core.ai_service import AIService, AIServiceError
try:
complaint = Complaint.objects.select_related('hospital').get(id=complaint_id)
logger.info(f"Starting AI analysis for complaint {complaint_id}")
# Get category name if category exists
category_name = None
if complaint.category:
category_name = complaint.category.name_en
# Analyze complaint using AI service
try:
analysis = AIService.analyze_complaint(
title=complaint.title,
description=complaint.description,
category=category_name,
hospital_id=complaint.hospital.id
)
# Analyze emotion using AI service
emotion_analysis = AIService.analyze_emotion(
text=complaint.description
)
# Update complaint with AI-determined values
old_severity = complaint.severity
old_priority = complaint.priority
old_category = complaint.category
old_department = complaint.department
complaint.severity = analysis['severity']
complaint.priority = analysis['priority']
from apps.complaints.models import ComplaintCategory
if category := ComplaintCategory.objects.filter(name_en=analysis['category']).first():
complaint.category = category
# Update department from AI analysis
department_name = analysis.get('department', '')
if department_name:
from apps.organizations.models import Department
# Try exact match first (case-insensitive)
if department := Department.objects.filter(
hospital_id=complaint.hospital.id,
name__iexact=department_name,
status='active'
).first():
complaint.department = department
logger.info(f"Matched department exactly: {department.name}")
# If no exact match, try partial match
elif department := Department.objects.filter(
hospital_id=complaint.hospital.id,
name__icontains=department_name,
status='active'
).first():
complaint.department = department
logger.info(f"Matched department partially: {department.name} from '{department_name}'")
else:
logger.warning(f"AI suggested department '{department_name}' but no match found in hospital '{complaint.hospital.name}'")
# Update title from AI analysis (use English version)
if analysis.get('title_en'):
complaint.title = analysis['title_en']
elif analysis.get('title'):
complaint.title = analysis['title']
# Get ALL staff names from analyze_complaint result (extracted by AI)
staff_names = analysis.get('staff_names', [])
primary_staff_name = analysis.get('primary_staff_name', '').strip()
# Always get ALL matching staff for PX Admin review
all_staff_matches = []
staff_confidence = 0.0
staff_matching_method = None
matched_staff_id = None
# Capture old staff before matching
old_staff = complaint.staff
# Process ALL extracted staff names
if staff_names:
logger.info(f"AI extracted {len(staff_names)} staff name(s): {staff_names}")
# Loop through each extracted name and match to database
for idx, staff_name in enumerate(staff_names):
staff_name = staff_name.strip()
if not staff_name:
continue
logger.info(f"Matching staff name {idx+1}/{len(staff_names)}: {staff_name}")
# Try matching WITH department filter first (higher confidence if match found)
matches_for_name, confidence_for_name, method_for_name = match_staff_from_name(
staff_name=staff_name,
hospital_id=str(complaint.hospital.id),
department_name=department_name,
return_all=True # Return ALL matches
)
# If no match found with department, try WITHOUT department filter
if not matches_for_name:
logger.info(f"No match found with department filter '{department_name}' for '{staff_name}', trying without department filter...")
matches_for_name, confidence_for_name, method_for_name = match_staff_from_name(
staff_name=staff_name,
hospital_id=str(complaint.hospital.id),
department_name=None, # Search all departments
return_all=True
)
# Add source_name to each match so we know which extracted name it came from
for match in matches_for_name:
match['source_name'] = staff_name
all_staff_matches.extend(matches_for_name)
# Deduplicate matches (same staff can match multiple names)
seen_ids = set()
deduped_matches = []
for match in all_staff_matches:
if match['id'] not in seen_ids:
seen_ids.add(match['id'])
deduped_matches.append(match)
all_staff_matches = deduped_matches
logger.info(f"Total unique staff matches found: {len(all_staff_matches)}")
# Logic for staff assignment - CHANGED: NO AUTO-ASSIGNMENT
needs_staff_review = False
if all_staff_matches:
# Sort by confidence (descending)
all_staff_matches.sort(key=lambda x: x['confidence'], reverse=True)
# Get best match (highest confidence) - BUT DON'T AUTO-ASSIGN
best_match = all_staff_matches[0]
matched_staff_id = best_match['id']
staff_confidence = best_match['confidence']
staff_matching_method = best_match['matching_method']
# DO NOT AUTO-ASSIGN STAFF - Only store suggestions in metadata
# PX Admins will manually select from suggestions
logger.info(
f"Found staff suggestion: {best_match['name_en']} "
f"for complaint {complaint_id} "
f"(confidence: {staff_confidence:.2f}, method: {staff_matching_method}) - "
f"NOT auto-assigned, pending manual review"
)
# Mark for review if:
# - Low confidence on best match
# - Multiple names extracted (multiple people mentioned)
# - Multiple database matches found
# - ALWAYS mark for review since we're not auto-assigning
needs_staff_review = True
# Assign to department if confidence is high enough (>= 0.7)
if staff_confidence >= 0.7 and best_match.get('department_id'):
from apps.organizations.models import Department
try:
dept = Department.objects.get(id=best_match['department_id'])
complaint.department = dept
logger.info(f"Assigned to department based on staff match: {dept.name}")
except Department.DoesNotExist:
pass
else:
                    # Names were extracted but none matched staff in the database
                    logger.warning(f"No staff matches found for extracted names: {staff_names}")
                    needs_staff_review = False  # Nothing to review without candidate matches
else:
# No staff names extracted
logger.info("No staff names extracted from complaint")
needs_staff_review = False
# Save reasoning in metadata
# Use JSON-serializable values instead of model objects
old_category_name = old_category.name_en if old_category else None
old_category_id = str(old_category.id) if old_category else None
old_department_name = old_department.name if old_department else None
old_department_id = str(old_department.id) if old_department else None
old_staff_name = f"{old_staff.first_name} {old_staff.last_name}" if old_staff else None
old_staff_id = str(old_staff.id) if old_staff else None
# Initialize metadata if needed
if not complaint.metadata:
complaint.metadata = {}
# Update or create ai_analysis in metadata with bilingual support and emotion
complaint.metadata['ai_analysis'] = {
'title_en': analysis.get('title_en', ''),
'title_ar': analysis.get('title_ar', ''),
'short_description_en': analysis.get('short_description_en', ''),
'short_description_ar': analysis.get('short_description_ar', ''),
'suggested_action_en': analysis.get('suggested_action_en', ''),
'suggested_action_ar': analysis.get('suggested_action_ar', ''),
'reasoning_en': analysis.get('reasoning_en', ''),
'reasoning_ar': analysis.get('reasoning_ar', ''),
'emotion': emotion_analysis.get('emotion', 'neutral'),
'emotion_intensity': emotion_analysis.get('intensity', 0.0),
'emotion_confidence': emotion_analysis.get('confidence', 0.0),
'analyzed_at': timezone.now().isoformat(),
'old_severity': old_severity,
'old_priority': old_priority,
'old_category': old_category_name,
'old_category_id': old_category_id,
'old_department': old_department_name,
'old_department_id': old_department_id,
'old_staff': old_staff_name,
'old_staff_id': old_staff_id,
'extracted_staff_names': staff_names,
'primary_staff_name': primary_staff_name,
'staff_matches': all_staff_matches,
'matched_staff_id': matched_staff_id,
'staff_confidence': staff_confidence,
'staff_matching_method': staff_matching_method,
'needs_staff_review': needs_staff_review,
'staff_match_count': len(all_staff_matches)
}
complaint.save(update_fields=['severity', 'priority', 'category', 'department', 'staff', 'title', 'metadata'])
# Re-calculate SLA due date based on new severity
complaint.due_at = complaint.calculate_sla_due_date()
complaint.save(update_fields=['due_at'])
# Create timeline update for AI completion
from apps.complaints.models import ComplaintUpdate
# Build bilingual message
emotion_display = emotion_analysis.get('emotion', 'neutral')
emotion_intensity = emotion_analysis.get('intensity', 0.0)
# Build English message
message_en = f"AI analysis complete: Severity={analysis['severity']}, Priority={analysis['priority']}, Category={analysis.get('category', 'N/A')}, Department={department_name or 'N/A'}"
            if matched_staff_id:
                best_suggestion = all_staff_matches[0]
                message_en += f", Suggested staff={best_suggestion['name_en']} (confidence: {staff_confidence:.2f}, pending manual review)"
message_en += f", Emotion={emotion_display} (Intensity: {emotion_intensity:.2f})"
# Build Arabic message
message_ar = f"اكتمل تحليل الذكاء الاصطناعي: الشدة={analysis['severity']}, الأولوية={analysis['priority']}, الفئة={analysis.get('category', 'N/A')}, القسم={department_name or 'N/A'}"
            if matched_staff_id:
                suggested_name_ar = best_suggestion['name_ar'] or best_suggestion['name_en']
                message_ar += f", الموظف المقترح={suggested_name_ar} (الثقة: {staff_confidence:.2f}, بانتظار المراجعة اليدوية)"
message_ar += f", العاطفة={emotion_display} (الشدة: {emotion_intensity:.2f})"
message = f"{message_en}\n\n{message_ar}"
ComplaintUpdate.objects.create(
complaint=complaint,
update_type='note',
message=message
)
# PX Action creation is now MANDATORY for all complaints
action_id = None
try:
logger.info(f"Creating PX Action for complaint {complaint_id} (Mandatory for all complaints)")
# Generate PX Action data using AI
action_data = AIService.create_px_action_from_complaint(complaint)
# Create PX Action object
from apps.px_action_center.models import PXAction, PXActionLog
from django.contrib.contenttypes.models import ContentType
complaint_ct = ContentType.objects.get_for_model(Complaint)
action = PXAction.objects.create(
source_type='complaint',
content_type=complaint_ct,
object_id=complaint.id,
title=action_data['title'],
description=action_data['description'],
hospital=complaint.hospital,
department=complaint.department,
category=action_data['category'],
priority=action_data['priority'],
severity=action_data['severity'],
status='open',
metadata={
'source_complaint_id': str(complaint.id),
'source_complaint_title': complaint.title,
'ai_generated': True,
'auto_created': True,
'ai_reasoning': action_data.get('reasoning', '')
}
)
action_id = str(action.id)
# Create action log entry
PXActionLog.objects.create(
action=action,
log_type='note',
message=f"Action automatically generated by AI for complaint: {complaint.title}",
metadata={
'complaint_id': str(complaint.id),
'ai_generated': True,
'auto_created': True,
'category': action_data['category'],
'priority': action_data['priority'],
'severity': action_data['severity']
}
)
# Create complaint update
ComplaintUpdate.objects.create(
complaint=complaint,
update_type='note',
message=f"PX Action automatically created from AI-generated suggestion (Action #{action.id}) - {action_data['category']}",
metadata={'action_id': str(action.id), 'category': action_data['category']}
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='px_action_auto_created',
description=f"PX Action automatically created from AI analysis for complaint: {complaint.title}",
content_object=action,
metadata={
'complaint_id': str(complaint.id),
'category': action_data['category'],
'priority': action_data['priority'],
'severity': action_data['severity'],
'ai_reasoning': action_data.get('reasoning', '')
}
)
logger.info(f"PX Action {action.id} automatically created for complaint {complaint_id}")
except Exception as e:
logger.error(f"Error auto-creating PX Action for complaint {complaint_id}: {str(e)}", exc_info=True)
# Don't fail the entire task if PX Action creation fails
action_id = None
logger.info(
f"AI analysis complete for complaint {complaint_id}: "
f"severity={old_severity}->{analysis['severity']}, "
f"priority={old_priority}->{analysis['priority']}, "
f"category={old_category_name}->{analysis['category']}, "
f"department={old_department_name}->{department_name}, "
f"title_en={analysis.get('title_en', '')}"
)
return {
'status': 'success',
'complaint_id': str(complaint_id),
'severity': analysis['severity'],
'priority': analysis['priority'],
'category': analysis['category'],
'department': department_name,
'title_en': analysis.get('title_en', ''),
'title_ar': analysis.get('title_ar', ''),
'short_description_en': analysis.get('short_description_en', ''),
'short_description_ar': analysis.get('short_description_ar', ''),
'suggested_action_en': analysis.get('suggested_action_en', ''),
'suggested_action_ar': analysis.get('suggested_action_ar', ''),
'reasoning_en': analysis.get('reasoning_en', ''),
'reasoning_ar': analysis.get('reasoning_ar', ''),
'emotion': emotion_analysis.get('emotion', 'neutral'),
'emotion_intensity': emotion_analysis.get('intensity', 0.0),
'emotion_confidence': emotion_analysis.get('confidence', 0.0),
'old_severity': old_severity,
'old_priority': old_priority,
'px_action_id': action_id,
'px_action_auto_created': action_id is not None
}
except AIServiceError as e:
logger.error(f"AI service error for complaint {complaint_id}: {str(e)}")
# Keep default values (medium/medium) and log the error
return {
'status': 'ai_error',
'complaint_id': str(complaint_id),
'reason': str(e)
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error analyzing complaint {complaint_id} with AI: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
@shared_task
def send_complaint_notification(complaint_id, event_type):
"""
Send notification for complaint events.
Args:
complaint_id: UUID of the Complaint
event_type: Type of event (created, assigned, overdue, escalated, resolved, closed)
Returns:
dict: Result with notification status
"""
from apps.complaints.models import Complaint
from apps.notifications.services import NotificationService
try:
complaint = Complaint.objects.select_related(
'hospital', 'patient', 'assigned_to', 'department'
).get(id=complaint_id)
# Determine recipients based on event type
recipients = []
if event_type == 'created':
# Notify assigned user or department manager
if complaint.assigned_to:
recipients.append(complaint.assigned_to)
elif complaint.department and complaint.department.manager:
recipients.append(complaint.department.manager)
elif event_type == 'assigned':
# Notify assignee
if complaint.assigned_to:
recipients.append(complaint.assigned_to)
elif event_type in ['overdue', 'escalated']:
# Notify assignee and their manager
if complaint.assigned_to:
recipients.append(complaint.assigned_to)
if complaint.department and complaint.department.manager:
recipients.append(complaint.department.manager)
elif event_type == 'resolved':
# Notify patient
recipients.append(complaint.patient)
elif event_type == 'closed':
# Notify patient
recipients.append(complaint.patient)
# Send notifications
notification_count = 0
for recipient in recipients:
try:
# Check if NotificationService has send_notification method
if hasattr(NotificationService, 'send_notification'):
NotificationService.send_notification(
recipient=recipient,
title=f"Complaint {event_type.title()}: {complaint.title[:50]}",
message=f"Complaint #{str(complaint.id)[:8]} has been {event_type}.",
notification_type='complaint',
related_object=complaint
)
notification_count += 1
else:
logger.warning(f"NotificationService.send_notification method not available")
except Exception as e:
logger.error(f"Failed to send notification to {recipient}: {str(e)}")
logger.info(f"Sent {notification_count} notifications for complaint {complaint_id} event: {event_type}")
return {
'status': 'sent',
'notification_count': notification_count,
'event_type': event_type
}
except Complaint.DoesNotExist:
error_msg = f"Complaint {complaint_id} not found"
logger.error(error_msg)
return {'status': 'error', 'reason': error_msg}
except Exception as e:
error_msg = f"Error sending complaint notification: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
def get_explanation_sla_config(hospital):
"""
Get explanation SLA configuration for a hospital.
Returns the first active ExplanationSLAConfig for the hospital.
Returns None if no config exists (will use defaults).
"""
from apps.complaints.models import ExplanationSLAConfig
try:
return ExplanationSLAConfig.objects.get(
hospital=hospital,
is_active=True
)
except ExplanationSLAConfig.DoesNotExist:
return None
@shared_task
def send_explanation_request_email(explanation_id):
"""
Send email to staff requesting explanation.
Includes link with unique token for staff to submit explanation.
Sets SLA deadline based on hospital configuration.
"""
from apps.complaints.models import ComplaintExplanation
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
explanation = ComplaintExplanation.objects.select_related(
'complaint', 'staff', 'requested_by'
).get(id=explanation_id)
# Calculate SLA deadline
sla_config = get_explanation_sla_config(explanation.complaint.hospital)
sla_hours = sla_config.response_hours if sla_config else 48
explanation.sla_due_at = timezone.now() + timezone.timedelta(hours=sla_hours)
explanation.email_sent_at = timezone.now()
explanation.save(update_fields=['sla_due_at', 'email_sent_at'])
# Prepare email
context = {
'explanation': explanation,
'complaint': explanation.complaint,
'staff': explanation.staff,
'requested_by': explanation.requested_by,
'sla_hours': sla_hours,
'due_date': explanation.sla_due_at,
'site_url': settings.SITE_URL if hasattr(settings, 'SITE_URL') else 'http://localhost:8000',
}
subject = f"Explanation Request: Complaint #{str(explanation.complaint.id)[:8]}"
# Render email templates
message_en = render_to_string(
'complaints/emails/explanation_request_en.txt',
context
)
message_ar = render_to_string(
'complaints/emails/explanation_request_ar.txt',
context
)
# Send email
send_mail(
subject=subject,
message=f"{message_en}\n\n{message_ar}",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[explanation.staff.email],
fail_silently=False
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='explanation_request_sent',
description=f"Explanation request email sent to {explanation.staff.get_full_name()}",
content_object=explanation,
metadata={
'complaint_id': str(explanation.complaint.id),
'staff_name': explanation.staff.get_full_name(),
'sla_hours': sla_hours,
'due_date': explanation.sla_due_at.isoformat()
}
)
logger.info(
f"Explanation request email sent to {explanation.staff.get_full_name()} "
f"for complaint {explanation.complaint_id}"
)
return {'status': 'sent', 'explanation_id': str(explanation.id)}
@shared_task
def check_overdue_explanation_requests():
"""
Periodic task to check for overdue explanation requests.
Runs every 15 minutes (configured in config/celery.py).
Escalates to staff's manager if explanation not submitted within SLA.
Follows staff hierarchy via report_to field.
"""
from apps.complaints.models import ComplaintExplanation
from apps.organizations.models import Staff
now = timezone.now()
# Get explanation requests that are:
# - Not submitted (is_used=False)
# - Email sent (email_sent_at is not null)
# - Past SLA deadline
overdue_explanations = ComplaintExplanation.objects.filter(
is_used=False,
email_sent_at__isnull=False,
sla_due_at__lt=now,
escalated_to_manager__isnull=True # Not yet escalated
).select_related('complaint', 'staff', 'staff__department')
escalated_count = 0
for explanation in overdue_explanations:
# Mark as overdue
if not explanation.is_overdue:
explanation.is_overdue = True
explanation.save(update_fields=['is_overdue'])
# Get SLA config
sla_config = get_explanation_sla_config(explanation.complaint.hospital)
# Check if auto-escalation is enabled
if not sla_config or not sla_config.auto_escalate_enabled:
logger.info(
f"Auto-escalation disabled for explanation {explanation.id}, "
f"hospital {explanation.complaint.hospital.name}"
)
continue
# Get current escalation level
current_level = explanation.metadata.get('escalation_level', 0)
# Check max escalation level
max_level = sla_config.max_escalation_levels if sla_config else 3
if current_level >= max_level:
logger.info(f"Explanation {explanation.id} reached max escalation level {max_level}")
continue
# Calculate hours overdue
hours_overdue = (now - explanation.sla_due_at).total_seconds() / 3600
# Check if we should escalate now
escalation_delay = sla_config.escalation_hours_overdue if sla_config else 0
if hours_overdue < escalation_delay:
logger.info(
f"Explanation {explanation.id} overdue by {hours_overdue:.1f}h, "
f"waiting for escalation delay of {escalation_delay}h"
)
continue
# Determine escalation target using staff hierarchy
escalation_target = None
if explanation.staff and explanation.staff.report_to:
# Escalate to staff's manager
escalation_target = explanation.staff.report_to
# Check if manager already has an explanation request for this complaint
existing_explanation = ComplaintExplanation.objects.filter(
complaint=explanation.complaint,
staff=escalation_target
).first()
if existing_explanation:
logger.info(
f"Staff {escalation_target.get_full_name()} already has an explanation "
f"request for complaint {explanation.complaint.id}, skipping escalation"
)
# Mark as escalated anyway to avoid repeated checks
explanation.escalated_to_manager = existing_explanation
explanation.escalated_at = now
explanation.metadata['escalation_level'] = current_level + 1
explanation.save(update_fields=['escalated_to_manager', 'escalated_at', 'metadata'])
escalated_count += 1
continue
# Create new explanation request for manager
new_explanation = ComplaintExplanation.objects.create(
complaint=explanation.complaint,
staff=escalation_target,
explanation='', # Will be filled by manager
requested_by=explanation.requested_by,
request_message=(
f"Escalated from {explanation.staff.get_full_name()}. "
f"Staff member did not provide explanation within SLA. "
f"Please provide your explanation about this complaint."
),
submitted_via='email_link',
metadata={
'escalated_from_explanation_id': str(explanation.id),
'escalation_level': current_level + 1,
'original_staff_id': str(explanation.staff.id),
'original_staff_name': explanation.staff.get_full_name()
}
)
# Link old explanation to new one
explanation.escalated_to_manager = new_explanation
explanation.escalated_at = now
explanation.metadata['escalation_level'] = current_level + 1
explanation.save(update_fields=['escalated_to_manager', 'escalated_at', 'metadata'])
# Send email to manager
send_explanation_request_email.delay(str(new_explanation.id))
escalated_count += 1
logger.info(
f"Escalated explanation request {explanation.id} to manager "
f"{escalation_target.get_full_name()} (Level {current_level + 1})"
)
else:
logger.warning(
f"No escalation target for explanation {explanation.id} "
f"(staff has no report_to manager)"
)
return {
'overdue_count': overdue_explanations.count(),
'escalated_count': escalated_count
}
@shared_task
def send_explanation_reminders():
"""
Send reminder emails for explanation requests approaching deadline.
Runs every hour via Celery Beat.
Sends reminder to staff if explanation not submitted and deadline approaching.
"""
from apps.complaints.models import ComplaintExplanation
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
now = timezone.now()
    # Get explanation requests that are:
# - Not submitted (is_used=False)
# - Email sent (email_sent_at is not null)
# - Haven't been reminded yet
# - Approaching deadline
explanations = ComplaintExplanation.objects.filter(
is_used=False,
email_sent_at__isnull=False,
reminder_sent_at__isnull=True,
escalated_to_manager__isnull=True
).select_related('complaint', 'staff')
reminder_count = 0
for explanation in explanations:
# Get SLA config
sla_config = get_explanation_sla_config(explanation.complaint.hospital)
reminder_hours_before = sla_config.reminder_hours_before if sla_config else 12
# Calculate reminder threshold time
reminder_time = explanation.sla_due_at - timezone.timedelta(hours=reminder_hours_before)
# Check if we should send reminder now
if now >= reminder_time:
# Calculate hours remaining
hours_remaining = (explanation.sla_due_at - now).total_seconds() / 3600
if hours_remaining < 0:
continue # Already overdue, will be handled by check_overdue_explanation_requests
# Prepare email context
context = {
'explanation': explanation,
'complaint': explanation.complaint,
'staff': explanation.staff,
'hours_remaining': int(hours_remaining),
'due_date': explanation.sla_due_at,
'site_url': settings.SITE_URL if hasattr(settings, 'SITE_URL') else 'http://localhost:8000',
}
subject = f"Reminder: Explanation Request - Complaint #{str(explanation.complaint.id)[:8]}"
try:
# Render email templates
message_en = render_to_string(
'complaints/emails/explanation_reminder_en.txt',
context
)
message_ar = render_to_string(
'complaints/emails/explanation_reminder_ar.txt',
context
)
# Send email
send_mail(
subject=subject,
message=f"{message_en}\n\n{message_ar}",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[explanation.staff.email],
fail_silently=False
)
# Update explanation
explanation.reminder_sent_at = now
explanation.save(update_fields=['reminder_sent_at'])
reminder_count += 1
logger.info(
f"Explanation reminder sent to {explanation.staff.get_full_name()} "
f"for complaint {explanation.complaint_id} "
f"({int(hours_remaining)} hours remaining)"
)
except Exception as e:
logger.error(f"Failed to send explanation reminder for {explanation.id}: {str(e)}")
return {
'status': 'completed',
'reminders_sent': reminder_count
}
@shared_task
def send_sla_reminders():
"""
Send SLA reminder emails for complaints approaching deadline.
Runs every hour via Celery Beat.
Finds complaints where reminder should be sent based on hospital's SLA configuration.
Sends reminder email to assigned user or department manager.
Creates timeline entry for reminder sent.
Returns:
dict: Result with reminder count and details
"""
from apps.complaints.models import Complaint, ComplaintUpdate, ComplaintStatus, ComplaintSLAConfig
from apps.notifications.services import NotificationService
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
try:
now = timezone.now()
# Get active complaints that haven't been reminded yet OR need second reminder
active_complaints = Complaint.objects.filter(
status__in=[ComplaintStatus.OPEN, ComplaintStatus.IN_PROGRESS]
).filter(
            Q(reminder_sent_at__isnull=True) |  # First reminder not sent
            Q(
reminder_sent_at__isnull=False,
second_reminder_sent_at__isnull=True,
reminder_sent_at__lt=now - timezone.timedelta(hours=1) # At least 1 hour after first reminder
)
).select_related('hospital', 'patient', 'assigned_to', 'department', 'category')
reminder_count = 0
skipped_count = 0
for complaint in active_complaints:
# Get SLA config for this complaint
try:
sla_config = ComplaintSLAConfig.objects.get(
hospital=complaint.hospital,
severity=complaint.severity,
priority=complaint.priority,
is_active=True
)
reminder_hours_before = sla_config.reminder_hours_before
            except ComplaintSLAConfig.DoesNotExist:
                # No matching SLA config: fall back to a 24-hour reminder window
                sla_config = None
                reminder_hours_before = 24
            # Skip complaints without an SLA due date (nothing to compute a reminder against)
            if not complaint.due_at:
                skipped_count += 1
                continue
            # Calculate reminder threshold time
            reminder_time = complaint.due_at - timezone.timedelta(hours=reminder_hours_before)
# Check if we should send FIRST reminder now
if now >= reminder_time and complaint.reminder_sent_at is None:
# Determine recipient
recipient = complaint.assigned_to
if not recipient and complaint.department and complaint.department.manager:
recipient = complaint.department.manager
if not recipient:
logger.warning(
f"No recipient for SLA reminder on complaint {complaint.id} "
f"(no assigned user or department manager)"
)
skipped_count += 1
continue
# Calculate hours remaining
hours_remaining = (complaint.due_at - now).total_seconds() / 3600
# Prepare email context
context = {
'complaint': complaint,
'recipient': recipient,
'hours_remaining': int(hours_remaining),
'due_date': complaint.due_at,
                    'site_url': getattr(settings, 'SITE_URL', 'http://localhost:8000'),
}
# Render email templates
subject = f"SLA Reminder: Complaint #{str(complaint.id)[:8]} - {complaint.title[:50]}"
try:
# Try to send via NotificationService first
if hasattr(NotificationService, 'send_notification'):
NotificationService.send_notification(
recipient=recipient,
title=subject,
message=(
f"This is a reminder that complaint #{str(complaint.id)[:8]} "
f"is due in {int(hours_remaining)} hours. "
f"Please take action to avoid SLA breach."
),
notification_type='complaint',
related_object=complaint,
metadata={'event_type': 'sla_reminder'}
)
else:
# Fallback to direct email
message_en = render_to_string(
'complaints/emails/sla_reminder_en.txt',
context
)
message_ar = render_to_string(
'complaints/emails/sla_reminder_ar.txt',
context
)
# Send to recipient's email
                        recipient_email = getattr(recipient, 'email', None)
if recipient_email:
send_mail(
subject=subject,
message=f"{message_en}\n\n{message_ar}",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[recipient_email],
fail_silently=False
)
else:
logger.warning(f"No email for recipient {recipient}")
skipped_count += 1
continue
# Update complaint
complaint.reminder_sent_at = now
complaint.save(update_fields=['reminder_sent_at'])
# Create timeline entry
ComplaintUpdate.objects.create(
complaint=complaint,
update_type='note',
message=(
f"SLA reminder sent to {recipient.get_full_name()}. "
f"Complaint is due in {int(hours_remaining)} hours."
),
created_by=None, # System action
metadata={
'event_type': 'sla_reminder',
'hours_remaining': int(hours_remaining),
'recipient_id': str(recipient.id)
}
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='sla_reminder_sent',
description=f"SLA reminder sent for complaint {complaint.id}",
content_object=complaint,
metadata={
'recipient': recipient.get_full_name(),
'hours_remaining': int(hours_remaining)
}
)
reminder_count += 1
logger.info(
f"SLA reminder sent for complaint {complaint.id} "
f"to {recipient.get_full_name()} "
f"({int(hours_remaining)} hours remaining)"
)
# Trigger reminder-based escalation check
escalate_after_reminder.delay(str(complaint.id))
except Exception as e:
logger.error(f"Failed to send SLA reminder for complaint {complaint.id}: {str(e)}")
skipped_count += 1
            # Check if we should send SECOND reminder now (requires an SLA config with it enabled)
            elif (sla_config is not None and
                  sla_config.second_reminder_enabled and
                  complaint.reminder_sent_at is not None and
                  complaint.second_reminder_sent_at is None):
# Calculate second reminder threshold time
second_reminder_hours_before = sla_config.second_reminder_hours_before
second_reminder_time = complaint.due_at - timezone.timedelta(hours=second_reminder_hours_before)
if now >= second_reminder_time:
# Determine recipient
recipient = complaint.assigned_to
if not recipient and complaint.department and complaint.department.manager:
recipient = complaint.department.manager
if not recipient:
logger.warning(
f"No recipient for second SLA reminder on complaint {complaint.id} "
f"(no assigned user or department manager)"
)
skipped_count += 1
continue
# Calculate hours remaining
hours_remaining = (complaint.due_at - now).total_seconds() / 3600
# Prepare email context
context = {
'complaint': complaint,
'recipient': recipient,
'hours_remaining': int(hours_remaining),
'due_date': complaint.due_at,
                        'site_url': getattr(settings, 'SITE_URL', 'http://localhost:8000'),
}
# Render email templates
subject = f"URGENT - Second SLA Reminder: Complaint #{str(complaint.id)[:8]} - {complaint.title[:50]}"
try:
# Try to send via NotificationService first
if hasattr(NotificationService, 'send_notification'):
NotificationService.send_notification(
recipient=recipient,
title=subject,
message=(
f"This is the SECOND and FINAL reminder that complaint #{str(complaint.id)[:8]} "
f"is due in {int(hours_remaining)} hours. "
f"URGENT action required to avoid SLA breach and escalation."
),
notification_type='complaint',
related_object=complaint,
metadata={'event_type': 'sla_second_reminder'}
)
else:
# Fallback to direct email
message_en = render_to_string(
'complaints/emails/sla_second_reminder_en.txt',
context
)
message_ar = render_to_string(
'complaints/emails/sla_second_reminder_ar.txt',
context
)
# Send to recipient's email
                            recipient_email = getattr(recipient, 'email', None)
if recipient_email:
send_mail(
subject=subject,
message=f"{message_en}\n\n{message_ar}",
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[recipient_email],
fail_silently=False
)
else:
logger.warning(f"No email for recipient {recipient}")
skipped_count += 1
continue
# Update complaint
complaint.second_reminder_sent_at = now
complaint.save(update_fields=['second_reminder_sent_at'])
# Create timeline entry
ComplaintUpdate.objects.create(
complaint=complaint,
update_type='note',
message=(
f"SECOND SLA reminder sent to {recipient.get_full_name()}. "
f"Complaint is due in {int(hours_remaining)} hours. "
f"This is the final reminder before escalation."
),
created_by=None, # System action
metadata={
'event_type': 'sla_second_reminder',
'hours_remaining': int(hours_remaining),
'recipient_id': str(recipient.id)
}
)
# Log audit
from apps.core.services import create_audit_log
create_audit_log(
event_type='sla_second_reminder_sent',
description=f"Second SLA reminder sent for complaint {complaint.id}",
content_object=complaint,
metadata={
'recipient': recipient.get_full_name(),
'hours_remaining': int(hours_remaining)
}
)
reminder_count += 1
logger.info(
f"Second SLA reminder sent for complaint {complaint.id} "
f"to {recipient.get_full_name()} "
f"({int(hours_remaining)} hours remaining)"
)
# Trigger reminder-based escalation check (more urgent now)
escalate_after_reminder.delay(str(complaint.id))
except Exception as e:
logger.error(f"Failed to send second SLA reminder for complaint {complaint.id}: {str(e)}")
skipped_count += 1
logger.info(
f"SLA reminder check complete: {reminder_count} sent, {skipped_count} skipped"
)
return {
'status': 'completed',
'reminders_sent': reminder_count,
'skipped': skipped_count
}
except Exception as e:
error_msg = f"Error in SLA reminder task: {str(e)}"
logger.error(error_msg, exc_info=True)
return {'status': 'error', 'reason': error_msg}
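# Scheduling sketch (illustrative assumption, not part of this module): the docstrings
# above say these reminder tasks run every hour via Celery Beat. Assuming the app is
# importable as "apps.complaints" and the project uses the CELERY_-namespaced Django
# settings, an entry along these lines would wire the SLA reminder task up; the schedule
# key name is hypothetical.
#
#     from celery.schedules import crontab
#
#     CELERY_BEAT_SCHEDULE = {
#         'complaints-send-sla-reminders': {
#             'task': 'apps.complaints.tasks.send_sla_reminders',
#             'schedule': crontab(minute=0),  # hourly, on the hour
#         },
#     }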