"""
Complaints Celery tasks.

This module contains tasks for:

- Checking overdue complaints
- Sending SLA reminders
- Triggering resolution satisfaction surveys
- Creating PX actions from complaints
- AI-powered complaint analysis
"""
|
|
|
|
import logging
|
|
from typing import Optional, Dict, Any, Tuple
|
|
|
|
from celery import shared_task
|
|
from django.db import transaction
|
|
from django.db.models import Q
|
|
from django.utils import timezone
|
|
from django.template.loader import render_to_string
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def match_staff_from_name(
    staff_name: str,
    hospital_id: str,
    department_name: Optional[str] = None,
    return_all: bool = False,
    fuzzy_threshold: float = 0.80,
    max_matches: int = 3,
) -> Tuple[Any, float, str]:
    """
    Enhanced staff matching with fuzzy matching and improved accuracy.

    Matching runs in three layers, stopping at the first layer that yields
    candidates:

    1. Exact matches (English first+last, Arabic full name, original 'name' field)
    2. Fuzzy matches via Levenshtein-style similarity (difflib), threshold-gated
    3. Partial word matches (at least 80% of input words, minimum 2)

    Candidates from the same staff member are de-duplicated; a confidence bonus
    is applied when the staff member belongs to the department extracted from
    the complaint.

    Args:
        staff_name: Name extracted from complaint (without titles)
        hospital_id: Hospital ID to search within
        department_name: Optional department name to prioritize matching
        return_all: If True, return all matching staff. If False, return single best match.
        fuzzy_threshold: Minimum similarity ratio for fuzzy matches (0.0 to 1.0).
            Default 0.80 for stricter matching.
        max_matches: Maximum number of matches to return when return_all=True. Default 3.

    Returns:
        If return_all=True: Tuple of (matches_list, confidence_score, matching_method)
            - matches_list: List of dicts with matched staff details (possibly empty)
        If return_all=False: Tuple of (staff_id, confidence_score, matching_method)
            - staff_id: str UUID of matched staff, or None when nothing matched
        confidence_score is 0.0-1.0 (best match); matching_method describes how
        the staff was matched.
    """
    from apps.organizations.models import Staff, Department

    if not staff_name or not staff_name.strip():
        # Honor the documented contract: None (not []) in single-match mode.
        return ([] if return_all else None), 0.0, "No staff name provided"

    staff_name = staff_name.strip()

    matches = []

    # Build base query - staff from this hospital, active status
    base_query = Staff.objects.filter(hospital_id=hospital_id, status="active")

    # Resolve the department, if one was specified.
    # BUG FIX: department.id is a UUID; every comparison below stringifies
    # staff.department.id, so the target must be a string as well. The original
    # compared str == UUID, which is always False, silently disabling the
    # department bonus and the "correct department" confidence boost.
    dept_id = None
    if department_name:
        department = Department.objects.filter(
            hospital_id=hospital_id, name__iexact=department_name, status="active"
        ).first()
        if department:
            dept_id = str(department.id)

    # Fetch all staff once; all three layers iterate this in-memory list.
    all_staff = list(base_query)

    def _in_department(staff) -> bool:
        # True when the candidate belongs to the department named in the complaint.
        return bool(dept_id and staff.department and str(staff.department.id) == dept_id)

    def _add_match(staff, confidence: float, method: str) -> bool:
        # De-duplicate by staff id; the first (highest-priority) layer wins.
        # Returns True when the candidate was newly recorded.
        if any(m["id"] == str(staff.id) for m in matches):
            return False
        matches.append(_create_match_dict(staff, confidence, method, staff_name))
        return True

    # ========================================
    # LAYER 1: EXACT MATCHES
    # ========================================

    # 1a. Exact match on first_name + last_name (English)
    words = staff_name.split()
    if len(words) >= 2:
        first_name = words[0]
        last_name = " ".join(words[1:])

        for staff in all_staff:
            if staff.first_name.lower() == first_name.lower() and staff.last_name.lower() == last_name.lower():
                in_dept = _in_department(staff)
                confidence = 0.95 if in_dept else 0.90
                method = f"Exact English match in {'correct' if in_dept else 'any'} department"
                if _add_match(staff, confidence, method):
                    logger.info(f"EXACT MATCH (EN): {staff.first_name} {staff.last_name} == {first_name} {last_name}")

    # 1b. Exact match on full Arabic name
    for staff in all_staff:
        full_arabic = f"{staff.first_name_ar} {staff.last_name_ar}".strip()
        if full_arabic == staff_name:
            in_dept = _in_department(staff)
            confidence = 0.95 if in_dept else 0.90
            method = f"Exact Arabic match in {'correct' if in_dept else 'any'} department"
            if _add_match(staff, confidence, method):
                logger.info(f"EXACT MATCH (AR): {full_arabic} == {staff_name}")

    # 1c. Exact match on 'name' field (original full name)
    for staff in all_staff:
        if staff.name and staff.name.lower() == staff_name.lower():
            if _add_match(staff, 0.93, "Exact match on original name field"):
                logger.info(f"EXACT MATCH (name field): {staff.name} == {staff_name}")

    # ========================================
    # LAYER 2: FUZZY MATCHES (if no exact)
    # ========================================

    if not matches:
        logger.info(f"No exact matches found, trying fuzzy matching for: {staff_name}")

        for staff in all_staff:
            # Try different name combinations
            name_combinations = [
                f"{staff.first_name} {staff.last_name}",
                f"{staff.first_name_ar} {staff.last_name_ar}",
                staff.name or "",
                staff.first_name,
                staff.last_name,
                staff.first_name_ar,
                staff.last_name_ar,
            ]

            # Keep the best-scoring combination for this staff member.
            best_ratio = 0.0
            best_match_name = ""
            for combo in name_combinations:
                if not combo:
                    continue
                ratio = _fuzzy_match_ratio(staff_name, combo)
                if ratio > best_ratio:
                    best_ratio = ratio
                    best_match_name = combo

            # Accept only matches at or above the (strict) threshold.
            if best_ratio >= fuzzy_threshold:
                # Scale confidence by match quality; bonus for correct department.
                dept_bonus = 0.10 if _in_department(staff) else 0.0
                confidence = best_ratio * 0.90 + dept_bonus
                method = f"Fuzzy match ({best_ratio:.2f}) on '{best_match_name}'"
                if _add_match(staff, confidence, method):
                    logger.info(f"FUZZY MATCH ({best_ratio:.2f}): {best_match_name} ~ {staff_name}")

    # ========================================
    # LAYER 3: PARTIAL/WORD MATCHES (only if still no matches)
    # ========================================

    if not matches:
        logger.info(f"No fuzzy matches found, trying partial/word matching for: {staff_name}")

        # Split input name into normalized, non-empty words
        input_words = [_normalize_name(w) for w in staff_name.split() if _normalize_name(w)]

        # Require at least two words so single common names don't false-positive
        if len(input_words) >= 2:
            for staff in all_staff:
                # Normalize each name field once, not once per input word.
                staff_fields = [
                    _normalize_name(staff.first_name),
                    _normalize_name(staff.last_name),
                    _normalize_name(staff.first_name_ar),
                    _normalize_name(staff.last_name_ar),
                    _normalize_name(staff.name or ""),
                ]

                total_words = len(input_words)
                # A word counts as matched when it equals or is contained in any field
                # (equality implies containment, so a single substring test suffices).
                match_count = sum(
                    1 for word in input_words if any(word in field for field in staff_fields if field)
                )

                # STRONGER requirement: at least 80% of words, minimum two.
                if (match_count / total_words) >= 0.8 and match_count >= 2:
                    confidence = 0.70 + (match_count / total_words) * 0.10
                    if _in_department(staff):
                        confidence += 0.10

                    method = f"Partial match ({match_count}/{total_words} words)"
                    if _add_match(staff, confidence, method):
                        logger.info(
                            f"PARTIAL MATCH ({match_count}/{total_words}): {staff.first_name} {staff.last_name}"
                        )

    # ========================================
    # FINAL: SORT AND RETURN
    # ========================================

    if matches:
        # Best match first
        matches.sort(key=lambda x: x["confidence"], reverse=True)

        # Cap the list size only in multi-match mode
        if return_all and len(matches) > max_matches:
            matches = matches[:max_matches]

        best_confidence = matches[0]["confidence"]
        best_method = matches[0]["matching_method"]

        logger.info(
            f"Returning {len(matches)} match(es) for '{staff_name}' (max: {max_matches}). "
            f"Best: {matches[0]['name_en']} (confidence: {best_confidence:.2f}, method: {best_method})"
        )

        if not return_all:
            return str(matches[0]["id"]), best_confidence, best_method
        return matches, best_confidence, best_method

    logger.warning(f"No staff match found for name: '{staff_name}'")
    # None (not []) in single-match mode, per the documented contract.
    return ([] if return_all else None), 0.0, "No match found"
|
|
|
|
|
|
def _fuzzy_match_ratio(str1: str, str2: str) -> float:
|
|
"""
|
|
Calculate fuzzy match ratio using difflib.
|
|
|
|
Args:
|
|
str1: First string
|
|
str2: Second string
|
|
|
|
Returns:
|
|
Float from 0.0 to 1.0 representing similarity
|
|
"""
|
|
try:
|
|
from difflib import SequenceMatcher
|
|
|
|
return SequenceMatcher(None, str1.lower(), str2.lower()).ratio()
|
|
except Exception:
|
|
return 0.0
|
|
|
|
|
|
def _normalize_name(name: str) -> str:
|
|
"""
|
|
Normalize name for better matching.
|
|
|
|
- Remove extra spaces
|
|
- Remove hyphens (Al-Shammari -> AlShammari)
|
|
- Convert to lowercase
|
|
- Remove common titles
|
|
"""
|
|
if not name:
|
|
return ""
|
|
|
|
name = name.strip().lower()
|
|
|
|
# Remove common titles (both English and Arabic)
|
|
titles = ["dr.", "dr", "mr.", "mr", "mrs.", "mrs", "ms.", "ms", "د.", "السيد", "السيدة", "الدكتور"]
|
|
for title in titles:
|
|
if name.startswith(title):
|
|
name = name[len(title) :].strip()
|
|
|
|
# Remove hyphens for better matching (Al-Shammari -> AlShammari)
|
|
name = name.replace("-", "")
|
|
|
|
# Remove extra spaces
|
|
while " " in name:
|
|
name = name.replace(" ", " ")
|
|
|
|
return name.strip()
|
|
|
|
|
|
def _create_match_dict(staff, confidence: float, method: str, source_name: str) -> Dict[str, Any]:
    """
    Build the serializable match record for one staff candidate.

    Department info prefers the ForeignKey when set; otherwise the free-text
    department_name is reported and, when possible, resolved against the
    Department table to recover an id.

    Args:
        staff: Staff model instance
        confidence: Confidence score (0.0 to 1.0)
        method: Description of matching method
        source_name: Original input name that was matched

    Returns:
        Dictionary with match details
    """
    from apps.organizations.models import Department

    department_name = None
    department_id = None

    if staff.department:
        # ForeignKey is set - it is authoritative.
        department_name = staff.department.name
        department_id = str(staff.department.id)
    elif staff.department_name:
        # ForeignKey is NULL but the text field has a value - report it and
        # try to resolve it to a real Department for the id.
        department_name = staff.department_name
        resolved = Department.objects.filter(
            hospital_id=staff.hospital_id, name__iexact=staff.department_name, status="active"
        ).first()
        if resolved:
            department_id = str(resolved.id)
            logger.info(f"Matched staff department_name '{staff.department_name}' to Department ID: {department_id}")

    has_arabic_name = bool(staff.first_name_ar and staff.last_name_ar)

    return {
        "id": str(staff.id),
        "name_en": f"{staff.first_name} {staff.last_name}",
        "name_ar": f"{staff.first_name_ar} {staff.last_name_ar}" if has_arabic_name else "",
        "original_name": staff.name or "",
        "job_title": staff.job_title,
        "specialization": staff.specialization,
        "department": department_name,
        "department_id": department_id,
        "section": staff.section,
        "subsection": staff.subsection,
        "department_name_text": staff.department_name,  # Original text field value
        "confidence": confidence,
        "matching_method": method,
        "source_name": source_name,
    }
|
|
|
|
|
|
@shared_task
def check_overdue_complaints():
    """
    Periodic sweep for overdue complaints.

    Runs every 15 minutes (configured in config/celery.py). For every active
    complaint (open / in progress / resolved), check_overdue() updates the
    is_overdue flag; each overdue complaint then gets an automatic escalation
    queued via escalate_complaint_auto.

    Returns:
        dict with overdue_count and escalated_count.
    """
    from apps.complaints.models import Complaint, ComplaintStatus

    # Closed and cancelled complaints are excluded from the sweep.
    active_statuses = [ComplaintStatus.OPEN, ComplaintStatus.IN_PROGRESS, ComplaintStatus.RESOLVED]
    candidates = Complaint.objects.filter(status__in=active_statuses).select_related(
        "hospital", "patient", "department"
    )

    overdue_count = 0
    escalated_count = 0

    for complaint in candidates:
        if not complaint.check_overdue():
            continue
        overdue_count += 1
        logger.warning(f"Complaint {complaint.id} is overdue: {complaint.title} (due: {complaint.due_at})")

        # Queue automatic escalation for this complaint.
        if escalate_complaint_auto.delay(str(complaint.id)):
            escalated_count += 1

    if overdue_count > 0:
        logger.info(f"Found {overdue_count} overdue complaints, triggered {escalated_count} escalations")

    return {"overdue_count": overdue_count, "escalated_count": escalated_count}
|
|
|
|
|
|
@shared_task
def send_complaint_resolution_survey(complaint_id):
    """
    Send the resolution satisfaction survey when a complaint is closed.

    Triggered when a complaint status changes to CLOSED. Skips if a survey was
    already sent, or if the hospital has no active "complaint_resolution"
    survey template.

    Args:
        complaint_id: UUID of the Complaint

    Returns:
        dict: {"status": "sent", "survey_instance_id": ..., "notification_log_id": ...}
        on success; {"status": "skipped", "reason": ...} or
        {"status": "error", "reason": ...} otherwise.
    """
    from apps.complaints.models import Complaint
    from apps.core.services import create_audit_log
    from apps.surveys.models import SurveyInstance, SurveyTemplate

    try:
        complaint = Complaint.objects.select_related("patient", "hospital").get(id=complaint_id)

        # Idempotency guard: never send the survey twice for the same complaint.
        if complaint.resolution_survey:
            logger.info(f"Resolution survey already sent for complaint {complaint_id}")
            return {"status": "skipped", "reason": "already_sent"}

        # Get the hospital's active resolution satisfaction survey template.
        try:
            survey_template = SurveyTemplate.objects.get(
                hospital=complaint.hospital, survey_type="complaint_resolution", is_active=True
            )
        except SurveyTemplate.DoesNotExist:
            logger.warning(f"No resolution satisfaction survey template found for hospital {complaint.hospital.name}")
            return {"status": "skipped", "reason": "no_template"}

        # Create the survey instance and link it to the complaint atomically so a
        # crash cannot leave an orphaned instance or an unlinked complaint.
        with transaction.atomic():
            survey_instance = SurveyInstance.objects.create(
                survey_template=survey_template,
                patient=complaint.patient,
                encounter_id=complaint.encounter_id,
                delivery_channel="sms",  # Default
                recipient_phone=complaint.patient.phone,
                recipient_email=complaint.patient.email,
                metadata={"complaint_id": str(complaint.id), "complaint_title": complaint.title},
            )

            # Link survey to complaint and stamp the send time.
            complaint.resolution_survey = survey_instance
            complaint.resolution_survey_sent_at = timezone.now()
            complaint.save(update_fields=["resolution_survey", "resolution_survey_sent_at"])

        # Deliver the survey invitation to the patient.
        # NOTE(review): delivery happens after the atomic block above commits —
        # confirm this ordering matches the original transaction boundary.
        from apps.notifications.services import NotificationService

        notification_log = NotificationService.send_survey_invitation(
            survey_instance=survey_instance,
            language="en",  # TODO: Get from patient preference
        )

        # Mark the survey as live now that the invitation went out.
        survey_instance.status = "active"
        survey_instance.sent_at = timezone.now()
        survey_instance.save(update_fields=["status", "sent_at"])

        # Log audit event
        create_audit_log(
            event_type="survey_sent",
            description=f"Resolution satisfaction survey sent for complaint: {complaint.title}",
            content_object=survey_instance,
            metadata={"complaint_id": str(complaint.id), "survey_template": survey_template.name},
        )

        logger.info(f"Resolution satisfaction survey sent for complaint {complaint.id}")

        return {
            "status": "sent",
            "survey_instance_id": str(survey_instance.id),
            "notification_log_id": str(notification_log.id),
        }

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}

    except Exception as e:
        # Broad catch so a delivery/DB failure is reported instead of retried blindly.
        error_msg = f"Error sending resolution survey: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def check_resolution_survey_threshold(survey_instance_id, complaint_id):
    """
    Check if a resolution survey score breaches the hospital threshold and
    create a PX Action if needed.

    Triggered when a complaint resolution survey is completed. Looks up the
    hospital's active "resolution_survey_score" ComplaintThreshold; if
    threshold.check_threshold(score) reports a breach, a PXAction is created
    and linked back to the complaint via the contenttypes framework.

    Args:
        survey_instance_id: UUID of the SurveyInstance
        complaint_id: UUID of the Complaint

    Returns:
        dict: Result with action status ("action_created",
        "threshold_not_breached", "no_threshold", or "error").
    """
    from apps.complaints.models import Complaint, ComplaintThreshold
    from apps.surveys.models import SurveyInstance
    from apps.px_action_center.models import PXAction
    from django.contrib.contenttypes.models import ContentType

    try:
        survey = SurveyInstance.objects.get(id=survey_instance_id)
        complaint = Complaint.objects.select_related("hospital", "patient").get(id=complaint_id)

        # Get the configured threshold for this hospital; absence is not an error.
        try:
            threshold = ComplaintThreshold.objects.get(
                hospital=complaint.hospital, threshold_type="resolution_survey_score", is_active=True
            )
        except ComplaintThreshold.DoesNotExist:
            logger.info(f"No resolution survey threshold configured for hospital {complaint.hospital.name_en}")
            return {"status": "no_threshold"}

        # check_threshold() returns True when the score breaches the configured value.
        if threshold.check_threshold(survey.score):
            logger.warning(
                f"Resolution survey score {survey.score} breaches threshold {threshold.threshold_value} "
                f"for complaint {complaint_id}"
            )

            # Create a PX Action linked to the complaint via a generic FK.
            complaint_ct = ContentType.objects.get_for_model(Complaint)

            action = PXAction.objects.create(
                title=f"Low Resolution Satisfaction: {complaint.title[:100]}",
                description=(
                    f"Complaint resolution survey scored {survey.score}% "
                    f"(threshold: {threshold.threshold_value}%). "
                    f"Original complaint: {complaint.description[:200]}"
                ),
                source="complaint_resolution_survey",
                # Very low scores (< 30) are treated as high priority.
                priority="high" if survey.score < 30 else "medium",
                hospital=complaint.hospital,
                department=complaint.department,
                patient=complaint.patient,
                content_type=complaint_ct,
                object_id=complaint.id,
                metadata={
                    "complaint_id": str(complaint.id),
                    "survey_id": str(survey.id),
                    "survey_score": survey.score,
                    "threshold_value": threshold.threshold_value,
                },
            )

            # Log audit
            from apps.core.services import create_audit_log

            create_audit_log(
                event_type="px_action_created",
                description=f"PX Action created from low resolution survey score",
                content_object=action,
                metadata={
                    "complaint_id": str(complaint.id),
                    "survey_score": survey.score,
                    "trigger": "resolution_survey_threshold",
                },
            )

            logger.info(f"Created PX Action {action.id} from low resolution survey score")

            return {
                "status": "action_created",
                "action_id": str(action.id),
                "survey_score": survey.score,
                "threshold": threshold.threshold_value,
            }
        else:
            logger.info(f"Resolution survey score {survey.score} is above threshold {threshold.threshold_value}")
            return {"status": "threshold_not_breached", "survey_score": survey.score}

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        # Broad catch: report the failure to the caller rather than crash the worker.
        error_msg = f"Error checking resolution survey threshold: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def create_action_from_complaint(complaint_id):
    """
    Create a PX Action from a complaint, if the hospital opted in.

    Triggered when a complaint is created. The opt-in flag is read from the
    hospital's metadata key "auto_create_action_on_complaint" (a dedicated
    HospitalComplaintConfig model would replace this in production).

    Args:
        complaint_id: UUID of the Complaint

    Returns:
        dict: {"status": "action_created", "action_id": ...} on success,
        {"status": "disabled"} when the hospital opted out, or
        {"status": "error", "reason": ...} on failure.
    """
    from apps.complaints.models import Complaint
    from apps.px_action_center.models import PXAction
    from django.contrib.contenttypes.models import ContentType

    try:
        complaint = Complaint.objects.select_related("hospital", "patient", "department").get(id=complaint_id)

        # Check if hospital has auto-create enabled. The flag lives in hospital
        # metadata for now; handle legacy rows where the field may be missing.
        hospital_metadata = getattr(complaint.hospital, "metadata", None)
        if hospital_metadata is None:
            hospital_metadata = {}
        auto_create = hospital_metadata.get("auto_create_action_on_complaint", False)

        if not auto_create:
            logger.info(f"Auto-create PX Action disabled for hospital {complaint.hospital.name}")
            return {"status": "disabled"}

        # Use JSON-serializable values instead of model objects in metadata.
        category_name = complaint.category.name_en if complaint.category else None
        category_id = str(complaint.category.id) if complaint.category else None

        # Create the PX Action, linked back to the complaint via a generic FK.
        complaint_ct = ContentType.objects.get_for_model(Complaint)

        action = PXAction.objects.create(
            title=f"New Complaint: {complaint.title[:100]}",
            description=complaint.description[:500],
            source="complaint",
            priority=complaint.priority,
            hospital=complaint.hospital,
            department=complaint.department,
            patient=complaint.patient,
            content_type=complaint_ct,
            object_id=complaint.id,
            metadata={
                "complaint_id": str(complaint.id),
                "complaint_category": category_name,
                "complaint_category_id": category_id,
                "complaint_severity": complaint.severity,
            },
        )

        # Log audit
        from apps.core.services import create_audit_log

        create_audit_log(
            event_type="px_action_created",
            description="PX Action created from complaint",
            content_object=action,
            metadata={"complaint_id": str(complaint.id), "trigger": "complaint_creation"},
        )

        logger.info(f"Created PX Action {action.id} from complaint {complaint_id}")

        return {"status": "action_created", "action_id": str(action.id)}

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error creating action from complaint: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def escalate_complaint_auto(complaint_id):
    """
    Automatically escalate a complaint based on escalation rules.

    Triggered when a complaint becomes overdue. Reads the complaint's current
    escalation level from its metadata, finds the active rule for the next
    level whose overdue window has elapsed, resolves the escalation target
    (role-based or a specific user), reassigns the complaint, records a
    ComplaintUpdate entry, queues a notification, and writes an audit log.
    Supports multi-level escalation with tracking.

    Args:
        complaint_id: UUID of the Complaint

    Returns:
        dict: Result with escalation status ("escalated", "no_matching_rule",
        "max_level_reached", "no_target_found", "already_assigned", or "error").
    """
    from apps.complaints.models import Complaint, ComplaintUpdate, EscalationRule
    from apps.accounts.models import User

    try:
        complaint = Complaint.objects.select_related("hospital", "department", "assigned_to").get(id=complaint_id)

        # Current escalation level lives in complaint.metadata (0 = never escalated).
        current_level = complaint.metadata.get("escalation_level", 0)

        # Hours past the SLA deadline.
        # NOTE(review): assumes due_at is set — a None due_at raises here and is
        # swallowed by the generic handler below; confirm callers guarantee it.
        hours_overdue = (timezone.now() - complaint.due_at).total_seconds() / 3600

        # Active overdue-triggered rules for this hospital, lowest level first.
        rules = EscalationRule.objects.filter(
            hospital=complaint.hospital, is_active=True, trigger_on_overdue=True
        ).order_by("escalation_level", "order")

        # Narrow by severity/priority; an empty filter value on a rule means "any".
        if complaint.severity:
            rules = rules.filter(Q(severity_filter="") | Q(severity_filter=complaint.severity))

        if complaint.priority:
            rules = rules.filter(Q(priority_filter="") | Q(priority_filter=complaint.priority))

        # Find the rule for the next escalation level whose window has elapsed.
        matching_rule = None
        for rule in rules:
            # Only the rule exactly one level above the current one applies.
            if rule.escalation_level == current_level + 1:
                # The rule fires only after its overdue window has elapsed.
                if hours_overdue >= rule.trigger_hours_overdue:
                    # Stop climbing once the rule's ceiling is reached.
                    max_level = rule.max_escalation_level
                    if current_level >= max_level:
                        logger.info(f"Complaint {complaint_id} has reached max escalation level {max_level}")
                        return {"status": "max_level_reached", "max_level": max_level, "current_level": current_level}
                    matching_rule = rule
                    break

        if not matching_rule:
            logger.info(
                f"No matching escalation rule found for complaint {complaint_id} "
                f"(current level: {current_level}, hours overdue: {hours_overdue:.1f})"
            )
            return {"status": "no_matching_rule", "current_level": current_level}

        # Resolve who receives the escalation, based on the rule's target role.
        escalation_target = None

        if matching_rule.escalate_to_role == "department_manager":
            if complaint.department and complaint.department.manager:
                escalation_target = complaint.department.manager

        elif matching_rule.escalate_to_role == "hospital_admin":
            # Find a hospital admin for this hospital.
            escalation_target = User.objects.filter(
                hospital=complaint.hospital, groups__name="Hospital Admin", is_active=True
            ).first()

        elif matching_rule.escalate_to_role == "px_admin":
            # Find a PX admin (note: not hospital-scoped).
            escalation_target = User.objects.filter(groups__name="PX Admin", is_active=True).first()

        elif matching_rule.escalate_to_role == "ceo":
            # Find the CEO for this hospital.
            escalation_target = User.objects.filter(
                hospital=complaint.hospital, groups__name="CEO", is_active=True
            ).first()

        elif matching_rule.escalate_to_role == "specific_user":
            escalation_target = matching_rule.escalate_to_user

        if not escalation_target:
            logger.warning(
                f"Could not find escalation target for rule {matching_rule.name} "
                f"({matching_rule.escalate_to_role}) on complaint {complaint_id}"
            )
            return {"status": "no_target_found", "rule": matching_rule.name, "role": matching_rule.escalate_to_role}

        # Skip if the complaint is already assigned to the resolved target.
        if complaint.assigned_to and complaint.assigned_to.id == escalation_target.id:
            logger.info(
                f"Complaint {complaint_id} already assigned to {escalation_target.get_full_name()}, "
                f"skipping escalation to same person"
            )
            return {"status": "already_assigned", "escalated_to": escalation_target.get_full_name()}

        # Perform escalation: reassign and stamp the escalation time.
        old_assignee = complaint.assigned_to
        complaint.assigned_to = escalation_target
        complaint.escalated_at = timezone.now()

        # Record the new level and the rule that fired, for the next run.
        complaint.metadata["escalation_level"] = matching_rule.escalation_level
        complaint.metadata["last_escalation_rule"] = {
            "id": str(matching_rule.id),
            "name": matching_rule.name,
            "level": matching_rule.escalation_level,
            "timestamp": timezone.now().isoformat(),
        }
        complaint.save(update_fields=["assigned_to", "escalated_at", "metadata"])

        # Record the escalation in the complaint's update timeline.
        ComplaintUpdate.objects.create(
            complaint=complaint,
            update_type="escalation",
            message=(
                f"Automatically escalated to {escalation_target.get_full_name()} "
                f"(Level {matching_rule.escalation_level}, Rule: {matching_rule.name}). "
                f"Complaint is {hours_overdue:.1f} hours overdue."
            ),
            created_by=None,  # System action
            metadata={
                "rule_id": str(matching_rule.id),
                "rule_name": matching_rule.name,
                "escalation_level": matching_rule.escalation_level,
                "hours_overdue": hours_overdue,
                "old_assignee_id": str(old_assignee.id) if old_assignee else None,
                "new_assignee_id": str(escalation_target.id),
            },
        )

        # Queue notifications asynchronously.
        send_complaint_notification.delay(complaint_id=str(complaint.id), event_type="escalated")

        # Log audit
        from apps.core.services import create_audit_log

        create_audit_log(
            event_type="complaint_escalated",
            description=f"Complaint automatically escalated to {escalation_target.get_full_name()} (Level {matching_rule.escalation_level})",
            content_object=complaint,
            metadata={
                "rule": matching_rule.name,
                "level": matching_rule.escalation_level,
                "hours_overdue": hours_overdue,
                "escalated_to": escalation_target.get_full_name(),
            },
        )

        logger.info(
            f"Escalated complaint {complaint_id} to {escalation_target.get_full_name()} "
            f"(Level {matching_rule.escalation_level}) using rule '{matching_rule.name}'"
        )

        return {
            "status": "escalated",
            "rule": matching_rule.name,
            "level": matching_rule.escalation_level,
            "escalated_to": escalation_target.get_full_name(),
            "hours_overdue": round(hours_overdue, 2),
        }

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        # Broad catch: report the failure to the caller rather than crash the worker.
        error_msg = f"Error escalating complaint: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def escalate_after_reminder(complaint_id):
    """
    Escalate a complaint after its SLA reminder if no action was taken.

    This task is triggered by the SLA reminder task for rules with
    reminder_escalation_enabled. It finds the escalation rule for the next
    level and, if enough time has passed since the reminder was sent,
    delegates to the regular ``escalate_complaint_auto`` task.

    Args:
        complaint_id: UUID of the Complaint

    Returns:
        dict: Result with escalation status. JSON-serializable (the queued
        escalation task is reported by its task id, not the AsyncResult).
    """
    from apps.complaints.models import Complaint, EscalationRule

    try:
        complaint = Complaint.objects.select_related("hospital", "department", "assigned_to", "source").get(
            id=complaint_id
        )

        # Reminder-based escalation only applies after a reminder was sent.
        if not complaint.reminder_sent_at:
            logger.info(f"No reminder sent for complaint {complaint_id}, skipping escalation")
            return {"status": "no_reminder_sent"}

        # An SLA config is required to drive reminder escalation.
        sla_config = complaint.get_sla_config()
        if not sla_config:
            logger.info(f"No SLA config for complaint {complaint_id}, skipping reminder escalation")
            return {"status": "no_sla_config"}

        # Active rules with reminder escalation enabled, lowest level first.
        rules = EscalationRule.objects.filter(
            hospital=complaint.hospital, is_active=True, reminder_escalation_enabled=True
        ).order_by("escalation_level")

        # Narrow by severity/priority; an empty filter on a rule means "any".
        if complaint.severity:
            rules = rules.filter(Q(severity_filter="") | Q(severity_filter=complaint.severity))
        if complaint.priority:
            rules = rules.filter(Q(priority_filter="") | Q(priority_filter=complaint.priority))

        if not rules.exists():
            logger.info(f"No reminder escalation rules for complaint {complaint_id}")
            return {"status": "no_rules"}

        # Current escalation level lives in complaint.metadata (0 = none yet).
        current_level = complaint.metadata.get("escalation_level", 0) if complaint.metadata else 0

        # Compute hours since the reminder once; reused for the rule check,
        # logging, and the stored metadata below.
        hours_since_reminder = (timezone.now() - complaint.reminder_sent_at).total_seconds() / 3600

        # Find the rule for the next level whose reminder delay has elapsed.
        matching_rule = None
        for rule in rules:
            if rule.escalation_level == current_level + 1 and hours_since_reminder >= rule.reminder_escalation_hours:
                matching_rule = rule
                break

        if not matching_rule:
            logger.info(
                f"Reminder escalation not yet triggered for complaint {complaint_id} "
                f"(hours since reminder: {hours_since_reminder:.1f})"
            )
            return {
                "status": "not_yet_triggered",
                "hours_since_reminder": hours_since_reminder,
            }

        # Delegate the actual escalation to the regular escalation task.
        result = escalate_complaint_auto.delay(complaint_id)

        # Record that this escalation was reminder-driven. Initialize metadata
        # when missing so the audit trail is never silently dropped.
        if not complaint.metadata:
            complaint.metadata = {}
        complaint.metadata["reminder_escalation"] = {
            "rule_id": str(matching_rule.id),
            "rule_name": matching_rule.name,
            "hours_since_reminder": hours_since_reminder,
            "timestamp": timezone.now().isoformat(),
        }
        complaint.save(update_fields=["metadata"])

        logger.info(
            f"Reminder-based escalation triggered for complaint {complaint_id} using rule '{matching_rule.name}'"
        )

        # Return the queued task's id (a string) rather than the AsyncResult
        # object so the task result stays JSON-serializable for result backends.
        return {"status": "reminder_escalation_triggered", "rule": matching_rule.name, "escalation_result": result.id}

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error in reminder escalation: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def analyze_complaint_with_ai(complaint_id):
    """
    Analyze a complaint using AI to determine severity and priority and category.

    This task is triggered when a complaint is created.
    It uses the AI service to analyze the complaint content and classify it.

    High-level flow (all within one DB row, saved twice):
      1. Run AI classification and emotion analysis on the complaint text.
      2. Apply severity/priority and the 4-level SHCT taxonomy
         (domain -> category -> subcategory -> classification).
      3. Match the AI-suggested department against the hospital's departments.
      4. Fuzzy-match any mentioned staff names (form-submitted + AI-extracted)
         via ``match_staff_from_name`` — suggestions only, no auto-assignment.
      5. Persist the full analysis into ``complaint.metadata["ai_analysis"]``
         and post a bilingual (EN/AR) timeline note.
      6. Recalculate the SLA due date (skipped for appreciations).

    Args:
        complaint_id: UUID of the Complaint

    Returns:
        dict: Result with severity, priority, category, and reasoning
    """
    from apps.complaints.models import Complaint
    from apps.core.ai_service import AIService, AIServiceError

    try:
        complaint = Complaint.objects.select_related("hospital").get(id=complaint_id)

        logger.info(f"Starting AI analysis for complaint {complaint_id}")

        # Get category name if category exists (passed to the AI as a hint).
        category_name = None
        if complaint.category:
            category_name = complaint.category.name_en

        # Analyze complaint using AI service
        try:
            analysis = AIService.analyze_complaint(
                title=complaint.title,
                description=complaint.description,
                category=category_name,
                hospital_id=complaint.hospital.id,
            )

            # Get complaint type from analysis (defaults to "complaint").
            complaint_type = analysis.get("complaint_type", "complaint")

            # Analyze emotion using AI service
            emotion_analysis = AIService.analyze_emotion(text=complaint.description)

            # Snapshot pre-analysis values for the audit metadata below.
            old_severity = complaint.severity
            old_priority = complaint.priority
            old_category = complaint.category
            old_department = complaint.department

            complaint.severity = analysis["severity"]
            complaint.priority = analysis["priority"]

            # Update 4-level SHCT taxonomy from AI taxonomy mapping
            from apps.complaints.models import ComplaintCategory

            taxonomy_mapping = analysis.get("taxonomy_mapping") or {}

            # Level 1: Domain
            if taxonomy_mapping and taxonomy_mapping.get("domain"):
                domain_id = taxonomy_mapping["domain"].get("id")
                if domain_id:
                    try:
                        complaint.domain = ComplaintCategory.objects.get(id=domain_id)
                        logger.info(f"AI set domain: {complaint.domain.name_en}")
                    except ComplaintCategory.DoesNotExist:
                        logger.warning(f"Domain ID {domain_id} not found")

            # Level 2: Category
            if taxonomy_mapping and taxonomy_mapping.get("category"):
                category_id = taxonomy_mapping["category"].get("id")
                if category_id:
                    try:
                        complaint.category = ComplaintCategory.objects.get(id=category_id)
                        logger.info(f"AI set category: {complaint.category.name_en}")
                    except ComplaintCategory.DoesNotExist:
                        logger.warning(f"Category ID {category_id} not found")
            # Fallback to legacy category matching (by English name)
            elif analysis.get("category"):
                if category := ComplaintCategory.objects.filter(name_en=analysis["category"]).first():
                    complaint.category = category

            # Level 3: Subcategory (object FK plus denormalized code/name string)
            if taxonomy_mapping and taxonomy_mapping.get("subcategory"):
                subcategory_id = taxonomy_mapping["subcategory"].get("id")
                if subcategory_id:
                    try:
                        complaint.subcategory_obj = ComplaintCategory.objects.get(id=subcategory_id)
                        complaint.subcategory = complaint.subcategory_obj.code or complaint.subcategory_obj.name_en
                        logger.info(f"AI set subcategory: {complaint.subcategory_obj.name_en}")
                    except ComplaintCategory.DoesNotExist:
                        logger.warning(f"Subcategory ID {subcategory_id} not found")

            # Level 4: Classification (object FK plus denormalized code/name string)
            if taxonomy_mapping and taxonomy_mapping.get("classification"):
                classification_id = taxonomy_mapping["classification"].get("id")
                if classification_id:
                    try:
                        complaint.classification_obj = ComplaintCategory.objects.get(id=classification_id)
                        complaint.classification = (
                            complaint.classification_obj.code or complaint.classification_obj.name_en
                        )
                        logger.info(f"AI set classification: {complaint.classification_obj.name_en}")
                    except ComplaintCategory.DoesNotExist:
                        logger.warning(f"Classification ID {classification_id} not found")

            # Update department from AI analysis
            department_name = analysis.get("department", "")
            if department_name:
                from apps.organizations.models import Department

                # Try exact match first (case-insensitive)
                if department := Department.objects.filter(
                    hospital_id=complaint.hospital.id, name__iexact=department_name, status="active"
                ).first():
                    complaint.department = department
                    logger.info(f"Matched department exactly: {department.name}")
                # If no exact match, try partial match
                elif department := Department.objects.filter(
                    hospital_id=complaint.hospital.id, name__icontains=department_name, status="active"
                ).first():
                    complaint.department = department
                    logger.info(f"Matched department partially: {department.name} from '{department_name}'")
                else:
                    logger.warning(
                        f"AI suggested department '{department_name}' but no match found in hospital '{complaint.hospital.name}'"
                    )

            # Update title from AI analysis (prefer the English version)
            if analysis.get("title_en"):
                complaint.title = analysis["title_en"]
            elif analysis.get("title"):
                complaint.title = analysis["title"]

            # Get ALL staff names from analyze_complaint result (extracted by AI)
            staff_names = analysis.get("staff_names", [])
            primary_staff_name = analysis.get("primary_staff_name", "").strip()

            # Always get ALL matching staff for PX Admin review
            all_staff_matches = []
            staff_confidence = 0.0
            staff_matching_method = None
            matched_staff_id = None

            # Capture old staff before matching (for the audit metadata)
            old_staff = complaint.staff

            # =====================================================
            # STAFF MATCHING: Form-submitted name + AI-extracted names
            # =====================================================

            # 1. Get staff_name from form (stored in metadata by public_complaint_submit)
            form_staff_name = ""
            if complaint.metadata:
                form_staff_name = complaint.metadata.get("staff_name", "")
                form_staff_name = form_staff_name.strip() if form_staff_name else ""

            # 2. Build combined list of names to match, with form-submitted name FIRST (highest priority)
            all_staff_names_to_match = []

            if form_staff_name:
                all_staff_names_to_match.append(form_staff_name)
                logger.info(f"Found staff_name from form submission: '{form_staff_name}'")

            # 3. Add AI-extracted names (avoid duplicates with form-submitted name)
            for name in staff_names:
                name = name.strip()
                if name and name.lower() != form_staff_name.lower():
                    all_staff_names_to_match.append(name)

            if all_staff_names_to_match:
                logger.info(f"Total staff names to match: {len(all_staff_names_to_match)} - {all_staff_names_to_match}")

            # Process ALL staff names (form-submitted + AI-extracted)
            if all_staff_names_to_match:
                logger.info(f"AI extracted {len(staff_names)} staff name(s): {staff_names}")

                # Loop through each name and match to database
                for idx, staff_name in enumerate(all_staff_names_to_match):
                    staff_name = staff_name.strip()
                    if not staff_name:
                        continue

                    logger.info(f"Matching staff name {idx + 1}/{len(all_staff_names_to_match)}: {staff_name}")

                    # Try matching WITH department filter first (higher confidence if match found)
                    matches_for_name, confidence_for_name, method_for_name = match_staff_from_name(
                        staff_name=staff_name,
                        hospital_id=str(complaint.hospital.id),
                        department_name=department_name,
                        return_all=True,  # Return ALL matches
                    )

                    # If no match found with department, try WITHOUT department filter
                    if not matches_for_name:
                        logger.info(
                            f"No match found with department filter '{department_name}' for '{staff_name}', trying without department filter..."
                        )
                        matches_for_name, confidence_for_name, method_for_name = match_staff_from_name(
                            staff_name=staff_name,
                            hospital_id=str(complaint.hospital.id),
                            department_name=None,  # Search all departments
                            return_all=True,
                        )

                    # Add source_name to each match so we know which extracted name it came from
                    for match in matches_for_name:
                        match["source_name"] = staff_name

                    all_staff_matches.extend(matches_for_name)

                # Deduplicate matches (same staff can match multiple names)
                seen_ids = set()
                deduped_matches = []
                for match in all_staff_matches:
                    if match["id"] not in seen_ids:
                        seen_ids.add(match["id"])
                        deduped_matches.append(match)
                all_staff_matches = deduped_matches

                logger.info(f"Total unique staff matches found: {len(all_staff_matches)}")

                # Logic for staff assignment - CHANGED: NO AUTO-ASSIGNMENT
                needs_staff_review = False

                if all_staff_matches:
                    # Sort by confidence (descending)
                    all_staff_matches.sort(key=lambda x: x["confidence"], reverse=True)

                    # Get best match (highest confidence) - BUT DON'T AUTO-ASSIGN
                    best_match = all_staff_matches[0]
                    matched_staff_id = best_match["id"]
                    staff_confidence = best_match["confidence"]
                    staff_matching_method = best_match["matching_method"]

                    # DO NOT AUTO-ASSIGN STAFF - Only store suggestions in metadata
                    # PX Admins will manually select from suggestions
                    logger.info(
                        f"Found staff suggestion: {best_match['name_en']} "
                        f"for complaint {complaint_id} "
                        f"(confidence: {staff_confidence:.2f}, method: {staff_matching_method}) - "
                        f"NOT auto-assigned, pending manual review"
                    )

                    # Mark for review if:
                    # - Low confidence on best match
                    # - Multiple names extracted (multiple people mentioned)
                    # - Multiple database matches found
                    # - ALWAYS mark for review since we're not auto-assigning
                    needs_staff_review = True

                    # Assign to department if confidence is high enough (>= 0.7)
                    if staff_confidence >= 0.7 and best_match.get("department_id"):
                        from apps.organizations.models import Department

                        try:
                            dept = Department.objects.get(id=best_match["department_id"])
                            complaint.department = dept
                            logger.info(f"Assigned to department based on staff match: {dept.name}")
                        except Department.DoesNotExist:
                            pass
                else:
                    # No matches found
                    logger.warning(f"No staff matches found for extracted names")
                    needs_staff_review = False  # No review needed if no names found
            else:
                # No staff names extracted
                logger.info("No staff names extracted from complaint")
                needs_staff_review = False

            # Update complaint type from AI analysis
            complaint.complaint_type = complaint_type

            # Skip SLA and PX Actions for appreciations
            is_appreciation = complaint_type == "appreciation"

            # Save reasoning in metadata
            # Use JSON-serializable values instead of model objects
            old_category_name = old_category.name_en if old_category else None
            old_category_id = str(old_category.id) if old_category else None
            old_department_name = old_department.name if old_department else None
            old_department_id = str(old_department.id) if old_department else None
            old_staff_name = f"{old_staff.first_name} {old_staff.last_name}" if old_staff else None
            old_staff_id = str(old_staff.id) if old_staff else None

            # Initialize metadata if needed
            if not complaint.metadata:
                complaint.metadata = {}

            # Update or create ai_analysis in metadata with bilingual support and emotion
            complaint.metadata["ai_analysis"] = {
                "complaint_type": complaint_type,
                "title_en": analysis.get("title_en", ""),
                "title_ar": analysis.get("title_ar", ""),
                "short_description_en": analysis.get("short_description_en", ""),
                "short_description_ar": analysis.get("short_description_ar", ""),
                # Store suggested actions as list (new format)
                "suggested_actions": analysis.get("suggested_actions", []),
                # Keep single action fields for backward compatibility
                "suggested_action_en": analysis.get("suggested_action_en", ""),
                "suggested_action_ar": analysis.get("suggested_action_ar", ""),
                "reasoning_en": analysis.get("reasoning_en", ""),
                "reasoning_ar": analysis.get("reasoning_ar", ""),
                "emotion": emotion_analysis.get("emotion", "neutral"),
                "emotion_intensity": emotion_analysis.get("intensity", 0.0),
                "emotion_confidence": emotion_analysis.get("confidence", 0.0),
                "analyzed_at": timezone.now().isoformat(),
                "old_severity": old_severity,
                "old_priority": old_priority,
                "old_category": old_category_name,
                "old_category_id": old_category_id,
                "old_department": old_department_name,
                "old_department_id": old_department_id,
                "old_staff": old_staff_name,
                "old_staff_id": old_staff_id,
                "extracted_staff_names": staff_names,
                "primary_staff_name": primary_staff_name,
                "staff_matches": all_staff_matches,
                "matched_staff_id": matched_staff_id,
                "staff_confidence": staff_confidence,
                "staff_matching_method": staff_matching_method,
                "needs_staff_review": needs_staff_review,
                "staff_match_count": len(all_staff_matches),
                # Full 4-level taxonomy from AI
                "taxonomy": analysis.get("taxonomy", {}),
                "taxonomy_mapping": taxonomy_mapping,
            }

            complaint.save(
                update_fields=[
                    "complaint_type",
                    "severity",
                    "priority",
                    "domain",
                    "category",
                    "subcategory",
                    "subcategory_obj",
                    "classification",
                    "classification_obj",
                    "department",
                    "staff",
                    "title",
                    "metadata",
                ]
            )

            # Re-calculate SLA due date based on new severity (skip for appreciations)
            if not is_appreciation:
                complaint.due_at = complaint.calculate_sla_due_date()
                complaint.save(update_fields=["due_at"])

            # Create timeline update for AI completion
            from apps.complaints.models import ComplaintUpdate

            # Build bilingual message
            emotion_display = emotion_analysis.get("emotion", "neutral")
            emotion_intensity = emotion_analysis.get("intensity", 0.0)

            # Build English message
            message_en = f"AI analysis complete: Severity={analysis['severity']}, Priority={analysis['priority']}, Category={analysis.get('category', 'N/A')}, Department={department_name or 'N/A'}"
            if matched_staff_id:
                message_en += f", Staff={f'{complaint.staff.first_name} {complaint.staff.last_name}' if complaint.staff else 'N/A'} (confidence: {staff_confidence:.2f})"
            message_en += f", Emotion={emotion_display} (Intensity: {emotion_intensity:.2f})"

            # Build Arabic message (falls back to English names when Arabic fields are empty)
            message_ar = f"اكتمل تحليل الذكاء الاصطناعي: الشدة={analysis['severity']}, الأولوية={analysis['priority']}, الفئة={analysis.get('category', 'N/A')}, القسم={department_name or 'N/A'}"
            if matched_staff_id and complaint.staff:
                staff_name_ar = (
                    complaint.staff.first_name_ar if complaint.staff.first_name_ar else complaint.staff.first_name
                )
                message_ar += f", الموظف={staff_name_ar} {complaint.staff.last_name_ar if complaint.staff.last_name_ar else complaint.staff.last_name} (الثقة: {staff_confidence:.2f})"
            message_ar += f", العاطفة={emotion_display} (الشدة: {emotion_intensity:.2f})"

            message = f"{message_en}\n\n{message_ar}"

            ComplaintUpdate.objects.create(complaint=complaint, update_type="note", message=message)

            # Initialize action_id
            action_id = None

            # Skip PX Action creation - now manual only via "Create PX Action" button
            if is_appreciation:
                logger.info(f"Skipping PX Action creation for appreciation {complaint_id}")
                # Create timeline entry for appreciation
                ComplaintUpdate.objects.create(
                    complaint=complaint,
                    update_type="note",
                    message=f"Appreciation detected - No PX Action or SLA tracking required for positive feedback.",
                )
            else:
                logger.info(
                    f"Skipping automatic PX Action creation for complaint {complaint_id} - manual creation only"
                )
                # PX Action creation is now MANUAL only via the "Create PX Action" button in AI Analysis tab
                # action_id remains None from initialization above

            logger.info(
                f"AI analysis complete for complaint {complaint_id}: "
                f"severity={old_severity}->{analysis['severity']}, "
                f"priority={old_priority}->{analysis['priority']}, "
                f"category={old_category_name}->{analysis['category']}, "
                f"department={old_department_name}->{department_name}, "
                f"title_en={analysis.get('title_en', '')}"
            )

            return {
                "status": "success",
                "complaint_id": str(complaint_id),
                "severity": analysis["severity"],
                "priority": analysis["priority"],
                "category": analysis["category"],
                "department": department_name,
                "title_en": analysis.get("title_en", ""),
                "title_ar": analysis.get("title_ar", ""),
                "short_description_en": analysis.get("short_description_en", ""),
                "short_description_ar": analysis.get("short_description_ar", ""),
                "suggested_action_en": analysis.get("suggested_action_en", ""),
                "suggested_action_ar": analysis.get("suggested_action_ar", ""),
                "reasoning_en": analysis.get("reasoning_en", ""),
                "reasoning_ar": analysis.get("reasoning_ar", ""),
                "emotion": emotion_analysis.get("emotion", "neutral"),
                "emotion_intensity": emotion_analysis.get("intensity", 0.0),
                "emotion_confidence": emotion_analysis.get("confidence", 0.0),
                "old_severity": old_severity,
                "old_priority": old_priority,
                "px_action_id": action_id,
                "px_action_auto_created": action_id is not None,
            }

        except AIServiceError as e:
            logger.error(f"AI service error for complaint {complaint_id}: {str(e)}")
            # Keep default values (medium/medium) and log the error
            return {"status": "ai_error", "complaint_id": str(complaint_id), "reason": str(e)}

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}

    except Exception as e:
        error_msg = f"Error analyzing complaint {complaint_id} with AI: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def send_complaint_notification(complaint_id, event_type):
    """
    Send notification for complaint events.

    Recipients depend on the event:
      - created: assignee, falling back to the department manager
      - assigned: assignee
      - overdue / escalated: assignee and department manager
      - resolved / closed: the patient (skipped when no patient is linked)

    Args:
        complaint_id: UUID of the Complaint
        event_type: Type of event (created, assigned, overdue, escalated, resolved, closed)

    Returns:
        dict: Result with notification status
    """
    from apps.complaints.models import Complaint
    from apps.notifications.services import NotificationService

    try:
        complaint = Complaint.objects.select_related("hospital", "patient", "assigned_to", "department").get(
            id=complaint_id
        )

        # Determine recipients based on event type
        recipients = []

        if event_type == "created":
            # Notify assigned user or department manager
            if complaint.assigned_to:
                recipients.append(complaint.assigned_to)
            elif complaint.department and complaint.department.manager:
                recipients.append(complaint.department.manager)

        elif event_type == "assigned":
            # Notify assignee
            if complaint.assigned_to:
                recipients.append(complaint.assigned_to)

        elif event_type in ["overdue", "escalated"]:
            # Notify assignee and their manager
            if complaint.assigned_to:
                recipients.append(complaint.assigned_to)
            if complaint.department and complaint.department.manager:
                recipients.append(complaint.department.manager)

        elif event_type in ["resolved", "closed"]:
            # Notify patient; guard against complaints with no linked patient
            # (e.g. anonymous submissions) so we don't pass None downstream.
            if complaint.patient:
                recipients.append(complaint.patient)

        # De-duplicate while preserving order: for overdue/escalated the
        # assignee may also be the department manager.
        seen = set()
        unique_recipients = []
        for recipient in recipients:
            key = getattr(recipient, "pk", id(recipient))
            if key not in seen:
                seen.add(key)
                unique_recipients.append(recipient)

        # Send notifications; a failure for one recipient must not block the rest.
        notification_count = 0
        for recipient in unique_recipients:
            try:
                # Check if NotificationService has send_notification method
                if hasattr(NotificationService, "send_notification"):
                    NotificationService.send_notification(
                        recipient=recipient,
                        title=f"Complaint {event_type.title()}: {complaint.title[:50]}",
                        message=f"Complaint #{str(complaint.id)[:8]} has been {event_type}.",
                        notification_type="complaint",
                        related_object=complaint,
                    )
                    notification_count += 1
                else:
                    logger.warning("NotificationService.send_notification method not available")
            except Exception as e:
                logger.error(f"Failed to send notification to {recipient}: {str(e)}")

        logger.info(f"Sent {notification_count} notifications for complaint {complaint_id} event: {event_type}")

        return {"status": "sent", "notification_count": notification_count, "event_type": event_type}

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error sending complaint notification: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
def get_explanation_sla_config(hospital):
    """
    Get explanation SLA configuration for a hospital.

    Returns the first active ExplanationSLAConfig for the hospital.
    Returns None if no config exists (callers then use defaults).
    """
    from apps.complaints.models import ExplanationSLAConfig

    # Use filter().first() instead of get(): get() raises
    # MultipleObjectsReturned when more than one active config exists,
    # while the documented contract is "the first active config or None".
    return ExplanationSLAConfig.objects.filter(hospital=hospital, is_active=True).first()
|
|
|
|
|
|
@shared_task
def send_explanation_request_email(explanation_id):
    """
    Send email to staff requesting explanation.

    Includes link with unique token for staff to submit explanation.
    Sets SLA deadline based on hospital configuration (default 48h) and
    stamps ``email_sent_at`` before sending.

    Args:
        explanation_id: UUID of the ComplaintExplanation

    Returns:
        dict: {"status": "sent", "explanation_id": ...}
    """
    from apps.complaints.models import ComplaintExplanation
    from django.core.mail import send_mail
    from django.conf import settings
    from django.template.loader import render_to_string

    explanation = ComplaintExplanation.objects.select_related("complaint", "staff", "requested_by").get(
        id=explanation_id
    )

    # Calculate SLA deadline from the hospital config, defaulting to 48 hours.
    sla_config = get_explanation_sla_config(explanation.complaint.hospital)
    sla_hours = sla_config.response_hours if sla_config else 48

    explanation.sla_due_at = timezone.now() + timezone.timedelta(hours=sla_hours)
    explanation.email_sent_at = timezone.now()
    explanation.save(update_fields=["sla_due_at", "email_sent_at"])

    # Prepare email template context
    context = {
        "explanation": explanation,
        "complaint": explanation.complaint,
        "staff": explanation.staff,
        "requested_by": explanation.requested_by,
        "sla_hours": sla_hours,
        "due_date": explanation.sla_due_at,
        "site_url": settings.SITE_URL if hasattr(settings, "SITE_URL") else "http://localhost:8000",
    }

    subject = f"Explanation Request: Complaint #{str(explanation.complaint.id)[:8]}"

    # Render email template with new branded template
    html_message = render_to_string("emails/explanation_request.html", context)

    # Plain text fallback. NOTE: the previous code did
    # context.get("complaint", {}).get("description"), but context["complaint"]
    # is a Complaint model instance (no .get() method), which raised
    # AttributeError on every call. Check the model attribute directly instead.
    message_text = (
        render_to_string("complaints/emails/explanation_request_en.txt", context)
        if getattr(explanation.complaint, "description", None)
        else f"""
Explanation Request - Complaint #{str(explanation.complaint.id)[:8]}

Dear {explanation.staff.get_full_name()},

You have been assigned to provide an explanation for a patient complaint.

Complaint Reference: #{str(explanation.complaint.id)[:8]}
Patient: {explanation.complaint.patient_name if hasattr(explanation.complaint, "patient_name") else "N/A"}
Hospital: {explanation.complaint.hospital.name}
Department: {explanation.complaint.department.name if explanation.complaint.department else "N/A"}

Please submit your explanation using the link provided in the HTML email.

Thank you,
PX360 Complaint Management System
Al Hammadi Hospital
"""
    )

    # Send email (fail loudly so Celery records the failure)
    send_mail(
        subject=subject,
        message=message_text,
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[explanation.staff.email],
        html_message=html_message,
        fail_silently=False,
    )

    # Log audit
    from apps.core.services import create_audit_log

    create_audit_log(
        event_type="explanation_request_sent",
        description=f"Explanation request email sent to {explanation.staff.get_full_name()}",
        content_object=explanation,
        metadata={
            "complaint_id": str(explanation.complaint.id),
            "staff_name": explanation.staff.get_full_name(),
            "sla_hours": sla_hours,
            "due_date": explanation.sla_due_at.isoformat(),
        },
    )

    logger.info(
        f"Explanation request email sent to {explanation.staff.get_full_name()} "
        f"for complaint {explanation.complaint_id}"
    )

    return {"status": "sent", "explanation_id": str(explanation.id)}
|
|
|
|
|
|
@shared_task
def check_overdue_explanation_requests():
    """
    Periodic task to check for overdue explanation requests.

    Runs every 15 minutes (configured in config/celery.py).
    When staff doesn't respond within SLA, creates an explanation request with link for manager.
    Follows staff hierarchy via report_to field.

    Escalation rules applied per overdue request:
      - skipped when the hospital's config disables auto-escalation;
      - skipped once the configured max escalation level is reached;
      - deferred until the configured extra delay past the SLA deadline;
      - if the manager already has an explanation request for the same
        complaint (used or not), the overdue request is linked to it instead
        of creating a duplicate.

    Returns:
        dict: {"overdue_count": int, "escalated_count": int}
    """
    from apps.complaints.models import ComplaintExplanation
    from apps.organizations.models import Staff

    now = timezone.now()

    # Get explanation requests that are:
    # - Not submitted (is_used=False)
    # - Email sent (email_sent_at is not null)
    # - Past SLA deadline
    # - Not yet escalated (escalated_to_manager is null)
    overdue_explanations = ComplaintExplanation.objects.filter(
        is_used=False, email_sent_at__isnull=False, sla_due_at__lt=now, escalated_to_manager__isnull=True
    ).select_related("complaint", "staff", "staff__department")

    escalated_count = 0

    for explanation in overdue_explanations:
        # Mark as overdue (idempotent: only write when the flag flips)
        if not explanation.is_overdue:
            explanation.is_overdue = True
            explanation.save(update_fields=["is_overdue"])

        # Get SLA config for this explanation's hospital
        sla_config = get_explanation_sla_config(explanation.complaint.hospital)

        # Check if auto-escalation is enabled
        if not sla_config or not sla_config.auto_escalate_enabled:
            logger.info(
                f"Auto-escalation disabled for explanation {explanation.id}, "
                f"hospital {explanation.complaint.hospital.name}"
            )
            continue

        # Get current escalation level (0 = never escalated)
        current_level = explanation.metadata.get("escalation_level", 0)

        # Check max escalation level
        max_level = sla_config.max_escalation_levels if sla_config else 3

        if current_level >= max_level:
            logger.info(f"Explanation {explanation.id} reached max escalation level {max_level}")
            continue

        # Calculate hours overdue
        hours_overdue = (now - explanation.sla_due_at).total_seconds() / 3600

        # Check if we should escalate now (configurable grace period past SLA)
        escalation_delay = sla_config.escalation_hours_overdue if sla_config else 0
        if hours_overdue < escalation_delay:
            logger.info(
                f"Explanation {explanation.id} overdue by {hours_overdue:.1f}h, "
                f"waiting for escalation delay of {escalation_delay}h"
            )
            continue

        # Determine escalation target - manager of the staff member
        if explanation.staff and explanation.staff.report_to:
            manager = explanation.staff.report_to

            # Check if manager already has an active explanation request for this complaint
            existing_manager_explanation = ComplaintExplanation.objects.filter(
                complaint=explanation.complaint, staff=manager
            ).first()

            if existing_manager_explanation and not existing_manager_explanation.is_used:
                logger.info(
                    f"Manager {manager.get_full_name()} already has an active explanation "
                    f"request for complaint {explanation.complaint.id}, skipping escalation"
                )
                # Mark as escalated anyway to avoid repeated checks
                explanation.escalated_to_manager = existing_manager_explanation
                explanation.escalated_at = now
                explanation.metadata["escalation_level"] = current_level + 1
                explanation.save(update_fields=["escalated_to_manager", "escalated_at", "metadata"])
                escalated_count += 1
                continue

            if existing_manager_explanation and existing_manager_explanation.is_used:
                logger.info(
                    f"Manager {manager.get_full_name()} already submitted an explanation "
                    f"for complaint {explanation.complaint.id}, skipping escalation"
                )
                # Mark as escalated (links to the already-submitted request)
                explanation.escalated_to_manager = existing_manager_explanation
                explanation.escalated_at = now
                explanation.metadata["escalation_level"] = current_level + 1
                explanation.save(update_fields=["escalated_to_manager", "escalated_at", "metadata"])
                escalated_count += 1
                continue

            # Create new explanation request for manager with token/link
            import secrets

            manager_token = secrets.token_urlsafe(32)

            # Calculate new SLA deadline for manager (same window as original)
            sla_hours = sla_config.response_hours if sla_config else 48

            new_explanation = ComplaintExplanation.objects.create(
                complaint=explanation.complaint,
                staff=manager,
                token=manager_token,
                explanation="",  # Will be filled by manager
                requested_by=explanation.requested_by,
                request_message=(
                    f"ESCALATED: {explanation.staff.get_full_name()} did not provide an explanation "
                    f"within the SLA deadline ({sla_hours} hours). "
                    f"As their manager, please provide your explanation about this complaint."
                ),
                submitted_via="email_link",
                sla_due_at=now + timezone.timedelta(hours=sla_hours),
                email_sent_at=now,
                metadata={
                    "escalated_from_explanation_id": str(explanation.id),
                    "escalation_level": current_level + 1,
                    "original_staff_id": str(explanation.staff.id),
                    "original_staff_name": explanation.staff.get_full_name(),
                    "is_escalation": True,
                },
            )

            # Link old explanation to new one
            explanation.escalated_to_manager = new_explanation
            explanation.escalated_at = now
            explanation.metadata["escalation_level"] = current_level + 1
            explanation.save(update_fields=["escalated_to_manager", "escalated_at", "metadata"])

            # Send email to manager with link (async)
            send_explanation_request_email.delay(str(new_explanation.id))

            escalated_count += 1

            logger.info(
                f"Escalated explanation request {explanation.id} to manager "
                f"{manager.get_full_name()} (Level {current_level + 1})"
            )
        else:
            logger.warning(f"No escalation target for explanation {explanation.id} (staff has no report_to manager)")

    return {"overdue_count": overdue_explanations.count(), "escalated_count": escalated_count}
|
|
|
|
|
|
@shared_task
def send_explanation_reminders():
    """
    Send reminder emails for explanation requests approaching their deadline.

    Runs every hour via Celery Beat. For each pending explanation request
    (unsubmitted, already emailed, never reminded, not escalated), a single
    bilingual reminder is sent once the configured reminder window before the
    SLA deadline has been reached. Overdue requests are left to the overdue/
    escalation task.

    Returns:
        dict: {"status": "completed", "reminders_sent": <count>}
    """
    from apps.complaints.models import ComplaintExplanation
    from django.core.mail import send_mail
    from django.conf import settings
    from django.template.loader import render_to_string

    now = timezone.now()

    # Candidates: email already delivered, nothing submitted yet, no reminder
    # sent so far, and not escalated to a manager.
    pending_requests = ComplaintExplanation.objects.filter(
        is_used=False, email_sent_at__isnull=False, reminder_sent_at__isnull=True, escalated_to_manager__isnull=True
    ).select_related("complaint", "staff")

    sent_total = 0

    for explanation in pending_requests:
        # The per-hospital SLA config decides how far before the deadline the
        # reminder goes out (default: 12 hours before).
        config = get_explanation_sla_config(explanation.complaint.hospital)
        lead_hours = config.reminder_hours_before if config else 12
        remind_from = explanation.sla_due_at - timezone.timedelta(hours=lead_hours)

        if now < remind_from:
            # Too early — the reminder window has not opened yet.
            continue

        remaining = (explanation.sla_due_at - now).total_seconds() / 3600
        if remaining < 0:
            # Already overdue; check_overdue_explanation_requests owns this case.
            continue

        ctx = {
            "explanation": explanation,
            "complaint": explanation.complaint,
            "staff": explanation.staff,
            "hours_remaining": int(remaining),
            "due_date": explanation.sla_due_at,
            "site_url": settings.SITE_URL if hasattr(settings, "SITE_URL") else "http://localhost:8000",
        }

        subject = f"Reminder: Explanation Request - Complaint #{str(explanation.complaint.id)[:8]}"

        try:
            # Bilingual body: English followed by Arabic.
            body_en = render_to_string("complaints/emails/explanation_reminder_en.txt", ctx)
            body_ar = render_to_string("complaints/emails/explanation_reminder_ar.txt", ctx)

            send_mail(
                subject=subject,
                message=f"{body_en}\n\n{body_ar}",
                from_email=settings.DEFAULT_FROM_EMAIL,
                recipient_list=[explanation.staff.email],
                fail_silently=False,
            )

            # Stamp the request so it is never reminded twice.
            explanation.reminder_sent_at = now
            explanation.save(update_fields=["reminder_sent_at"])

            sent_total += 1

            logger.info(
                f"Explanation reminder sent to {explanation.staff.get_full_name()} "
                f"for complaint {explanation.complaint_id} "
                f"({int(remaining)} hours remaining)"
            )

        except Exception as e:
            # One failed email must not abort the whole sweep.
            logger.error(f"Failed to send explanation reminder for {explanation.id}: {str(e)}")

    return {"status": "completed", "reminders_sent": sent_total}
|
|
|
|
|
|
def get_hospital_admins_and_coordinators(hospital):
    """
    Return the active Hospital Admin and PX Coordinator users for a hospital.

    These users are the fallback recipients for SLA reminders on complaints
    that have no assignee and no department manager.

    Args:
        hospital: Hospital instance to scope the lookup to.

    Returns:
        Distinct QuerySet of active User objects in either group.
    """
    from apps.accounts.models import User
    from django.db.models import Q

    admin_groups = Q(groups__name="Hospital Admin") | Q(groups__name="PX Coordinator")
    return User.objects.filter(admin_groups, hospital=hospital, is_active=True).distinct()
|
|
|
|
|
|
@shared_task
def send_sla_reminders():
    """
    Send SLA reminder emails for complaints approaching their deadline.

    Runs every hour via Celery Beat. For each open/in-progress complaint the
    task evaluates the reminder timing from the complaint's SLA config —
    source-based (hours after creation) or legacy (hours before deadline) —
    and dispatches a first and, at least one hour later, a second (final)
    reminder via ``_deliver_sla_reminder``. Each reminder goes to the
    assignee, else the department manager, else the hospital's Admins /
    PX Coordinators.

    Returns:
        dict: {"status": "completed", "reminders_sent": n, "skipped": m}
        on success, or {"status": "error", "reason": ...} on unexpected
        failure.
    """
    from apps.complaints.models import Complaint, ComplaintStatus

    try:
        now = timezone.now()

        # Candidates: active complaints awaiting their first reminder, or
        # awaiting the second reminder at least 1 hour after the first.
        active_complaints = (
            Complaint.objects.filter(status__in=[ComplaintStatus.OPEN, ComplaintStatus.IN_PROGRESS])
            .filter(
                Q(reminder_sent_at__isnull=True)  # First reminder not sent
                | Q(
                    reminder_sent_at__isnull=False,
                    second_reminder_sent_at__isnull=True,
                    reminder_sent_at__lt=now - timezone.timedelta(hours=1),
                )
            )
            .select_related("hospital", "patient", "assigned_to", "department", "category", "source")
        )

        reminder_count = 0
        skipped_count = 0

        for complaint in active_complaints:
            # Source-based or severity/priority-based SLA configuration.
            sla_config = complaint.get_sla_config()

            if sla_config:
                # Config helper yields "hours after creation" offsets.
                first_reminder_hours_after = sla_config.get_first_reminder_hours_after(complaint.created_at)
                second_reminder_hours_after = sla_config.get_second_reminder_hours_after(complaint.created_at)
            else:
                # Derive the SLA window from due_at/created_at (default 72h)
                # and remind 24h / 6h before the deadline.
                if complaint.due_at and complaint.created_at:
                    sla_hours = int((complaint.due_at - complaint.created_at).total_seconds() / 3600)
                else:
                    sla_hours = 72  # Default 72 hours
                first_reminder_hours_after = sla_hours - 24
                second_reminder_hours_after = sla_hours - 6

            if complaint.reminder_sent_at is None:
                # --- First reminder ---
                if first_reminder_hours_after > 0:
                    # Source-based: hours after creation.
                    reminder_time = complaint.created_at + timezone.timedelta(hours=first_reminder_hours_after)
                else:
                    # Legacy fallback: fixed 24 hours before the deadline.
                    reminder_time = complaint.due_at - timezone.timedelta(hours=24)

                if now >= reminder_time:
                    if _deliver_sla_reminder(complaint, now, is_second=False) == "sent":
                        reminder_count += 1
                    else:
                        skipped_count += 1

            elif complaint.second_reminder_sent_at is None and second_reminder_hours_after > 0:
                # --- Second (final) reminder ---
                # The elif guard requires a positive source-based offset, so
                # only hours-after-creation timing applies here; the legacy
                # hours-before-deadline fallback in the previous version of
                # this branch was unreachable and has been removed.
                second_reminder_time = complaint.created_at + timezone.timedelta(
                    hours=second_reminder_hours_after
                )

                if now >= second_reminder_time:
                    if _deliver_sla_reminder(complaint, now, is_second=True) == "sent":
                        reminder_count += 1
                    else:
                        skipped_count += 1

        logger.info(f"SLA reminder check complete: {reminder_count} sent, {skipped_count} skipped")

        return {"status": "completed", "reminders_sent": reminder_count, "skipped": skipped_count}

    except Exception as e:
        error_msg = f"Error in SLA reminder task: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}


def _deliver_sla_reminder(complaint, now, is_second):
    """
    Send a single SLA reminder (first or second/final) for one complaint.

    Resolves the recipient chain (assignee -> department manager -> Hospital
    Admins/PX Coordinators for unassigned complaints), delivers the
    notification via NotificationService when available (bilingual email
    otherwise), stamps the appropriate reminder timestamp on the complaint,
    records a timeline entry plus an audit log, and queues the
    reminder-based escalation check.

    Args:
        complaint: Complaint whose reminder window has been reached.
        now: Timezone-aware datetime used as the send timestamp.
        is_second: True for the second (final) reminder, False for the first.

    Returns:
        str: "sent" on success, "skipped" when no recipient could be
        resolved or delivery failed.
    """
    from apps.complaints.models import ComplaintUpdate
    from apps.notifications.services import NotificationService
    from django.core.mail import send_mail
    from django.conf import settings
    from django.template.loader import render_to_string

    short_id = str(complaint.id)[:8]

    # Recipient resolution: assignee first, then the department manager.
    recipient = complaint.assigned_to
    if not recipient and complaint.department and complaint.department.manager:
        recipient = complaint.department.manager

    # Unassigned complaints fall back to Hospital Admins / PX Coordinators.
    is_unassigned = False
    fallback_recipients = []
    if not recipient:
        is_unassigned = True
        fallback_recipients = get_hospital_admins_and_coordinators(complaint.hospital)

        if not fallback_recipients.exists():
            reminder_label = "second SLA reminder" if is_second else "SLA reminder"
            logger.warning(
                f"No Hospital Admins or PX Coordinators found for hospital {complaint.hospital.name} "
                f"to receive {reminder_label} for unassigned complaint {complaint.id}"
            )
            return "skipped"

    # NOTE(review): assumes due_at is set for active complaints — confirm
    # upstream guarantees; a null due_at would raise here (as it did before).
    hours_remaining = (complaint.due_at - now).total_seconds() / 3600

    context = {
        "complaint": complaint,
        "recipient": recipient,
        "hours_remaining": int(hours_remaining),
        "due_date": complaint.due_at,
        "site_url": f"{settings.SITE_URL if hasattr(settings, 'SITE_URL') else 'http://localhost:8000'}",
        "is_unassigned": is_unassigned,
    }

    # Per-reminder wording/template/metadata (strings preserved verbatim
    # from the original first/second branches).
    if is_second:
        base_subject = f"URGENT - Second SLA Reminder: Complaint #{short_id} - {complaint.title[:50]}"
        if is_unassigned:
            message = (
                f"URGENT: This is the SECOND and FINAL reminder about an UNASSIGNED complaint "
                f"#{short_id} that needs immediate attention. "
                f"It is due in {int(hours_remaining)} hours. "
                f"IMMEDIATE action required - please assign and address this complaint."
            )
            timeline_message = (
                f"SECOND SLA reminder sent to Hospital Admins/Coordinators for UNASSIGNED complaint. "
                f"Complaint is due in {int(hours_remaining)} hours. "
                f"This is the FINAL reminder."
            )
        else:
            message = (
                f"This is the SECOND and FINAL reminder that complaint #{short_id} "
                f"is due in {int(hours_remaining)} hours. "
                f"URGENT action required to avoid SLA breach and escalation."
            )
            timeline_message = (
                f"SECOND SLA reminder sent to {recipient.get_full_name()}. "
                f"Complaint is due in {int(hours_remaining)} hours. "
                f"This is the final reminder before escalation."
            )
        template_prefix = "complaints/emails/sla_second_reminder"
        event_type = "sla_second_reminder"
        audit_event = "sla_second_reminder_sent"
        audit_description = f"Second SLA reminder sent for complaint {complaint.id}"
        timestamp_field = "second_reminder_sent_at"
        log_prefix = "Second SLA reminder"
        fail_label = "second SLA reminder"
    else:
        base_subject = f"SLA Reminder: Complaint #{short_id} - {complaint.title[:50]}"
        if is_unassigned:
            message = (
                f"This is a reminder about an UNASSIGNED complaint #{short_id} "
                f"that needs attention. "
                f"It is due in {int(hours_remaining)} hours. "
                f"Please assign it to an appropriate team member."
            )
            timeline_message = (
                f"SLA reminder sent to Hospital Admins/Coordinators for UNASSIGNED complaint. "
                f"Complaint is due in {int(hours_remaining)} hours."
            )
        else:
            message = (
                f"This is a reminder that complaint #{short_id} "
                f"is due in {int(hours_remaining)} hours. "
                f"Please take action to avoid SLA breach."
            )
            timeline_message = (
                f"SLA reminder sent to {recipient.get_full_name()}. "
                f"Complaint is due in {int(hours_remaining)} hours."
            )
        template_prefix = "complaints/emails/sla_reminder"
        event_type = "sla_reminder"
        audit_event = "sla_reminder_sent"
        audit_description = f"SLA reminder sent for complaint {complaint.id}"
        timestamp_field = "reminder_sent_at"
        log_prefix = "SLA reminder"
        fail_label = "SLA reminder"

    subject = f"[UNASSIGNED] {base_subject}" if is_unassigned else base_subject

    try:
        recipients_to_notify = fallback_recipients if is_unassigned else [recipient]
        recipient_names = []

        for notif_recipient in recipients_to_notify:
            recipient_names.append(notif_recipient.get_full_name())

            # Prefer the in-app notification service; fall back to email.
            if hasattr(NotificationService, "send_notification"):
                NotificationService.send_notification(
                    recipient=notif_recipient,
                    title=subject,
                    message=message,
                    notification_type="complaint",
                    related_object=complaint,
                    metadata={"event_type": event_type, "is_unassigned": is_unassigned},
                )
            else:
                # Direct bilingual email (English + Arabic bodies).
                message_en = render_to_string(f"{template_prefix}_en.txt", context)
                message_ar = render_to_string(f"{template_prefix}_ar.txt", context)

                recipient_email = notif_recipient.email if hasattr(notif_recipient, "email") else None
                if recipient_email:
                    send_mail(
                        subject=subject,
                        message=f"{message_en}\n\n{message_ar}",
                        from_email=settings.DEFAULT_FROM_EMAIL,
                        recipient_list=[recipient_email],
                        fail_silently=False,
                    )
                else:
                    logger.warning(f"No email for recipient {notif_recipient}")

        # Stamp the complaint so this reminder is never repeated.
        setattr(complaint, timestamp_field, now)
        complaint.save(update_fields=[timestamp_field])

        # The unassigned timeline message lists the fallback recipients.
        if is_unassigned:
            timeline_message = f"{timeline_message} Recipients: {', '.join(recipient_names)}"

        ComplaintUpdate.objects.create(
            complaint=complaint,
            update_type="note",
            message=timeline_message,
            created_by=None,  # System action
            metadata={
                "event_type": event_type,
                "hours_remaining": int(hours_remaining),
                "is_unassigned": is_unassigned,
                "recipients": recipient_names,
            },
        )

        from apps.core.services import create_audit_log

        create_audit_log(
            event_type=audit_event,
            description=audit_description,
            content_object=complaint,
            metadata={
                "recipients": recipient_names,
                "hours_remaining": int(hours_remaining),
                "is_unassigned": is_unassigned,
            },
        )

        if is_unassigned:
            logger.info(
                f"{log_prefix} sent for UNASSIGNED complaint {complaint.id} "
                f"to Hospital Admins/Coordinators: {', '.join(recipient_names)} "
                f"({int(hours_remaining)} hours remaining)"
            )
        else:
            logger.info(
                f"{log_prefix} sent for complaint {complaint.id} "
                f"to {recipient.get_full_name()} "
                f"({int(hours_remaining)} hours remaining)"
            )

        # Trigger reminder-based escalation check.
        escalate_after_reminder.delay(str(complaint.id))

        return "sent"

    except Exception as e:
        logger.error(f"Failed to send {fail_label} for complaint {complaint.id}: {str(e)}")
        return "skipped"
|
|
|
|
|
|
# =============================================================================
|
|
# On-Call Admin Notification Tasks for New Complaints
|
|
# =============================================================================
|
|
|
|
|
|
def get_on_call_schedule(hospital=None):
    """
    Resolve the active on-call schedule, preferring a hospital-specific one.

    Args:
        hospital: Hospital instance, or None to consult only the
            system-wide schedule.

    Returns:
        The matching active OnCallAdminSchedule, or None if none exists.
    """
    from .models import OnCallAdminSchedule

    active_schedules = OnCallAdminSchedule.objects.filter(is_active=True)

    # A schedule scoped to this hospital wins over the system-wide one.
    if hospital:
        hospital_schedule = active_schedules.filter(hospital=hospital).first()
        if hospital_schedule:
            return hospital_schedule

    # System-wide schedules have no hospital attached.
    return active_schedules.filter(hospital__isnull=True).first()
|
|
|
|
|
|
def get_admins_to_notify(schedule, check_datetime=None, hospital=None):
    """
    Resolve which PX Admins should be notified at a given moment.

    During working hours (per the schedule): ALL active PX Admins.
    Outside working hours: only the on-call admins currently active today,
    falling back to all PX Admins when none are configured.

    Args:
        schedule: OnCallAdminSchedule instance; None is treated as
            working hours.
        check_datetime: datetime to evaluate (default: now).
        hospital: Optional hospital; when given, admins are limited to
            that hospital plus system-wide (no-hospital) admins.

    Returns:
        tuple: (admins_queryset, is_working_hours_bool)
    """
    from apps.accounts.models import User

    if check_datetime is None:
        check_datetime = timezone.now()

    # With no schedule at all, default to working-hours behavior.
    is_working_hours = schedule.is_working_time(check_datetime) if schedule else True

    px_admins = User.objects.filter(groups__name="PX Admin", is_active=True)

    if hospital:
        # Hospital-specific complaints go to that hospital's admins as well
        # as system-wide (no-hospital) admins.
        px_admins = px_admins.filter(Q(hospital=hospital) | Q(hospital__isnull=True))

    if is_working_hours:
        # During working hours: notify ALL PX Admins.
        return px_admins.distinct(), is_working_hours

    # Outside working hours: restrict to on-call entries active today.
    # (The previous implementation issued an unused values_list query and a
    # redundant OnCallAdmin.objects.filter(id__in=...) round-trip; iterating
    # the relation directly returns the same rows with fewer queries.)
    if schedule:
        today = check_datetime.date()
        active_on_call_ids = [
            on_call.admin_user_id
            for on_call in schedule.on_call_admins.filter(is_active=True)
            if on_call.is_currently_active(today)
        ]

        if active_on_call_ids:
            return px_admins.filter(id__in=active_on_call_ids).distinct(), is_working_hours

    # Fallback: if no on-call admins configured, notify all PX Admins.
    logger.warning("No on-call admins configured for after-hours. Notifying all PX Admins.")
    return px_admins.distinct(), is_working_hours
|
|
|
|
|
|
@shared_task
def notify_admins_new_complaint(complaint_id):
    """
    Notify PX Admins about a newly created complaint.

    Notification logic:
    - During working hours (as configured in OnCallAdminSchedule): ALL PX
      Admins receive an email; high-priority complaints also trigger SMS.
    - Outside working hours: only ON-CALL admins are notified, and each
      receives BOTH email and SMS regardless of priority.

    Args:
        complaint_id: UUID of the Complaint.

    Returns:
        dict: Result with notification status, per-channel counts, and the
        list of admins notified.
    """
    from .models import Complaint, OnCallAdminSchedule
    from apps.notifications.services import NotificationService
    from django.contrib.sites.shortcuts import get_current_site
    from django.urls import reverse

    try:
        complaint = Complaint.objects.select_related(
            "hospital", "patient", "department", "created_by", "domain", "category"
        ).get(id=complaint_id)

        # Resolve (or bootstrap) the on-call schedule for this hospital.
        schedule = get_on_call_schedule(complaint.hospital)

        if not schedule:
            # Create a sensible default so notifications are never dropped.
            logger.info("No on-call schedule found. Creating default schedule.")
            schedule = OnCallAdminSchedule.objects.create(
                working_days=[0, 1, 2, 3, 4],  # Mon-Fri
                work_start_time="08:00",
                work_end_time="17:00",
                timezone="Asia/Riyadh",
                is_active=True,
            )

        admins_to_notify, is_working_hours = get_admins_to_notify(schedule, hospital=complaint.hospital)

        if not admins_to_notify.exists():
            logger.warning(f"No PX Admins found to notify for complaint {complaint_id}")
            return {"status": "warning", "reason": "no_admins_found", "complaint_id": str(complaint_id)}

        # Build an absolute URL to the complaint detail page.
        try:
            site = get_current_site(None)
            domain = site.domain
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed.
            domain = "localhost:8000"

        complaint_url = f"https://{domain}{reverse('complaints:complaint_detail', kwargs={'pk': complaint_id})}"

        # Human-readable severity/priority, falling back to the raw values.
        severity_display = (
            complaint.get_severity_display() if hasattr(complaint, "get_severity_display") else complaint.severity
        )
        priority_display = (
            complaint.get_priority_display() if hasattr(complaint, "get_priority_display") else complaint.priority
        )

        # High priority/severity gets urgent styling (and SMS, below).
        is_high_priority = complaint.priority in ["high", "critical"] or complaint.severity in ["high", "critical"]
        priority_badge = "🚨 URGENT" if is_high_priority else "📋 New"

        # Notification counters and audit trail of who was reached.
        email_count = 0
        sms_count = 0
        notified_admins = []

        # On-call SMS phone preferences (only relevant after hours).
        on_call_configs = {}
        if not is_working_hours and schedule:
            for on_call in schedule.on_call_admins.filter(is_active=True).select_related("admin_user"):
                on_call_configs[on_call.admin_user_id] = on_call

        for admin in admins_to_notify:
            try:
                # English email subject
                subject_en = f"{priority_badge} Complaint #{complaint.reference_number} - {complaint.title[:50]}"

                # Arabic email subject
                subject_ar = f"{priority_badge} شكوى جديدة #{complaint.reference_number}"

                context = {
                    "admin_name": admin.get_full_name() or "Admin",
                    "priority_badge": priority_badge,
                    "is_high_priority": is_high_priority,
                    "reference_number": complaint.reference_number,
                    "complaint_title": complaint.title,
                    "priority": complaint.priority,
                    "severity": complaint.severity,
                    "status": complaint.get_status_display()
                    if hasattr(complaint, "get_status_display")
                    else complaint.status,
                    "patient_name": complaint.patient_name or "N/A",
                    "mrn": complaint.patient.mrn if complaint.patient else "N/A",
                    "contact_phone": complaint.contact_phone or "N/A",
                    "contact_email": complaint.contact_email or "N/A",
                    "hospital_name": complaint.hospital.name if complaint.hospital else "N/A",
                    "department_name": complaint.department.name if complaint.department else "N/A",
                    # Slicing already caps at 1000 chars; the previous ternary
                    # around it was redundant.
                    "description": complaint.description[:1000],
                    "complaint_url": complaint_url,
                    "notification_type": "Working Hours" if is_working_hours else "After Hours (On-Call)",
                    "current_time": timezone.now().strftime("%Y-%m-%d %H:%M:%S"),
                }

                # Rich HTML body plus a bilingual plain-text fallback.
                html_message = render_to_string("emails/new_complaint_admin_notification.html", context)

                message_text = f"""{subject_en}

Dear {admin.get_full_name() or "Admin"},

A new complaint has been submitted and requires your attention.

Reference: {complaint.reference_number}
Title: {complaint.title}
Priority: {priority_display}
Severity: {severity_display}
Patient: {complaint.patient_name or "N/A"}
Hospital: {complaint.hospital.name if complaint.hospital else "N/A"}

View Complaint: {complaint_url}

---
This is an automated notification from the PX 360 system.
"""

                # Email channel.
                try:
                    NotificationService.send_email(
                        email=admin.email,
                        subject=f"{subject_en} / {subject_ar}",
                        message=message_text,
                        html_message=html_message,
                        related_object=complaint,
                        metadata={
                            "notification_type": "new_complaint_admin",
                            "complaint_id": str(complaint_id),
                            "is_working_hours": is_working_hours,
                            "recipient_role": "px_admin",
                            "language": "bilingual",
                        },
                    )
                    email_count += 1
                except Exception as e:
                    logger.error(f"Failed to send email to admin {admin.email}: {str(e)}")

                # SMS channel: high-priority complaints always, and ALL
                # after-hours on-call admins (regardless of priority).
                should_send_sms = is_high_priority or not is_working_hours

                if should_send_sms:
                    # Prefer the on-call config's notification phone, then
                    # the admin's own phone field.
                    phone = None
                    if admin.id in on_call_configs:
                        phone = on_call_configs[admin.id].get_notification_phone()
                    if not phone and hasattr(admin, "phone"):
                        phone = admin.phone

                    if phone:
                        try:
                            if is_high_priority:
                                sms_message = f"🚨 URGENT: New complaint #{complaint.reference_number} - {complaint.title[:50]}. Review: {complaint_url[:100]}"
                            else:
                                sms_message = f"📋 New complaint #{complaint.reference_number} - {complaint.title[:50]}. Review: {complaint_url[:100]}"
                            NotificationService.send_sms(
                                phone=phone,
                                message=sms_message,
                                related_object=complaint,
                                metadata={
                                    "notification_type": "new_complaint_admin_sms",
                                    "complaint_id": str(complaint_id),
                                    "is_high_priority": is_high_priority,
                                    "is_working_hours": is_working_hours,
                                },
                            )
                            sms_count += 1
                        except Exception as e:
                            logger.error(f"Failed to send SMS to admin {admin.email}: {str(e)}")

                notified_admins.append({"id": str(admin.id), "email": admin.email, "name": admin.get_full_name()})

            except Exception as e:
                # One admin failing must not block the rest.
                logger.error(f"Failed to notify admin {admin.email}: {str(e)}")

        # Record the notification batch on the complaint's timeline.
        from .models import ComplaintUpdate

        ComplaintUpdate.objects.create(
            complaint=complaint,
            update_type="note",
            message=f"Admin notifications sent: {email_count} emails, {sms_count} SMS. "
            f"Type: {'Working hours' if is_working_hours else 'After-hours (on-call)'}. "
            f"Notified: {len(notified_admins)} admins.",
            created_by=None,  # System action
            metadata={
                "event_type": "admin_notification_sent",
                "emails_sent": email_count,
                "sms_sent": sms_count,
                "admins_notified": notified_admins,
                "is_working_hours": is_working_hours,
            },
        )

        logger.info(
            f"Admin notifications sent for complaint {complaint_id}: "
            f"{email_count} emails, {sms_count} SMS to {len(notified_admins)} admins. "
            f"Working hours: {is_working_hours}"
        )

        return {
            "status": "success",
            "complaint_id": str(complaint_id),
            "is_working_hours": is_working_hours,
            "emails_sent": email_count,
            "sms_sent": sms_count,
            "admins_notified": len(notified_admins),
            "admin_details": notified_admins,
        }

    except Complaint.DoesNotExist:
        error_msg = f"Complaint {complaint_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error notifying admins for complaint {complaint_id}: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
|