# HH/apps/surveys/tasks.py
"""
Surveys Celery tasks
This module contains tasks for:
- Analyzing survey comments with AI
- Processing survey submissions
- Survey-related background operations
"""
import logging
from celery import shared_task
from django.utils import timezone
logger = logging.getLogger(__name__)
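
# How these tasks are triggered is assumed to live outside this module (e.g. a
# post-submission signal or view). A typical call site would look like:
#
#     from apps.surveys.tasks import analyze_survey_comment
#     analyze_survey_comment.delay(str(survey_instance.id))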


@shared_task
def analyze_survey_comment(survey_instance_id):
    """
    Analyze a survey comment using AI to determine sentiment, emotion, and content.

    This task is triggered when a survey is completed with a comment. It uses
    the AI service to analyze the comment content and classify it.

    Args:
        survey_instance_id: UUID of the SurveyInstance

    Returns:
        dict: Result with sentiment, emotion, summaries, topics, and feedback type
    """
    from apps.surveys.models import SurveyInstance
    from apps.core.ai_service import AIService, AIServiceError

    try:
        survey = SurveyInstance.objects.select_related(
            'patient', 'hospital', 'survey_template'
        ).get(id=survey_instance_id)

        # Skip surveys without a comment
        if not survey.comment or not survey.comment.strip():
            logger.info(f"No comment to analyze for survey {survey_instance_id}")
            return {'status': 'skipped', 'reason': 'no_comment'}

        # Skip surveys whose comment was already analyzed
        if survey.comment_analyzed:
            logger.info(f"Comment already analyzed for survey {survey_instance_id}")
            return {'status': 'skipped', 'reason': 'already_analyzed'}

        logger.info(f"Starting AI analysis for survey comment {survey_instance_id}")

        # Analyze sentiment; fall back to neutral defaults if the AI service fails
        try:
            sentiment_analysis = AIService.classify_sentiment(survey.comment)
            sentiment = sentiment_analysis.get('sentiment', 'neutral')
            sentiment_score = sentiment_analysis.get('score', 0.0)
            sentiment_confidence = sentiment_analysis.get('confidence', 0.0)
        except AIServiceError as e:
            logger.error(f"Sentiment analysis failed for survey {survey_instance_id}: {str(e)}")
            sentiment = 'neutral'
            sentiment_score = 0.0
            sentiment_confidence = 0.0

        # Analyze emotion; same neutral fallback on failure
        try:
            emotion_analysis = AIService.analyze_emotion(survey.comment)
            emotion = emotion_analysis.get('emotion', 'neutral')
            emotion_intensity = emotion_analysis.get('intensity', 0.0)
            emotion_confidence = emotion_analysis.get('confidence', 0.0)
        except AIServiceError as e:
            logger.error(f"Emotion analysis failed for survey {survey_instance_id}: {str(e)}")
            emotion = 'neutral'
            emotion_intensity = 0.0
            emotion_confidence = 0.0

        # Generate bilingual summaries of what the comment is about
        try:
            summary_prompt = f"""
            Analyze this patient survey comment and provide:
            1. A brief summary of what the comment is about (in English and Arabic)
            2. Key topics mentioned (in English and Arabic)
            3. Specific feedback points (positive or negative)

            Comment: "{survey.comment}"

            Patient context:
            - Survey: {survey.survey_template.name}
            - Score: {survey.total_score}
            - Hospital: {survey.hospital.name}

            Respond in JSON format with keys:
            - summary_en: English summary
            - summary_ar: Arabic summary
            - topics_en: List of topics in English
            - topics_ar: List of topics in Arabic
            - feedback_type: "positive", "negative", or "neutral"
            """
            summary_result = AIService.chat_completion(
                messages=[
                    {
                        "role": "system",
                        "content": (
                            "You are a helpful assistant analyzing patient survey "
                            "comments. Always respond with valid JSON."
                        )
                    },
                    {
                        "role": "user",
                        "content": summary_prompt
                    }
                ],
                response_format={"type": "json_object"}
            )

            # Parse the JSON response
            summary_data = json.loads(summary_result)
            summary_en = summary_data.get('summary_en', '')
            summary_ar = summary_data.get('summary_ar', '')
            topics_en = summary_data.get('topics_en', [])
            topics_ar = summary_data.get('topics_ar', [])
            feedback_type = summary_data.get('feedback_type', 'neutral')
        except Exception as e:
            logger.error(f"Summary generation failed for survey {survey_instance_id}: {str(e)}")
            summary_en = survey.comment[:200]  # Fall back to truncated comment text
            summary_ar = ''
            topics_en = []
            topics_ar = []
            feedback_type = sentiment  # Fall back to the sentiment label

        # Persist the analysis results on the survey
        survey.comment_analysis = {
            'sentiment': sentiment,
            'sentiment_score': sentiment_score,
            'sentiment_confidence': sentiment_confidence,
            'emotion': emotion,
            'emotion_intensity': emotion_intensity,
            'emotion_confidence': emotion_confidence,
            'summary_en': summary_en,
            'summary_ar': summary_ar,
            'topics_en': topics_en,
            'topics_ar': topics_ar,
            'feedback_type': feedback_type,
            'analyzed_at': timezone.now().isoformat()
        }
        survey.comment_analyzed = True
        survey.save(update_fields=['comment_analysis', 'comment_analyzed'])

        # Log audit
        from apps.core.services import create_audit_log
        create_audit_log(
            event_type='survey_comment_analyzed',
            description=f"Survey comment analyzed with AI: sentiment={sentiment}, emotion={emotion}",
            content_object=survey,
            metadata={
                'sentiment': sentiment,
                'emotion': emotion,
                'feedback_type': feedback_type,
                'topics': topics_en
            }
        )

        logger.info(
            f"AI analysis complete for survey comment {survey_instance_id}: "
            f"sentiment={sentiment} ({sentiment_score:.2f}), "
            f"emotion={emotion} ({emotion_intensity:.2f}), "
            f"feedback_type={feedback_type}"
        )

        return {
            'status': 'success',
            'survey_id': str(survey.id),
            'sentiment': sentiment,
            'sentiment_score': sentiment_score,
            'sentiment_confidence': sentiment_confidence,
            'emotion': emotion,
            'emotion_intensity': emotion_intensity,
            'emotion_confidence': emotion_confidence,
            'summary_en': summary_en,
            'summary_ar': summary_ar,
            'topics_en': topics_en,
            'topics_ar': topics_ar,
            'feedback_type': feedback_type
        }

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {'status': 'error', 'reason': error_msg}
    except Exception as e:
        error_msg = f"Error analyzing survey comment: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {'status': 'error', 'reason': error_msg}
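
# A minimal sketch of opting into Celery's retry machinery for transient AI
# failures instead of silently falling back to neutral defaults (an
# illustrative variant, not behavior this project ships):
#
#     @shared_task(bind=True, max_retries=3, default_retry_delay=60)
#     def analyze_survey_comment(self, survey_instance_id):
#         try:
#             ...  # same body as above
#         except AIServiceError as exc:
#             raise self.retry(exc=exc)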


@shared_task
def send_satisfaction_feedback(survey_instance_id, user_id):
    """
    Send a satisfaction feedback form to the patient after a negative survey
    has been addressed.

    This task creates a feedback survey to assess patient satisfaction with
    how their negative survey concerns were addressed.

    Args:
        survey_instance_id: UUID of the original negative SurveyInstance
        user_id: UUID of the user who is sending the feedback

    Returns:
        dict: Result with the new feedback survey's ID
    """
    from apps.surveys.models import SurveyInstance, SurveyTemplate

    try:
        survey = SurveyInstance.objects.select_related(
            'patient', 'hospital', 'survey_template'
        ).get(id=survey_instance_id)

        # Get the feedback survey template for this hospital
        try:
            feedback_template = SurveyTemplate.objects.get(
                hospital=survey.hospital,
                survey_type='complaint_resolution',
                is_active=True
            )
        except SurveyTemplate.DoesNotExist:
            logger.warning(
                f"No feedback survey template found for hospital {survey.hospital.name}"
            )
            return {'status': 'skipped', 'reason': 'no_template'}

        # Skip if feedback was already sent
        if survey.satisfaction_feedback_sent:
            logger.info(f"Satisfaction feedback already sent for survey {survey_instance_id}")
            return {'status': 'skipped', 'reason': 'already_sent'}

        # Create the feedback survey instance
        feedback_survey = SurveyInstance.objects.create(
            survey_template=feedback_template,
            patient=survey.patient,
            encounter_id=survey.encounter_id,
            delivery_channel='sms',
            recipient_phone=survey.patient.phone,
            recipient_email=survey.patient.email,
            metadata={
                'original_survey_id': str(survey.id),
                'original_survey_title': survey.survey_template.name,
                'original_score': float(survey.total_score) if survey.total_score else None,
                'feedback_type': 'satisfaction'
            }
        )

        # Mark the original survey as having feedback sent
        survey.satisfaction_feedback_sent = True
        survey.satisfaction_feedback_sent_at = timezone.now()
        survey.satisfaction_feedback = feedback_survey
        survey.save(update_fields=[
            'satisfaction_feedback_sent',
            'satisfaction_feedback_sent_at',
            'satisfaction_feedback'
        ])

        # Send the survey invitation
        from apps.notifications.services import NotificationService
        notification_log = NotificationService.send_survey_invitation(
            survey_instance=feedback_survey,
            language='en'  # TODO: Get from patient preference
        )

        # Update the feedback survey status
        feedback_survey.status = 'sent'
        feedback_survey.sent_at = timezone.now()
        feedback_survey.save(update_fields=['status', 'sent_at'])

        # Log audit
        from apps.core.services import create_audit_log
        create_audit_log(
            event_type='satisfaction_feedback_sent',
            description=f"Satisfaction feedback survey sent for survey: {survey.survey_template.name}",
            content_object=feedback_survey,
            metadata={
                'original_survey_id': str(survey.id),
                'feedback_template': feedback_template.name,
                'sent_by_user_id': user_id
            }
        )

        logger.info(f"Satisfaction feedback survey sent for survey {survey_instance_id}")

        return {
            'status': 'sent',
            'feedback_survey_id': str(feedback_survey.id),
            'notification_log_id': str(notification_log.id)
        }

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {'status': 'error', 'reason': error_msg}
    except Exception as e:
        error_msg = f"Error sending satisfaction feedback: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {'status': 'error', 'reason': error_msg}
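
# Illustrative call site (an assumption -- e.g. a PX Action resolution view
# once the team has addressed the patient's concerns):
#
#     send_satisfaction_feedback.delay(
#         survey_instance_id=str(survey.id),
#         user_id=str(request.user.id),
#     )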


@shared_task
def create_action_from_negative_survey(survey_instance_id):
    """
    Create a PX Action from a negative survey.

    This task is triggered when a survey with negative feedback is completed.
    It creates a PX Action to track and address the patient's concerns.

    Args:
        survey_instance_id: UUID of the SurveyInstance

    Returns:
        dict: Result with action_id
    """
    from apps.surveys.models import SurveyInstance
    from apps.px_action_center.models import PXAction, PXActionLog
    from apps.core.models import PriorityChoices, SeverityChoices
    from django.contrib.contenttypes.models import ContentType

    try:
        survey = SurveyInstance.objects.select_related(
            'survey_template',
            'patient',
            'hospital',
            'department'
        ).get(id=survey_instance_id)

        # Verify the survey is actually negative
        if not survey.is_negative:
            logger.info(f"Survey {survey_instance_id} is not negative, skipping action creation")
            return {'status': 'skipped', 'reason': 'not_negative'}

        # Skip if an action was already created (metadata may be empty or None)
        if (survey.metadata or {}).get('px_action_created'):
            logger.info(f"PX Action already created for survey {survey_instance_id}")
            return {'status': 'skipped', 'reason': 'already_created'}

        # Calculate the score for priority/severity determination
        score = float(survey.total_score) if survey.total_score else 0.0

        # Determine severity based on score (lower = more severe)
        if score <= 2.0:
            severity = SeverityChoices.CRITICAL
            priority = PriorityChoices.CRITICAL
        elif score <= 3.0:
            severity = SeverityChoices.HIGH
            priority = PriorityChoices.HIGH
        elif score <= 4.0:
            severity = SeverityChoices.MEDIUM
            priority = PriorityChoices.MEDIUM
        else:
            severity = SeverityChoices.LOW
            priority = PriorityChoices.LOW

        # Determine category based on the survey template or journey stage
        category = 'service_quality'  # Default
        if survey.survey_template.survey_type == 'post_discharge':
            category = 'clinical_quality'
        elif survey.survey_template.survey_type == 'inpatient_satisfaction':
            category = 'service_quality'
        elif survey.journey_instance and survey.journey_instance.stage:
            stage = survey.journey_instance.stage.lower()
            if 'admission' in stage or 'registration' in stage:
                category = 'process_improvement'
            elif 'treatment' in stage or 'procedure' in stage:
                category = 'clinical_quality'
            elif 'discharge' in stage or 'billing' in stage:
                category = 'process_improvement'

        # Build the action description
        description_parts = [
            f"Negative survey response with score {score:.1f}/5.0",
            f"Survey Template: {survey.survey_template.name}",
        ]
        if survey.comment:
            description_parts.append(f"Patient Comment: {survey.comment}")
        if survey.journey_instance:
            description_parts.append(f"Journey Stage: {survey.journey_instance.stage}")
        if survey.encounter_id:
            description_parts.append(f"Encounter ID: {survey.encounter_id}")
        description = " | ".join(description_parts)

        # Create the PX Action
        survey_ct = ContentType.objects.get_for_model(SurveyInstance)
        action = PXAction.objects.create(
            source_type='survey',
            content_type=survey_ct,
            object_id=survey.id,
            title=f"Negative Survey: {survey.survey_template.name} (Score: {score:.1f})",
            description=description,
            hospital=survey.hospital,
            department=survey.department,
            category=category,
            priority=priority,
            severity=severity,
            status='open',
            metadata={
                'source_survey_id': str(survey.id),
                'source_survey_template': survey.survey_template.name,
                'survey_score': score,
                'is_negative': True,
                'has_comment': bool(survey.comment),
                'encounter_id': survey.encounter_id,
                'auto_created': True
            }
        )

        # Create an action log entry
        PXActionLog.objects.create(
            action=action,
            log_type='note',
            message=(
                f"Action automatically created from negative survey. "
                f"Score: {score:.1f}, Template: {survey.survey_template.name}"
            ),
            metadata={
                'survey_id': str(survey.id),
                'survey_score': score,
                'auto_created': True,
                'severity': severity,
                'priority': priority
            }
        )

        # Update survey metadata to track action creation
        if not survey.metadata:
            survey.metadata = {}
        survey.metadata['px_action_created'] = True
        survey.metadata['px_action_id'] = str(action.id)
        survey.save(update_fields=['metadata'])

        # Log audit
        from apps.core.services import create_audit_log
        create_audit_log(
            event_type='px_action_created',
            description=f"PX Action created from negative survey: {survey.survey_template.name}",
            content_object=action,
            metadata={
                'survey_id': str(survey.id),
                'survey_template': survey.survey_template.name,
                'survey_score': score,
                'trigger': 'negative_survey'
            }
        )

        logger.info(
            f"Created PX Action {action.id} from negative survey {survey_instance_id} "
            f"(score: {score:.1f}, severity: {severity})"
        )

        return {
            'status': 'action_created',
            'action_id': str(action.id),
            'survey_score': score,
            'severity': severity,
            'priority': priority
        }

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {'status': 'error', 'reason': error_msg}
    except Exception as e:
        error_msg = f"Error creating action from negative survey: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {'status': 'error', 'reason': error_msg}
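

# The wiring below is a sketch of how a caller might fan out to these tasks
# after a survey submission; nothing in this module calls it, and the real
# trigger is assumed to live elsewhere (e.g. apps/surveys/signals.py).
def enqueue_survey_followups(survey):
    """
    Illustrative helper: enqueue follow-up tasks for a completed SurveyInstance
    once the current transaction commits, so the tasks never see an unsaved row.
    """
    from django.db import transaction

    # Comment analysis is safe to enqueue unconditionally; the task itself
    # skips surveys without a comment or whose comment was already analyzed.
    transaction.on_commit(lambda: analyze_survey_comment.delay(str(survey.id)))

    # Negative surveys additionally open a PX Action; the task re-checks
    # `is_negative` and deduplicates via survey metadata.
    if survey.is_negative:
        transaction.on_commit(
            lambda: create_action_from_negative_survey.delay(str(survey.id))
        )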