"""
|
|
Surveys Celery tasks
|
|
|
|
This module contains tasks for:
|
|
- Analyzing survey comments with AI
|
|
- Processing survey submissions
|
|
- Survey-related background operations
|
|
- Bulk survey sending
|
|
"""
|
|
|
|
import logging
|
|
|
|
from celery import shared_task
|
|
from django.utils import timezone
|
|
from django.db import transaction
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@shared_task
def analyze_survey_comment(survey_instance_id):
    """
    Analyze a survey comment using AI to determine sentiment, emotion, and content.

    Triggered when a survey is completed with a comment. Runs three independent
    AI passes (sentiment, emotion, bilingual summary); each pass degrades to a
    neutral/fallback value on failure so one AI outage never discards the other
    results. The task never raises: all failures are reported via the returned
    status dict, consistent with the sibling tasks in this module.

    Args:
        survey_instance_id: UUID of the SurveyInstance

    Returns:
        dict: On success, sentiment/emotion/summary fields; otherwise a
              ``{"status": "skipped"|"error", "reason": ...}`` dict.
    """
    import json

    from apps.surveys.models import SurveyInstance
    from apps.core.ai_service import AIService, AIServiceError

    try:
        # survey_template is included because it is read when building the
        # summary prompt below (avoids an extra query per task run).
        survey = SurveyInstance.objects.select_related(
            "patient", "hospital", "survey_template"
        ).get(id=survey_instance_id)

        # Nothing to analyze for blank or whitespace-only comments.
        if not survey.comment or not survey.comment.strip():
            logger.info(f"No comment to analyze for survey {survey_instance_id}")
            return {"status": "skipped", "reason": "no_comment"}

        # Idempotency guard: a previous run already analyzed this comment.
        if survey.comment_analyzed:
            logger.info(f"Comment already analyzed for survey {survey_instance_id}")
            return {"status": "skipped", "reason": "already_analyzed"}

        logger.info(f"Starting AI analysis for survey comment {survey_instance_id}")

        # --- Sentiment pass (falls back to neutral on AI failure) ---
        try:
            sentiment_analysis = AIService.classify_sentiment(survey.comment)
            sentiment = sentiment_analysis.get("sentiment", "neutral")
            sentiment_score = sentiment_analysis.get("score", 0.0)
            sentiment_confidence = sentiment_analysis.get("confidence", 0.0)
        except AIServiceError as e:
            logger.error(f"Sentiment analysis failed for survey {survey_instance_id}: {str(e)}")
            sentiment = "neutral"
            sentiment_score = 0.0
            sentiment_confidence = 0.0

        # --- Emotion pass (falls back to neutral on AI failure) ---
        try:
            emotion_analysis = AIService.analyze_emotion(survey.comment)
            emotion = emotion_analysis.get("emotion", "neutral")
            emotion_intensity = emotion_analysis.get("intensity", 0.0)
            emotion_confidence = emotion_analysis.get("confidence", 0.0)
        except AIServiceError as e:
            logger.error(f"Emotion analysis failed for survey {survey_instance_id}: {str(e)}")
            emotion = "neutral"
            emotion_intensity = 0.0
            emotion_confidence = 0.0

        # --- Bilingual summary pass via chat completion ---
        try:
            summary_prompt = f"""
Analyze this patient survey comment and provide:
1. A brief summary of what the comment is about (in English and Arabic)
2. Key topics mentioned (in English and Arabic)
3. Specific feedback points (positive or negative)

Comment: "{survey.comment}"

Patient context:
- Survey: {survey.survey_template.name}
- Score: {survey.total_score}
- Hospital: {survey.hospital.name}

Respond in JSON format with keys:
- summary_en: English summary
- summary_ar: Arabic summary
- topics_en: List of topics in English
- topics_ar: List of topics in Arabic
- feedback_type: "positive", "negative", or "neutral"
"""

            summary_result = AIService.chat_completion(
                messages=[
                    {
                        "role": "system",
                        "content": "You are a helpful assistant analyzing patient survey comments. Always respond with valid JSON.",
                    },
                    {"role": "user", "content": summary_prompt},
                ],
                response_format={"type": "json_object"},
            )

            summary_data = json.loads(summary_result)

            summary_en = summary_data.get("summary_en", "")
            summary_ar = summary_data.get("summary_ar", "")
            topics_en = summary_data.get("topics_en", [])
            topics_ar = summary_data.get("topics_ar", [])
            feedback_type = summary_data.get("feedback_type", "neutral")

        except Exception as e:
            # Broad catch is deliberate: json.JSONDecodeError on a malformed
            # model reply is as likely here as an AIServiceError.
            logger.error(f"Summary generation failed for survey {survey_instance_id}: {str(e)}")
            summary_en = survey.comment[:200]  # Fallback to comment text
            summary_ar = ""
            topics_en = []
            topics_ar = []
            feedback_type = sentiment  # Fallback to sentiment

        # Persist all analysis results in one JSON field plus the flag.
        survey.comment_analysis = {
            "sentiment": sentiment,
            "sentiment_score": sentiment_score,
            "sentiment_confidence": sentiment_confidence,
            "emotion": emotion,
            "emotion_intensity": emotion_intensity,
            "emotion_confidence": emotion_confidence,
            "summary_en": summary_en,
            "summary_ar": summary_ar,
            "topics_en": topics_en,
            "topics_ar": topics_ar,
            "feedback_type": feedback_type,
            "analyzed_at": timezone.now().isoformat(),
        }
        survey.comment_analyzed = True
        survey.save(update_fields=["comment_analysis", "comment_analyzed"])

        # Log audit
        from apps.core.services import create_audit_log

        create_audit_log(
            event_type="survey_comment_analyzed",
            description=f"Survey comment analyzed with AI: sentiment={sentiment}, emotion={emotion}",
            content_object=survey,
            metadata={"sentiment": sentiment, "emotion": emotion, "feedback_type": feedback_type, "topics": topics_en},
        )

        logger.info(
            f"AI analysis complete for survey comment {survey_instance_id}: "
            f"sentiment={sentiment} ({sentiment_score:.2f}), "
            f"emotion={emotion} ({emotion_intensity:.2f}), "
            f"feedback_type={feedback_type}"
        )

        return {
            "status": "success",
            "survey_id": str(survey.id),
            "sentiment": sentiment,
            "sentiment_score": sentiment_score,
            "sentiment_confidence": sentiment_confidence,
            "emotion": emotion,
            "emotion_intensity": emotion_intensity,
            "emotion_confidence": emotion_confidence,
            "summary_en": summary_en,
            "summary_ar": summary_ar,
            "topics_en": topics_en,
            "topics_ar": topics_ar,
            "feedback_type": feedback_type,
        }

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        # Consistency with the other tasks in this module: never let an
        # unexpected error propagate out of the task; report it instead.
        error_msg = f"Error analyzing survey comment: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
@shared_task
|
|
def send_satisfaction_feedback(survey_instance_id, user_id):
|
|
"""
|
|
Send satisfaction feedback form to patient after addressing negative survey.
|
|
|
|
This task creates a feedback survey to assess patient satisfaction with
|
|
how their negative survey concerns were addressed.
|
|
|
|
Args:
|
|
survey_instance_id: UUID of the original negative SurveyInstance
|
|
user_id: UUID of the user who is sending the feedback
|
|
|
|
Returns:
|
|
dict: Result with new survey_instance_id
|
|
"""
|
|
from apps.surveys.models import SurveyInstance, SurveyTemplate
|
|
|
|
try:
|
|
survey = SurveyInstance.objects.select_related("patient", "hospital", "survey_template").get(
|
|
id=survey_instance_id
|
|
)
|
|
|
|
# Get feedback survey template
|
|
try:
|
|
feedback_template = SurveyTemplate.objects.get(
|
|
hospital=survey.hospital, survey_type="complaint_resolution", is_active=True
|
|
)
|
|
except SurveyTemplate.DoesNotExist:
|
|
logger.warning(f"No feedback survey template found for hospital {survey.hospital.name}")
|
|
return {"status": "skipped", "reason": "no_template"}
|
|
|
|
# Check if already sent
|
|
if survey.satisfaction_feedback_sent:
|
|
logger.info(f"Satisfaction feedback already sent for survey {survey_instance_id}")
|
|
return {"status": "skipped", "reason": "already_sent"}
|
|
|
|
# Create feedback survey instance
|
|
feedback_survey = SurveyInstance.objects.create(
|
|
survey_template=feedback_template,
|
|
patient=survey.patient,
|
|
encounter_id=survey.encounter_id,
|
|
delivery_channel="sms",
|
|
recipient_phone=survey.patient.phone,
|
|
recipient_email=survey.patient.email,
|
|
metadata={
|
|
"original_survey_id": str(survey.id),
|
|
"original_survey_title": survey.survey_template.name,
|
|
"original_score": float(survey.total_score) if survey.total_score else None,
|
|
"feedback_type": "satisfaction",
|
|
},
|
|
)
|
|
|
|
# Mark original survey as having feedback sent
|
|
survey.satisfaction_feedback_sent = True
|
|
survey.satisfaction_feedback_sent_at = timezone.now()
|
|
survey.satisfaction_feedback = feedback_survey
|
|
survey.save(
|
|
update_fields=["satisfaction_feedback_sent", "satisfaction_feedback_sent_at", "satisfaction_feedback"]
|
|
)
|
|
|
|
# Send survey invitation
|
|
from apps.notifications.services import NotificationService
|
|
|
|
notification_log = NotificationService.send_survey_invitation(
|
|
survey_instance=feedback_survey,
|
|
language="en", # TODO: Get from patient preference
|
|
)
|
|
|
|
# Update feedback survey status
|
|
feedback_survey.status = "sent"
|
|
feedback_survey.sent_at = timezone.now()
|
|
feedback_survey.save(update_fields=["status", "sent_at"])
|
|
|
|
# Log audit
|
|
from apps.core.services import create_audit_log
|
|
|
|
create_audit_log(
|
|
event_type="satisfaction_feedback_sent",
|
|
description=f"Satisfaction feedback survey sent for survey: {survey.survey_template.name}",
|
|
content_object=feedback_survey,
|
|
metadata={
|
|
"original_survey_id": str(survey.id),
|
|
"feedback_template": feedback_template.name,
|
|
"sent_by_user_id": user_id,
|
|
},
|
|
)
|
|
|
|
logger.info(f"Satisfaction feedback survey sent for survey {survey_instance_id}")
|
|
|
|
return {
|
|
"status": "sent",
|
|
"feedback_survey_id": str(feedback_survey.id),
|
|
"notification_log_id": str(notification_log.id),
|
|
}
|
|
|
|
except SurveyInstance.DoesNotExist:
|
|
error_msg = f"SurveyInstance {survey_instance_id} not found"
|
|
logger.error(error_msg)
|
|
return {"status": "error", "reason": error_msg}
|
|
except Exception as e:
|
|
error_msg = f"Error sending satisfaction feedback: {str(e)}"
|
|
logger.error(error_msg, exc_info=True)
|
|
return {"status": "error", "reason": error_msg}
|
|
|
|
|
|
@shared_task
def create_action_from_negative_survey(survey_instance_id):
    """
    Create PX Action from negative survey.

    Triggered when a survey with negative feedback is completed. Derives
    severity/priority from the survey score, a category from the survey type
    or journey stage, then creates a PXAction + log entry and marks the survey
    metadata so the action is never created twice.

    Args:
        survey_instance_id: UUID of the SurveyInstance

    Returns:
        dict: On success, the new action_id plus score/severity/priority;
              otherwise a skipped/error status dict.
    """
    from apps.surveys.models import SurveyInstance
    from apps.px_action_center.models import PXAction, PXActionLog
    from apps.core.models import PriorityChoices, SeverityChoices
    from django.contrib.contenttypes.models import ContentType

    try:
        survey = SurveyInstance.objects.select_related("survey_template", "patient", "hospital").get(
            id=survey_instance_id
        )

        # Verify survey is negative
        if not survey.is_negative:
            logger.info(f"Survey {survey_instance_id} is not negative, skipping action creation")
            return {"status": "skipped", "reason": "not_negative"}

        # Idempotency guard. metadata may be None/empty (it is re-initialized
        # further down before writing), so guard the .get() against None —
        # the original `survey.metadata.get(...)` raised AttributeError here.
        if (survey.metadata or {}).get("px_action_created"):
            logger.info(f"PX Action already created for survey {survey_instance_id}")
            return {"status": "skipped", "reason": "already_created"}

        # Calculate score for priority/severity determination
        score = float(survey.total_score) if survey.total_score else 0.0

        # Determine severity based on score (lower = more severe)
        if score <= 2.0:
            severity = SeverityChoices.CRITICAL
            priority = PriorityChoices.CRITICAL
        elif score <= 3.0:
            severity = SeverityChoices.HIGH
            priority = PriorityChoices.HIGH
        elif score <= 4.0:
            severity = SeverityChoices.MEDIUM
            priority = PriorityChoices.MEDIUM
        else:
            severity = SeverityChoices.LOW
            priority = PriorityChoices.LOW

        # Determine category based on survey template or journey stage
        category = "service_quality"  # Default
        if survey.survey_template.survey_type == "post_discharge":
            category = "clinical_quality"
        elif survey.survey_template.survey_type == "inpatient_satisfaction":
            category = "service_quality"
        elif survey.journey_instance and survey.journey_instance.stage:
            stage = survey.journey_instance.stage.lower()
            if "admission" in stage or "registration" in stage:
                category = "process_improvement"
            elif "treatment" in stage or "procedure" in stage:
                category = "clinical_quality"
            elif "discharge" in stage or "billing" in stage:
                category = "process_improvement"

        # Build a pipe-separated description out of the available context.
        description_parts = [
            f"Negative survey response with score {score:.1f}/5.0",
            f"Survey Template: {survey.survey_template.name}",
        ]

        if survey.comment:
            description_parts.append(f"Patient Comment: {survey.comment}")

        if survey.journey_instance:
            description_parts.append(f"Journey Stage: {survey.journey_instance.stage}")

        if survey.encounter_id:
            description_parts.append(f"Encounter ID: {survey.encounter_id}")

        description = " | ".join(description_parts)

        # Create PX Action, generically linked back to the survey.
        survey_ct = ContentType.objects.get_for_model(SurveyInstance)

        action = PXAction.objects.create(
            source_type="survey",
            content_type=survey_ct,
            object_id=survey.id,
            title=f"Negative Survey: {survey.survey_template.name} (Score: {score:.1f})",
            description=description,
            hospital=survey.hospital,
            department=None,
            category=category,
            priority=priority,
            severity=severity,
            status="open",
            metadata={
                "source_survey_id": str(survey.id),
                "source_survey_template": survey.survey_template.name,
                "survey_score": score,
                "is_negative": True,
                "has_comment": bool(survey.comment),
                "encounter_id": survey.encounter_id,
                "auto_created": True,
            },
        )

        # Create action log entry
        PXActionLog.objects.create(
            action=action,
            log_type="note",
            message=(
                f"Action automatically created from negative survey. "
                f"Score: {score:.1f}, Template: {survey.survey_template.name}"
            ),
            metadata={
                "survey_id": str(survey.id),
                "survey_score": score,
                "auto_created": True,
                "severity": severity,
                "priority": priority,
            },
        )

        # Update survey metadata to track action creation
        if not survey.metadata:
            survey.metadata = {}
        survey.metadata["px_action_created"] = True
        survey.metadata["px_action_id"] = str(action.id)
        survey.save(update_fields=["metadata"])

        # Log audit
        from apps.core.services import create_audit_log

        create_audit_log(
            event_type="px_action_created",
            description=f"PX Action created from negative survey: {survey.survey_template.name}",
            content_object=action,
            metadata={
                "survey_id": str(survey.id),
                "survey_template": survey.survey_template.name,
                "survey_score": score,
                "trigger": "negative_survey",
            },
        )

        logger.info(
            f"Created PX Action {action.id} from negative survey {survey_instance_id} "
            f"(score: {score:.1f}, severity: {severity})"
        )

        return {
            "status": "action_created",
            "action_id": str(action.id),
            "survey_score": score,
            "severity": severity,
            "priority": priority,
        }

    except SurveyInstance.DoesNotExist:
        error_msg = f"SurveyInstance {survey_instance_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error creating action from negative survey: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"status": "error", "reason": error_msg}
@shared_task(bind=True, max_retries=3)
def send_bulk_surveys(self, job_id):
    """
    Send surveys to multiple patients in the background.

    Processes a BulkSurveyJob: for each patient in the job's patient_data,
    creates one SurveyInstance per usable delivery channel (SMS and/or email),
    delivers it, and records per-patient failures. Progress is flushed to the
    job row every 5 patients. On an unexpected top-level error the job is
    marked FAILED and the task retries with linear backoff (60s, 120s, 180s).

    Args:
        job_id: UUID of the BulkSurveyJob

    Returns:
        dict: Result with counts and status.
    """
    from apps.surveys.models import BulkSurveyJob, SurveyInstance, SurveyStatus, SurveyTemplate
    from apps.organizations.models import Patient, Hospital
    from apps.surveys.services import SurveyDeliveryService
    from apps.core.services import AuditService

    try:
        # Get the job
        job = BulkSurveyJob.objects.get(id=job_id)

        # Update status to processing
        job.status = BulkSurveyJob.JobStatus.PROCESSING
        job.started_at = timezone.now()
        job.save(update_fields=["status", "started_at"])

        logger.info(f"Starting bulk survey job {job_id} for {job.total_patients} patients")

        # Get settings
        survey_template = job.survey_template
        hospital = job.hospital
        delivery_channel = job.delivery_channel
        custom_message = job.custom_message
        patient_data_list = job.patient_data

        # Results tracking (note: counts are per delivery, not per patient —
        # a "both"-channel patient contributes two successes).
        success_count = 0
        failed_count = 0
        failed_patients = []
        created_survey_ids = []

        # Process each patient
        for idx, patient_info in enumerate(patient_data_list):
            try:
                # Flush progress every 5 patients so the UI can poll it.
                if idx % 5 == 0:
                    job.processed_count = idx
                    job.save(update_fields=["processed_count"])

                # Get patient
                patient_id = patient_info.get("patient_id")
                file_number = patient_info.get("file_number", "unknown")

                try:
                    patient = Patient.objects.get(id=patient_id)
                except Patient.DoesNotExist:
                    failed_count += 1
                    failed_patients.append({"file_number": file_number, "reason": "Patient not found"})
                    continue

                # Determine delivery channels the patient can actually receive.
                channels = []
                if delivery_channel in ["sms", "both"] and patient.phone:
                    channels.append("sms")
                if delivery_channel in ["email", "both"] and patient.email:
                    channels.append("email")

                if not channels:
                    failed_count += 1
                    failed_patients.append(
                        {
                            "file_number": file_number,
                            "patient_name": patient.get_full_name(),
                            "reason": "No contact information",
                        }
                    )
                    continue

                # Create and send survey for each channel
                for channel in channels:
                    survey_instance = SurveyInstance.objects.create(
                        survey_template=survey_template,
                        patient=patient,
                        hospital=hospital,
                        delivery_channel=channel,
                        status=SurveyStatus.SENT,
                        recipient_phone=patient.phone if channel == "sms" else "",
                        recipient_email=patient.email if channel == "email" else "",
                        metadata={
                            "sent_manually": True,
                            "sent_by": str(job.created_by.id) if job.created_by else None,
                            "custom_message": custom_message,
                            "recipient_type": "bulk_import",
                            "his_file_number": file_number,
                            "bulk_job_id": str(job.id),
                        },
                    )

                    # Send survey
                    success = SurveyDeliveryService.deliver_survey(survey_instance)

                    if success:
                        success_count += 1
                        created_survey_ids.append(str(survey_instance.id))
                    else:
                        failed_count += 1
                        failed_patients.append(
                            {
                                "file_number": file_number,
                                "patient_name": patient.get_full_name(),
                                "reason": "Delivery failed",
                            }
                        )
                        # Don't keep undeliverable instances around.
                        survey_instance.delete()

            except Exception as e:
                # Per-patient failures never abort the whole job.
                failed_count += 1
                failed_patients.append({"file_number": patient_info.get("file_number", "unknown"), "reason": str(e)})
                logger.error(f"Error processing patient in bulk job {job_id}: {e}")

        # Update job with final results
        job.processed_count = len(patient_data_list)
        job.success_count = success_count
        job.failed_count = failed_count
        job.results = {
            "success_count": success_count,
            "failed_count": failed_count,
            "failed_patients": failed_patients[:50],  # Limit stored failures
            "survey_ids": created_survey_ids[:100],  # Limit stored IDs
        }

        # Determine final status
        if failed_count == 0:
            job.status = BulkSurveyJob.JobStatus.COMPLETED
        elif success_count == 0:
            job.status = BulkSurveyJob.JobStatus.FAILED
            job.error_message = "All surveys failed to send"
        else:
            job.status = BulkSurveyJob.JobStatus.PARTIAL

        job.completed_at = timezone.now()
        job.save()

        # Log audit
        AuditService.log_event(
            event_type="bulk_survey_completed",
            description=f"Bulk survey job completed: {success_count} sent, {failed_count} failed",
            user=job.created_by,
            metadata={
                "job_id": str(job.id),
                "hospital": hospital.name,
                "survey_template": survey_template.name,
                "success_count": success_count,
                "failed_count": failed_count,
            },
        )

        logger.info(f"Bulk survey job {job_id} completed: {success_count} success, {failed_count} failed")

        return {
            "status": "success",
            "job_id": str(job.id),
            "success_count": success_count,
            "failed_count": failed_count,
            "total": len(patient_data_list),
        }

    except BulkSurveyJob.DoesNotExist:
        logger.error(f"BulkSurveyJob {job_id} not found")
        return {"status": "error", "error": "Job not found"}

    except Exception as e:
        logger.error(f"Error in bulk survey task {job_id}: {e}", exc_info=True)

        # Best-effort: mark the job failed. Was a bare `except:` which also
        # swallowed SystemExit/KeyboardInterrupt; narrow to Exception and log.
        try:
            job = BulkSurveyJob.objects.get(id=job_id)
            job.status = BulkSurveyJob.JobStatus.FAILED
            job.error_message = str(e)
            job.completed_at = timezone.now()
            job.save()
        except Exception as save_err:
            logger.warning(f"Could not mark bulk survey job {job_id} as failed: {save_err}")

        # Retry on failure with linear backoff.
        if self.request.retries < self.max_retries:
            logger.info(f"Retrying bulk survey job {job_id} (attempt {self.request.retries + 1})")
            raise self.retry(countdown=60 * (self.request.retries + 1))

        return {"status": "error", "error": str(e)}
@shared_task
def send_scheduled_survey(survey_instance_id):
    """
    Send a scheduled survey.

    Called after the delay period expires; delivers the survey via the
    configured channel (SMS/email) and updates its status accordingly.
    Surveys that are no longer pending, or not yet due, are skipped.

    Args:
        survey_instance_id: UUID of the SurveyInstance to send

    Returns:
        dict: Result with status and details.
    """
    from apps.surveys.models import SurveyInstance, SurveyStatus
    from apps.surveys.services import SurveyDeliveryService

    try:
        instance = SurveyInstance.objects.get(id=survey_instance_id)

        # Guard: only PENDING surveys may be sent by this task.
        if instance.status != SurveyStatus.PENDING:
            logger.warning(f"Survey {instance.id} already sent/cancelled (status: {instance.status})")
            return {"status": "skipped", "reason": "already_sent", "survey_id": instance.id}

        # Guard: a prematurely-fired task must not send before the schedule.
        not_due_yet = instance.scheduled_send_at and instance.scheduled_send_at > timezone.now()
        if not_due_yet:
            logger.warning(f"Survey {instance.id} not due yet (scheduled: {instance.scheduled_send_at})")
            return {
                "status": "delayed",
                "scheduled_at": instance.scheduled_send_at.isoformat(),
                "survey_id": instance.id,
            }

        # Attempt delivery and record the outcome on the instance.
        delivered = SurveyDeliveryService.deliver_survey(instance)
        if not delivered:
            instance.status = SurveyStatus.FAILED
            instance.save()
            logger.error(f"Scheduled survey {instance.id} delivery failed")
            return {"status": "failed", "survey_id": instance.id, "reason": "delivery_failed"}

        instance.status = SurveyStatus.SENT
        instance.sent_at = timezone.now()
        instance.save()
        logger.info(f"Scheduled survey {instance.id} sent successfully")
        return {"status": "sent", "survey_id": instance.id}

    except SurveyInstance.DoesNotExist:
        logger.error(f"Survey {survey_instance_id} not found")
        return {"status": "error", "reason": "not_found"}
    except Exception as e:
        logger.error(f"Error sending scheduled survey: {e}", exc_info=True)
        return {"status": "error", "reason": str(e)}
@shared_task
def send_pending_scheduled_surveys():
    """
    Periodic task to send any overdue scheduled surveys.

    Acts as a safety net (run every 10 minutes) for surveys whose individual
    send task failed or was delayed: each overdue survey is re-queued through
    send_scheduled_survey, which performs its own status/due-time guards.

    Returns:
        dict: Result with count of queued surveys.
    """
    from apps.surveys.models import SurveyInstance

    # Overdue = never sent (sent_at is NULL) and the scheduled time has
    # passed. Capped at 50 per run to bound queue pressure.
    # NOTE(review): the original comment claimed "there's no PENDING status",
    # yet send_scheduled_survey checks SurveyStatus.PENDING — confirm whether
    # this filter should also restrict on status.
    overdue = SurveyInstance.objects.filter(
        sent_at__isnull=True,
        scheduled_send_at__lte=timezone.now(),
    )[:50]

    queued = 0
    for instance in overdue:
        send_scheduled_survey.delay(str(instance.id))
        queued += 1

    if queued > 0:
        logger.info(f"Queued {queued} overdue scheduled surveys")

    return {"queued": queued}
|