# HH/apps/integrations/tasks.py
# Last modified: 2026-03-09 16:10:24 +03:00
# 445 lines, 16 KiB, Python
"""
Integrations Celery tasks
This module contains the core event processing logic that:
1. Processes inbound events from external systems
2. Finds matching journey instances
3. Completes journey stages
4. Triggers survey creation
5. Fetches surveys from HIS systems (every 5 minutes)
"""
import logging
from celery import shared_task
from django.db import transaction
from django.utils import timezone
logger = logging.getLogger("apps.integrations")
@shared_task(bind=True, max_retries=3)
def process_inbound_event(self, event_id):
    """
    Process an inbound integration event.

    This is the core event processing task that:
    1. Finds the journey instance by encounter_id
    2. Finds the matching stage by trigger_event_code
    3. Completes the stage
    4. Creates survey instance if configured
    5. Logs audit events

    Args:
        event_id: UUID of the InboundEvent to process

    Returns:
        dict: Processing result with status and details. ``status`` is one of
        "processed", "ignored", "failed", or "error".

    Raises:
        celery.exceptions.Retry: on unexpected errors the task is re-queued
        with linearly increasing backoff (60s, 120s, 180s) up to max_retries.
    """
    from apps.core.services import create_audit_log
    from apps.integrations.models import InboundEvent
    from apps.journeys.models import PatientJourneyInstance, StageStatus
    from apps.organizations.models import Department, Staff

    # Kept in function scope so the outer exception handler can safely
    # reference it even when the initial lookup itself raises.
    event = None
    try:
        # Get the event
        event = InboundEvent.objects.get(id=event_id)
        event.mark_processing()
        logger.info(f"Processing event {event.id}: {event.event_code} for encounter {event.encounter_id}")
        # Find journey instance by encounter_id
        try:
            journey_instance = PatientJourneyInstance.objects.select_related(
                "journey_template", "patient", "hospital"
            ).get(encounter_id=event.encounter_id)
        except PatientJourneyInstance.DoesNotExist:
            error_msg = f"No journey instance found for encounter {event.encounter_id}"
            logger.warning(error_msg)
            event.mark_ignored(error_msg)
            return {"status": "ignored", "reason": error_msg}
        # Find the first pending/in-progress stage matching the trigger code.
        # Single query: .first() replaces the previous .exists() + .first() pair.
        stage_instance = (
            journey_instance.stage_instances.filter(
                stage_template__trigger_event_code=event.event_code,
                status__in=[StageStatus.PENDING, StageStatus.IN_PROGRESS],
            )
            .select_related("stage_template")
            .first()
        )
        if stage_instance is None:
            error_msg = f"No pending stage found with trigger {event.event_code}"
            logger.warning(error_msg)
            event.mark_ignored(error_msg)
            return {"status": "ignored", "reason": error_msg}
        # Extract staff and department from event payload; both are optional
        # and a missing record only logs a warning (best-effort enrichment).
        staff = None
        department = None
        if event.physician_license:
            try:
                staff = Staff.objects.get(license_number=event.physician_license, hospital=journey_instance.hospital)
            except Staff.DoesNotExist:
                logger.warning(f"Staff member not found with license: {event.physician_license}")
        if event.department_code:
            try:
                department = Department.objects.get(code=event.department_code, hospital=journey_instance.hospital)
            except Department.DoesNotExist:
                logger.warning(f"Department not found: {event.department_code}")
        # Complete the stage atomically together with the audit log and any
        # journey-level state change triggered by a discharge event.
        with transaction.atomic():
            success = stage_instance.complete(
                event=event, staff=staff, department=department, metadata=event.payload_json
            )
            if success:
                # Log stage completion
                create_audit_log(
                    event_type="stage_completed",
                    description=f"Stage {stage_instance.stage_template.name} completed for encounter {event.encounter_id}",
                    content_object=stage_instance,
                    metadata={
                        "event_code": event.event_code,
                        "stage_name": stage_instance.stage_template.name,
                        "journey_type": journey_instance.journey_template.journey_type,
                    },
                )
                # Check if this is a discharge event
                if event.event_code.upper() == "PATIENT_DISCHARGED":
                    logger.info(f"Discharge event received for encounter {event.encounter_id}")
                    # Mark journey as completed
                    journey_instance.status = "completed"
                    journey_instance.completed_at = timezone.now()
                    journey_instance.save()
                    # Check if post-discharge survey is enabled
                    if journey_instance.journey_template.send_post_discharge_survey:
                        logger.info(
                            f"Post-discharge survey enabled for journey {journey_instance.id}. "
                            f"Will send in {journey_instance.journey_template.post_discharge_survey_delay_hours} hour(s)"
                        )
                        # Queue post-discharge survey creation task with delay
                        from apps.surveys.tasks import create_post_discharge_survey
                        delay_hours = journey_instance.journey_template.post_discharge_survey_delay_hours
                        delay_seconds = delay_hours * 3600
                        create_post_discharge_survey.apply_async(
                            args=[str(journey_instance.id)], countdown=delay_seconds
                        )
                        logger.info(
                            f"Queued post-discharge survey for journey {journey_instance.id} (delay: {delay_hours}h)"
                        )
                    else:
                        logger.info(f"Post-discharge survey disabled for journey {journey_instance.id}")
                # Mark event as processed
                event.mark_processed()
                logger.info(
                    f"Successfully processed event {event.id}: Completed stage {stage_instance.stage_template.name}"
                )
                return {
                    "status": "processed",
                    "stage_completed": stage_instance.stage_template.name,
                    "journey_completion": journey_instance.get_completion_percentage(),
                }
            else:
                error_msg = "Failed to complete stage"
                event.mark_failed(error_msg)
                return {"status": "failed", "reason": error_msg}
    except InboundEvent.DoesNotExist:
        error_msg = f"Event {event_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error processing event: {str(e)}"
        logger.error(error_msg, exc_info=True)
        # Best-effort failure bookkeeping: never let it mask the original error,
        # and skip it entirely if the event was never loaded.
        if event is not None:
            try:
                event.mark_failed(error_msg)
            except Exception:
                logger.warning("Could not mark event %s as failed", event_id, exc_info=True)
        # Retry the task with linear backoff
        raise self.retry(exc=e, countdown=60 * (self.request.retries + 1))
@shared_task
def process_pending_events():
    """
    Periodic sweep that fans out pending inbound events.

    Runs every minute (configured in config/celery.py), takes up to 100 of
    the oldest pending events, and queues each one for individual processing
    via :func:`process_inbound_event`.

    Returns:
        dict: ``{"queued": <number of events queued this run>}``
    """
    from apps.integrations.models import InboundEvent

    # Oldest first; cap at 100 per sweep so a backlog can't stall the beat.
    batch = InboundEvent.objects.filter(status="pending").order_by("received_at")[:100]
    queued = 0
    for pending in batch:
        # Each event gets its own task so failures are isolated and retried
        # independently.
        process_inbound_event.delay(str(pending.id))
        queued += 1
    if queued:
        logger.info(f"Queued {queued} pending events for processing")
    return {"queued": queued}
# =============================================================================
# HIS Survey Fetching Tasks
# =============================================================================
@shared_task
def fetch_his_surveys():
    """
    Periodic task to fetch surveys from HIS system every 5 minutes.

    This task:
    1. Connects to configured HIS API endpoints
    2. Fetches discharged patients for the last 5-minute window
    3. Processes each patient using HISAdapter
    4. Creates and sends surveys via SMS

    Scheduled to run every 5 minutes via Celery Beat.

    Returns:
        dict: Summary of fetched and processed surveys with keys
        ``success``, ``clients_processed``, ``patients_fetched``,
        ``surveys_created``, ``surveys_sent``, ``errors``, and per-client
        ``details``.
    """
    from datetime import timedelta

    from apps.integrations.services.his_adapter import HISAdapter
    from apps.integrations.services.his_client import HISClientFactory

    logger.info("Starting HIS survey fetch task")
    result = {
        "success": False,
        "clients_processed": 0,
        "patients_fetched": 0,
        "surveys_created": 0,
        "surveys_sent": 0,
        "errors": [],
        "details": [],
    }
    try:
        # Get all active HIS clients
        clients = HISClientFactory.get_all_active_clients()
        if not clients:
            msg = "No active HIS configurations found"
            logger.warning(msg)
            result["errors"].append(msg)
            return result
        # Calculate fetch window (last 5 minutes).
        # NOTE(review): a fixed wall-clock window assumes the beat never skips
        # a run; a skipped run would miss that window's discharges — confirm
        # whether config.last_sync_at should drive this instead.
        fetch_since = timezone.now() - timedelta(minutes=5)
        logger.info(f"Fetching discharged patients since {fetch_since}")
        for client in clients:
            # Per-client summary appended to result["details"] on every path.
            client_result = {
                "config": client.config.name if client.config else "Default",
                "patients_fetched": 0,
                "surveys_created": 0,
                "surveys_sent": 0,
                "errors": [],
            }
            try:
                # Test connection first; skip this client if unreachable.
                connection_test = client.test_connection()
                if not connection_test["success"]:
                    error_msg = f"HIS connection failed for {client_result['config']}: {connection_test['message']}"
                    logger.error(error_msg)
                    client_result["errors"].append(error_msg)
                    result["details"].append(client_result)
                    continue
                logger.info(f"Fetching discharged patients from {client_result['config']} since {fetch_since}")
                # Fetch discharged patients for the 5-minute window
                patients = client.fetch_discharged_patients(
                    since=fetch_since,
                    limit=100,  # Max 100 patients per fetch
                )
                client_result["patients_fetched"] = len(patients)
                result["patients_fetched"] += len(patients)
                if not patients:
                    logger.info(f"No new discharged patients found in {client_result['config']}")
                    result["details"].append(client_result)
                    continue
                logger.info(f"Fetched {len(patients)} patients from {client_result['config']}")
                # Process each patient; one bad record must not abort the batch.
                for patient_data in patients:
                    try:
                        # Ensure patient data is in proper HIS format
                        if isinstance(patient_data, dict):
                            if "FetchPatientDataTimeStampList" not in patient_data:
                                # Wrap a bare record in the envelope HISAdapter expects
                                patient_data = {
                                    "FetchPatientDataTimeStampList": [patient_data],
                                    "FetchPatientDataTimeStampVisitDataList": [],
                                    "Code": 200,
                                    "Status": "Success",
                                }
                        # Process using HISAdapter
                        process_result = HISAdapter.process_his_data(patient_data)
                        if process_result["success"]:
                            client_result["surveys_created"] += 1
                            result["surveys_created"] += 1
                            if process_result.get("survey_sent"):
                                client_result["surveys_sent"] += 1
                                result["surveys_sent"] += 1
                        else:
                            error_msg = f"Failed to process patient: {process_result.get('message', 'Unknown error')}"
                            logger.warning(error_msg)
                            client_result["errors"].append(error_msg)
                    except Exception as e:
                        error_msg = f"Error processing patient data: {str(e)}"
                        logger.error(error_msg, exc_info=True)
                        client_result["errors"].append(error_msg)
                # Update last sync timestamp (only for DB-backed configs)
                if client.config:
                    client.config.last_sync_at = timezone.now()
                    client.config.save(update_fields=["last_sync_at"])
                result["clients_processed"] += 1
            except Exception as e:
                error_msg = f"Error processing HIS client {client_result['config']}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                client_result["errors"].append(error_msg)
            result["details"].append(client_result)
        result["success"] = True
        logger.info(
            f"HIS survey fetch completed: {result['clients_processed']} clients, "
            f"{result['patients_fetched']} patients, "
            f"{result['surveys_created']} surveys created, "
            f"{result['surveys_sent']} surveys sent"
        )
    except Exception as e:
        error_msg = f"Fatal error in fetch_his_surveys task: {str(e)}"
        logger.error(error_msg, exc_info=True)
        result["errors"].append(error_msg)
    return result
@shared_task
def test_his_connection(config_id=None):
    """
    Test connectivity to HIS system.

    Args:
        config_id: Optional IntegrationConfig ID. If not provided,
            tests the default HIS configuration.

    Returns:
        dict: Connection test results with at least ``success`` and,
        on failure, a ``message`` key.
    """
    from apps.integrations.models import IntegrationConfig, SourceSystem
    from apps.integrations.services.his_client import HISClient

    logger.info(f"Testing HIS connection for config_id={config_id}")
    try:
        if not config_id:
            # No (truthy) id supplied — fall back to the default configuration.
            client = HISClient()
        else:
            config = (
                IntegrationConfig.objects
                .filter(id=config_id, source_system=SourceSystem.HIS)
                .first()
            )
            if config is None:
                return {"success": False, "message": f"HIS configuration with ID {config_id} not found"}
            client = HISClient(config)
        outcome = client.test_connection()
        logger.info(f"HIS connection test result: {outcome}")
        return outcome
    except Exception as exc:
        message = f"Error testing HIS connection: {str(exc)}"
        logger.error(message, exc_info=True)
        return {"success": False, "message": message}
@shared_task
def sync_his_survey_mappings():
    """
    Sync survey template mappings from HIS system.

    This task can be scheduled periodically to sync any mapping
    configurations from the HIS system. Currently it only validates that
    each active mapping has a survey template attached; real sync logic
    can be added where noted below.

    Returns:
        dict: ``{"success": bool, "mappings_synced": int, "errors": [...]}``.
        Note: per-mapping validation errors are collected in ``errors`` but
        do NOT flip ``success`` to False — only a fatal error does.
    """
    from apps.integrations.models import SurveyTemplateMapping

    logger.info("Starting HIS survey mappings sync")
    result = {"success": True, "mappings_synced": 0, "errors": []}
    try:
        # Get all active mappings
        mappings = SurveyTemplateMapping.objects.filter(is_active=True)
        for mapping in mappings:
            try:
                # Validate mapping: a mapping without a template is unusable.
                if not mapping.survey_template:
                    result["errors"].append(f"Mapping {mapping.id} has no survey template")
                    continue
                # Additional sync logic can be added here
                # e.g., fetching updated mapping rules from HIS
                result["mappings_synced"] += 1
            except Exception as e:
                result["errors"].append(f"Error syncing mapping {mapping.id}: {str(e)}")
        logger.info(f"HIS survey mappings sync completed: {result['mappings_synced']} mappings")
    except Exception as e:
        error_msg = f"Error syncing HIS mappings: {str(e)}"
        logger.error(error_msg, exc_info=True)
        result["success"] = False
        result["errors"].append(error_msg)
    return result