# HH/apps/integrations/tasks.py
"""
Integrations Celery tasks
This module contains the core event processing logic that:
1. Processes inbound events from external systems
2. Finds matching journey instances
3. Completes journey stages
4. Triggers survey creation
5. Fetches surveys from HIS systems (every 5 minutes)
"""
import logging
from datetime import datetime
from celery import shared_task
from django.db import transaction
from django.utils import timezone
logger = logging.getLogger("apps.integrations")
@shared_task(bind=True, max_retries=3)
def process_inbound_event(self, event_id):
    """
    Process an inbound integration event.

    Core event-processing task:
    1. Loads the InboundEvent and marks it as processing.
    2. Finds the journey instance by encounter_id.
    3. Finds the first pending/in-progress stage whose template trigger
       matches the event code.
    4. Completes the stage (attributing staff/department when resolvable).
    5. On a discharge event, completes the journey and queues the
       post-discharge survey if the template enables it.
    6. Writes an audit log entry and marks the event processed.

    Args:
        event_id: UUID (or string) of the InboundEvent to process.

    Returns:
        dict: Processing result with status and details.

    Raises:
        celery.exceptions.Retry: unexpected errors retry up to max_retries
        with linear backoff (60s, 120s, 180s).
    """
    from apps.core.services import create_audit_log
    from apps.integrations.models import InboundEvent
    from apps.journeys.models import PatientJourneyInstance, StageStatus
    from apps.organizations.models import Department, Staff

    # Kept in function scope so the outer handler can mark the event failed
    # even when the failure happened after the lookup succeeded.
    event = None
    try:
        event = InboundEvent.objects.get(id=event_id)
        event.mark_processing()
        logger.info(f"Processing event {event.id}: {event.event_code} for encounter {event.encounter_id}")

        # Find journey instance by encounter_id.
        try:
            journey_instance = PatientJourneyInstance.objects.select_related(
                "journey_template", "patient", "hospital"
            ).get(encounter_id=event.encounter_id)
        except PatientJourneyInstance.DoesNotExist:
            error_msg = f"No journey instance found for encounter {event.encounter_id}"
            logger.warning(error_msg)
            event.mark_ignored(error_msg)
            return {"status": "ignored", "reason": error_msg}

        # Find a stage still awaiting this trigger code. A single first()
        # replaces the original exists() + first() pair (one query, not two).
        stage_instance = (
            journey_instance.stage_instances.filter(
                stage_template__trigger_event_code=event.event_code,
                status__in=[StageStatus.PENDING, StageStatus.IN_PROGRESS],
            )
            .select_related("stage_template")
            .first()
        )
        if stage_instance is None:
            error_msg = f"No pending stage found with trigger {event.event_code}"
            logger.warning(error_msg)
            event.mark_ignored(error_msg)
            return {"status": "ignored", "reason": error_msg}

        # Attribute the completion to staff/department when the payload
        # identifies them; unresolved records are logged but non-fatal.
        staff = None
        department = None
        if event.physician_license:
            try:
                staff = Staff.objects.get(
                    license_number=event.physician_license, hospital=journey_instance.hospital
                )
            except Staff.DoesNotExist:
                logger.warning(f"Staff member not found with license: {event.physician_license}")
        if event.department_code:
            try:
                department = Department.objects.get(
                    code=event.department_code, hospital=journey_instance.hospital
                )
            except Department.DoesNotExist:
                logger.warning(f"Department not found: {event.department_code}")

        # Complete the stage inside a transaction so a partial completion
        # cannot be persisted.
        with transaction.atomic():
            success = stage_instance.complete(
                event=event, staff=staff, department=department, metadata=event.payload_json
            )
        if success:
            # Log stage completion for auditing.
            create_audit_log(
                event_type="stage_completed",
                description=f"Stage {stage_instance.stage_template.name} completed for encounter {event.encounter_id}",
                content_object=stage_instance,
                metadata={
                    "event_code": event.event_code,
                    "stage_name": stage_instance.stage_template.name,
                    "journey_type": journey_instance.journey_template.journey_type,
                },
            )
            # A discharge event also completes the whole journey.
            if event.event_code.upper() == "PATIENT_DISCHARGED":
                logger.info(f"Discharge event received for encounter {event.encounter_id}")
                journey_instance.status = "completed"
                journey_instance.completed_at = timezone.now()
                journey_instance.save()
                # Queue the post-discharge survey only when the template
                # enables it, delayed by the configured number of hours.
                if journey_instance.journey_template.send_post_discharge_survey:
                    logger.info(
                        f"Post-discharge survey enabled for journey {journey_instance.id}. "
                        f"Will send in {journey_instance.journey_template.post_discharge_survey_delay_hours} hour(s)"
                    )
                    from apps.surveys.tasks import create_post_discharge_survey

                    delay_hours = journey_instance.journey_template.post_discharge_survey_delay_hours
                    delay_seconds = delay_hours * 3600
                    create_post_discharge_survey.apply_async(
                        args=[str(journey_instance.id)], countdown=delay_seconds
                    )
                    logger.info(
                        f"Queued post-discharge survey for journey {journey_instance.id} (delay: {delay_hours}h)"
                    )
                else:
                    logger.info(f"Post-discharge survey disabled for journey {journey_instance.id}")
            # Mark event as processed.
            event.mark_processed()
            logger.info(
                f"Successfully processed event {event.id}: Completed stage {stage_instance.stage_template.name}"
            )
            return {
                "status": "processed",
                "stage_completed": stage_instance.stage_template.name,
                "journey_completion": journey_instance.get_completion_percentage(),
            }
        else:
            error_msg = "Failed to complete stage"
            event.mark_failed(error_msg)
            return {"status": "failed", "reason": error_msg}
    except InboundEvent.DoesNotExist:
        error_msg = f"Event {event_id} not found"
        logger.error(error_msg)
        return {"status": "error", "reason": error_msg}
    except Exception as e:
        error_msg = f"Error processing event: {str(e)}"
        logger.error(error_msg, exc_info=True)
        # Best-effort status update; never mask the original error with a
        # bare except (the original could also hit a NameError here when
        # the event lookup itself failed).
        if event is not None:
            try:
                event.mark_failed(error_msg)
            except Exception:
                logger.warning("Could not mark event %s as failed", event_id)
        # Retry with linear backoff: 60s, 120s, 180s.
        raise self.retry(exc=e, countdown=60 * (self.request.retries + 1))
@shared_task
def process_pending_events():
    """
    Periodic sweep that fans out pending inbound events.

    Runs every minute (configured in config/celery.py). Each pending
    event is queued onto process_inbound_event individually; at most
    100 events are dispatched per run.

    Returns:
        dict: {"queued": <number of events dispatched>}
    """
    from apps.integrations.models import InboundEvent

    # Oldest first, capped at 100 per sweep to bound the fan-out.
    batch = InboundEvent.objects.filter(status="pending").order_by("received_at")[:100]
    queued = 0
    for pending in batch:
        process_inbound_event.delay(str(pending.id))
        queued += 1
    if queued:
        logger.info(f"Queued {queued} pending events for processing")
    return {"queued": queued}
# =============================================================================
# HIS Survey Fetching Tasks
# =============================================================================
def _parse_his_date(date_str):
    """
    Parse the HIS date format into a timezone-aware datetime.

    Accepts 'DD-Mon-YYYY HH:MM:SS' and the seconds-less variant
    'DD-Mon-YYYY HH:MM'. Returns None for empty input or an
    unrecognized format.
    """
    if not date_str:
        return None
    accepted_formats = ("%d-%b-%Y %H:%M:%S", "%d-%b-%Y %H:%M")
    for candidate in accepted_formats:
        try:
            parsed = datetime.strptime(date_str, candidate)
        except ValueError:
            continue
        return timezone.make_aware(parsed)
    return None
@shared_task
def fetch_his_surveys(from_date_str=None, to_date_str=None):
    """
    Periodic task to fetch patient data from HIS system every 5 minutes.

    This task:
    1. Connects to configured HIS API endpoints
    2. Fetches all patients from the last 5 minutes (or custom date range)
    3. Saves patient and visit data for every patient (all types)
    4. Creates surveys only for patients whose visit is complete:
       - ED/IP: DischargeDate present
       - OP: last visit event is older than configured delay hours

    Args:
        from_date_str: Optional override - fetch from this date (DD-Mon-YYYY HH:MM:SS)
        to_date_str: Optional override - fetch until this date (DD-Mon-YYYY HH:MM:SS)

    Scheduled to run every 5 minutes via Celery Beat.

    Returns:
        dict: Summary of fetched and processed data
    """
    # Import only what is used: the original `from datetime import datetime`
    # shadowed the module-level datetime import and was never used; the
    # HISClient import was unused as well.
    from datetime import timedelta

    from apps.integrations.services.his_adapter import HISAdapter
    from apps.integrations.services.his_client import HISClientFactory

    logger.info("Starting HIS patient data fetch task")
    result = {
        "success": False,
        "clients_processed": 0,
        "total_patients": 0,
        "visits_saved": 0,
        "surveys_created": 0,
        "errors": [],
        "details": [],
    }
    try:
        clients = HISClientFactory.get_all_active_clients()
        if not clients:
            msg = "No active HIS configurations found"
            logger.warning(msg)
            result["errors"].append(msg)
            return result

        # Resolve the fetch window: an explicit range when both bounds are
        # given, otherwise the trailing 5 minutes (open-ended upper bound).
        if from_date_str and to_date_str:
            fetch_since = _parse_his_date(from_date_str)
            fetch_until = _parse_his_date(to_date_str)
            if not fetch_since or not fetch_until:
                msg = f"Invalid date format: from_date={from_date_str}, to_date={to_date_str}"
                result["errors"].append(msg)
                return result
            logger.info(f"Using custom date range: {fetch_since} to {fetch_until}")
        else:
            fetch_since = timezone.now() - timedelta(minutes=5)
            fetch_until = None
            logger.info(f"Fetching patient data since {fetch_since}")

        for client in clients:
            # Per-client summary, aggregated into `result` as we go.
            client_result = {
                "config": client.config.name if client.config else "Default",
                "total_patients": 0,
                "visits_saved": 0,
                "surveys_created": 0,
                "errors": [],
            }
            try:
                connection_test = client.test_connection()
                if not connection_test["success"]:
                    error_msg = f"HIS connection failed for {client_result['config']}: {connection_test['message']}"
                    logger.error(error_msg)
                    client_result["errors"].append(error_msg)
                    result["details"].append(client_result)
                    continue
                logger.info(f"Fetching patient data from {client_result['config']}")
                his_data = client.fetch_patient_data(since=fetch_since, until=fetch_until)
                if not his_data:
                    logger.info(f"No data returned from {client_result['config']}")
                    result["details"].append(client_result)
                    continue
                patient_list = his_data.get("FetchPatientDataTimeStampList", [])
                client_result["total_patients"] = len(patient_list)
                result["total_patients"] += len(patient_list)
                if not patient_list:
                    logger.info(f"No patients found in {client_result['config']}")
                    result["details"].append(client_result)
                    continue
                logger.info(f"Fetched {len(patient_list)} patients from {client_result['config']}")
                # Persistence and survey-creation rules live in the adapter.
                process_result = HISAdapter.process_his_response(his_data)
                client_result["visits_saved"] = process_result.get("visits_saved", 0)
                client_result["surveys_created"] = process_result.get("surveys_created", 0)
                client_result["errors"] = process_result.get("errors", [])
                result["visits_saved"] += client_result["visits_saved"]
                result["surveys_created"] += client_result["surveys_created"]
                result["errors"].extend(client_result["errors"])
                # Record the successful sync time on the config, if any.
                if client.config:
                    client.config.last_sync_at = timezone.now()
                    client.config.save(update_fields=["last_sync_at"])
                result["clients_processed"] += 1
            except Exception as e:
                # One failing client must not stop the others.
                error_msg = f"Error processing HIS client {client_result['config']}: {str(e)}"
                logger.error(error_msg, exc_info=True)
                client_result["errors"].append(error_msg)
            result["details"].append(client_result)

        result["success"] = True
        logger.info(
            f"HIS fetch completed: {result['clients_processed']} clients, "
            f"{result['total_patients']} patients, "
            f"{result['visits_saved']} visits saved, "
            f"{result['surveys_created']} surveys created"
        )
    except Exception as e:
        error_msg = f"Fatal error in fetch_his_surveys task: {str(e)}"
        logger.error(error_msg, exc_info=True)
        result["errors"].append(error_msg)
    return result
@shared_task
def test_his_connection(config_id=None):
    """
    Test connectivity to HIS system.

    Args:
        config_id: Optional IntegrationConfig ID. If not provided,
            tests the default HIS configuration.

    Returns:
        dict: Connection test results
    """
    from apps.integrations.models import IntegrationConfig, SourceSystem
    from apps.integrations.services.his_client import HISClient

    logger.info(f"Testing HIS connection for config_id={config_id}")
    try:
        if not config_id:
            # No ID supplied: fall back to the default configuration.
            client = HISClient()
        else:
            config = (
                IntegrationConfig.objects
                .filter(id=config_id, source_system=SourceSystem.HIS)
                .first()
            )
            if config is None:
                return {"success": False, "message": f"HIS configuration with ID {config_id} not found"}
            client = HISClient(config)
        outcome = client.test_connection()
        logger.info(f"HIS connection test result: {outcome}")
        return outcome
    except Exception as e:
        error_msg = f"Error testing HIS connection: {str(e)}"
        logger.error(error_msg, exc_info=True)
        return {"success": False, "message": error_msg}
@shared_task
def sync_his_survey_mappings():
    """
    Sync survey template mappings from HIS system.

    This task can be scheduled periodically to validate (and, in the
    future, sync) mapping configurations from the HIS system.

    Returns:
        dict: {"success": bool, "mappings_synced": int, "errors": [str]}
    """
    # Unused `Hospital` import removed.
    from apps.integrations.models import SurveyTemplateMapping

    logger.info("Starting HIS survey mappings sync")
    result = {"success": True, "mappings_synced": 0, "errors": []}
    try:
        # Only active mappings are considered.
        mappings = SurveyTemplateMapping.objects.filter(is_active=True)
        for mapping in mappings:
            try:
                # A mapping without a survey template is unusable; report it
                # and move on.
                if not mapping.survey_template:
                    result["errors"].append(f"Mapping {mapping.id} has no survey template")
                    continue
                # Additional sync logic can be added here
                # e.g., fetching updated mapping rules from HIS
                result["mappings_synced"] += 1
            except Exception as e:
                # One bad mapping must not abort the whole sweep.
                result["errors"].append(f"Error syncing mapping {mapping.id}: {str(e)}")
        logger.info(f"HIS survey mappings sync completed: {result['mappings_synced']} mappings")
    except Exception as e:
        error_msg = f"Error syncing HIS mappings: {str(e)}"
        logger.error(error_msg, exc_info=True)
        result["success"] = False
        result["errors"].append(error_msg)
    return result