#!/usr/bin/env python
"""
Test script for survey status transitions.

Tests:
1. in_progress status - when patient starts answering
2. abandoned status - automatic detection
3. All status transitions
4. Tracking events
"""

import os
import sys

import django

# Setup Django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PX360.settings')
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
django.setup()

from django.utils import timezone
from datetime import timedelta
from apps.surveys.models import SurveyInstance, SurveyQuestion, SurveyResponse, SurveyTracking
from apps.surveys.tasks import mark_abandoned_surveys
from apps.patients.models import Patient
from apps.hospitals.models import Hospital
from apps.journeys.models import JourneyTemplate, JourneyStageTemplate, PatientJourneyInstance
from django.test import Client
import uuid


def print_header(text):
    """Print a section header framed by 70-char '=' rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {text}")
    print(rule + "\n")


def print_success(text):
    """Print *text* prefixed with a check mark."""
    print("✓", text)


def print_error(text):
    """Print *text* prefixed with a cross mark."""
    print("✗", text)


def test_in_progress_transition():
    """Test transition from 'viewed' to 'in_progress' status"""
    print_header("TEST 1: in_progress Status Transition")

    # Pick any survey currently sitting in the 'viewed' state.
    survey = SurveyInstance.objects.filter(status='viewed').first()
    if survey is None:
        print_error("No survey with 'viewed' status found")
        return False

    print(f"Testing survey: {survey.survey_template.name}")
    print(f"Current status: {survey.status}")

    # Hit the tracking endpoint the way the browser-side script would.
    http = Client()
    resp = http.post(
        f'/surveys/s/{survey.access_token}/track-start/',
        HTTP_X_CSRFTOKEN='test'
    )

    # Pick up whatever the endpoint wrote to the database.
    survey.refresh_from_db()

    if resp.status_code != 200:
        print_error(f"API returned {resp.status_code}")
        return False
    print_success(f"API returned 200: {resp.json()}")

    # The endpoint should have flipped the status.
    if survey.status != 'in_progress':
        print_error(f"Status did not change. Current: {survey.status}")
        return False
    print_success(f"Status changed to 'in_progress': {survey.status}")

    # A 'survey_started' tracking event should have been recorded.
    started_events = survey.tracking_events.filter(event_type='survey_started')
    if not started_events.exists():
        print_error("No tracking event found")
        return False
    print_success("Tracking event created: survey_started")
    print(f"  Event count: {started_events.count()}")

    return True


def test_abandoned_surveys():
    """Test abandoned survey detection"""
    print_header("TEST 2: Abandoned Survey Detection")

    now = timezone.now()

    # A patient/hospital pair is required to build a survey instance.
    patient = Patient.objects.first()
    hospital = Hospital.objects.first()
    if patient is None or hospital is None:
        print_error("No patient or hospital found")
        return False

    # Get or create a survey template
    from apps.surveys.models import SurveyTemplate
    template = SurveyTemplate.objects.filter(is_active=True).first()
    if template is None:
        print_error("No active survey template found")
        return False

    # Build a 'viewed' survey opened 26 hours ago — past the 24h cutoff.
    opened = now - timedelta(hours=26)
    test_survey = SurveyInstance.objects.create(
        survey_template=template,
        patient=patient,
        hospital=hospital,
        status='viewed',
        sent_at=opened,
        opened_at=opened,
        last_opened_at=opened,
        token_expires_at=now + timedelta(days=7),
        access_token=uuid.uuid4().hex
    )

    print(f"Created test survey with status: {test_survey.status}")
    print("Opened: 26 hours ago (should be marked as abandoned)")

    # Run the abandoned survey task
    result = mark_abandoned_surveys(hours=24)
    print(f"\nTask result: {result}")

    # Re-read the row the task may have updated.
    test_survey.refresh_from_db()

    if test_survey.status != 'abandoned':
        print_error(f"Survey not marked as abandoned. Current: {test_survey.status}")
        return False
    print_success(f"Survey marked as abandoned: {test_survey.status}")

    # The task should also record a 'survey_abandoned' tracking event.
    abandoned_events = test_survey.tracking_events.filter(event_type='survey_abandoned')
    if not abandoned_events.exists():
        print_error("No abandonment tracking event found")
        return False
    print_success("Abandonment tracking event created")
    first_event = abandoned_events.first()
    print(f"  Questions answered: {first_event.current_question}")
    print(f"  Time since open: {first_event.metadata.get('time_since_open_hours', 'N/A')} hours")

    # Clean up
    test_survey.delete()
    print_success("Test survey cleaned up")

    return True


def test_status_flow():
    """Walk a survey through the full status lifecycle.

    Expected flow: pending -> sent -> viewed -> in_progress -> completed
    (alternative terminal state after in_progress: abandoned).

    Returns:
        bool: True when the survey is created, advanced through every
        status, tracked, and cleaned up without error.
    """
    print_header("TEST 3: Complete Status Flow")

    # Fix: SurveyTemplate was only imported locally inside
    # test_abandoned_surveys(), so this function raised NameError
    # whenever it ran (e.g. when Test 2 was skipped or run separately).
    from apps.surveys.models import SurveyTemplate

    patient = Patient.objects.first()
    hospital = Hospital.objects.first()
    template = SurveyTemplate.objects.filter(is_active=True).first()

    if not all([patient, hospital, template]):
        print_error("Missing required data")
        return False

    # Create survey in the initial 'pending' state.
    survey = SurveyInstance.objects.create(
        survey_template=template,
        patient=patient,
        hospital=hospital,
        status='pending',
        sent_at=timezone.now(),
        token_expires_at=timezone.now() + timedelta(days=7),
        access_token=uuid.uuid4().hex
    )

    print("Testing status flow:")
    print(f" 1. Initial status: {survey.status}")

    # Mark as sent
    survey.status = 'sent'
    survey.save()
    print(f" 2. Sent status: {survey.status}")

    # Mark as viewed (simulating page view)
    survey.status = 'viewed'
    survey.opened_at = timezone.now()
    survey.last_opened_at = timezone.now()
    survey.save()
    print(f" 3. Viewed status: {survey.status}")

    # Mark as in_progress (simulating first interaction)
    survey.status = 'in_progress'
    survey.save()
    print(f" 4. In Progress status: {survey.status}")

    # Record the 'survey started' tracking event.
    SurveyTracking.track_event(
        survey,
        'survey_started',
        metadata={'test': True}
    )
    print(f" 5. Tracked survey_started event")

    # Mark as completed
    survey.status = 'completed'
    survey.completed_at = timezone.now()
    survey.time_spent_seconds = 300  # 5 minutes
    survey.save()
    print(f" 6. Completed status: {survey.status}")

    # List every tracking event recorded against this survey.
    events = survey.tracking_events.all()
    print(f"\n Tracking events: {events.count()}")
    for event in events:
        print(f"  - {event.event_type} at {event.created_at}")

    # Clean up
    survey.delete()
    print_success("Test survey cleaned up")

    return True


def test_abandoned_command():
    """Exercise the abandonment task over a batch of stale surveys.

    Creates three surveys opened 30 hours ago (mixed 'viewed' /
    'in_progress' states), runs ``mark_abandoned_surveys(hours=24)``,
    and verifies every one of them transitions to 'abandoned'.

    Returns:
        bool: True when all test surveys end up abandoned.
    """
    print_header("TEST 4: Management Command Test")

    # Fix: SurveyTemplate was only imported locally inside
    # test_abandoned_surveys(), so this function raised NameError
    # whenever it ran on its own.
    from apps.surveys.models import SurveyTemplate

    patient = Patient.objects.first()
    hospital = Hospital.objects.first()
    template = SurveyTemplate.objects.filter(is_active=True).first()

    if not all([patient, hospital, template]):
        print_error("Missing required data")
        return False

    now = timezone.now()

    # Create multiple stale test surveys (opened 30h ago; cutoff is 24h).
    test_surveys = []
    for i in range(3):
        survey = SurveyInstance.objects.create(
            survey_template=template,
            patient=patient,
            hospital=hospital,
            status='viewed' if i % 2 == 0 else 'in_progress',
            sent_at=now - timedelta(hours=30),
            opened_at=now - timedelta(hours=30),
            last_opened_at=now - timedelta(hours=30),
            token_expires_at=now + timedelta(days=7),
            access_token=uuid.uuid4().hex[:16]
        )
        test_surveys.append(survey)
        print(f"Created test survey {i+1}: {survey.status}, opened 30h ago")

    # Test dry run
    print("\nTesting dry-run mode...")
    # Note: This would normally be run via manage.py
    # For testing, we'll just call the task
    result = mark_abandoned_surveys(hours=24)
    print(f"Result: {result}")

    # Check results
    marked_count = result.get('marked', 0)
    print_success(f"Marked {marked_count} surveys as abandoned")

    # Fix: reload each instance first — the task updates rows in the
    # database, so the in-memory objects still carry the pre-task status
    # and the check below always reported failure.
    for survey in test_surveys:
        survey.refresh_from_db()

    # Verify all were marked
    all_abandoned = all(s.status == 'abandoned' for s in test_surveys)

    if all_abandoned:
        print_success("All test surveys marked as abandoned")
    else:
        print_error("Not all surveys were marked as abandoned")
        for i, survey in enumerate(test_surveys):
            print(f"  Survey {i+1}: {survey.status}")

    # Clean up
    for survey in test_surveys:
        survey.delete()
    print_success("Test surveys cleaned up")

    return all_abandoned


def main():
    """Run all tests"""
    print_header("Survey Status Transitions Test Suite")
    print("This script tests the survey status tracking implementation:")
    print(" 1. in_progress transition (when patient starts answering)")
    print(" 2. abandoned detection (automatic)")
    print(" 3. Complete status flow")
    print(" 4. Management command")

    # (label, callable) pairs, executed in order.
    suite = [
        ('in_progress Transition', test_in_progress_transition),
        ('Abandoned Detection', test_abandoned_surveys),
        ('Status Flow', test_status_flow),
        ('Management Command', test_abandoned_command),
    ]

    results = []
    try:
        for label, test_fn in suite:
            results.append((label, test_fn()))
    except Exception as e:
        # A crash aborts the remaining tests but still prints the summary.
        print_error(f"Test failed with error: {str(e)}")
        import traceback
        traceback.print_exc()

    # Print summary
    print_header("Test Summary")
    passed = sum(1 for _, ok in results if ok)
    total = len(results)

    for label, ok in results:
        print(f"{'✓ PASS' if ok else '✗ FAIL'}: {label}")

    print(f"\nTotal: {passed}/{total} tests passed")

    if passed == total:
        print_success("All tests passed!")
        return 0
    print_error(f"{total - passed} test(s) failed")
    return 1


if __name__ == '__main__':
    sys.exit(main())
|