HH/test_survey_analytics_enhanced.py

206 lines
7.3 KiB
Python

#!/usr/bin/env python
"""
Test script for enhanced survey analytics report generation
"""
import os
import sys
import django
# Setup Django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PX360.settings')
sys.path.insert(0, '/home/ismail/projects/HH')
django.setup()
from django.core.management import call_command
from io import StringIO
def test_survey_analytics():
"""Test the enhanced survey analytics command"""
print("=" * 80)
print("Testing Enhanced Survey Analytics Report Generation")
print("=" * 80)
print()
# Create output directory
output_dir = 'test_analytics_output'
os.makedirs(output_dir, exist_ok=True)
print(f"Output directory: {output_dir}")
print()
# Test 1: Basic report generation (Markdown only)
print("Test 1: Generating basic Markdown report...")
try:
out = StringIO()
call_command(
'generate_survey_analytics_report',
output_dir=output_dir,
stdout=out
)
print("✓ Basic Markdown report generated successfully")
print()
except Exception as e:
print(f"✗ Error generating basic report: {e}")
print()
return False
# Test 2: Generate JSON report
print("Test 2: Generating JSON report...")
try:
out = StringIO()
call_command(
'generate_survey_analytics_report',
'--json',
output_dir=output_dir,
stdout=out
)
print("✓ JSON report generated successfully")
print()
# Check if JSON file exists and is valid
json_path = os.path.join(output_dir, 'survey_analytics_data.json')
if os.path.exists(json_path):
import json
with open(json_path, 'r') as f:
data = json.load(f)
print(f" - JSON file size: {os.path.getsize(json_path)} bytes")
print(f" - Templates analyzed: {len(data.get('templates', []))}")
if data.get('templates'):
print(f" - Sample template: {data['templates'][0]['template_name']}")
print(f" - Questions in sample: {data['templates'][0].get('question_count', 0)}")
print(f" - Rankings available: {'rankings' in data['templates'][0]}")
print(f" - Insights available: {'insights' in data['templates'][0]}")
if 'insights' in data['templates'][0]:
print(f" - Number of insights: {len(data['templates'][0]['insights'])}")
print()
except Exception as e:
print(f"✗ Error generating JSON report: {e}")
import traceback
traceback.print_exc()
print()
return False
# Test 3: Generate HTML report
print("Test 3: Generating HTML report...")
try:
out = StringIO()
call_command(
'generate_survey_analytics_report',
'--html',
output_dir=output_dir,
stdout=out
)
print("✓ HTML report generated successfully")
print()
# Check if HTML file exists
html_path = os.path.join(output_dir, 'survey_analytics_report.html')
if os.path.exists(html_path):
print(f" - HTML file size: {os.path.getsize(html_path)} bytes")
with open(html_path, 'r') as f:
content = f.read()
print(f" - Contains ApexCharts: {'ApexCharts' in content}")
print(f" - Contains charts: {'chart' in content.lower()}")
print()
except Exception as e:
print(f"✗ Error generating HTML report: {e}")
import traceback
traceback.print_exc()
print()
return False
# Test 4: Test with specific template
print("Test 4: Generating report for specific template...")
try:
from apps.surveys.models import SurveyTemplate
template = SurveyTemplate.objects.filter(is_active=True).first()
if template:
out = StringIO()
call_command(
'generate_survey_analytics_report',
'--template', template.name,
output_dir=output_dir,
stdout=out
)
print(f"✓ Report generated for template: {template.name}")
print()
else:
print("⚠ No active survey templates found, skipping template-specific test")
print()
except Exception as e:
print(f"✗ Error generating template-specific report: {e}")
print()
return False
# Test 5: Verify new features
print("Test 5: Verifying new features...")
json_path = os.path.join(output_dir, 'survey_analytics_data.json')
if os.path.exists(json_path):
import json
with open(json_path, 'r') as f:
data = json.load(f)
if data.get('templates'):
template = data['templates'][0]
# Check for statistical analysis
if template.get('questions'):
question = template['questions'][0]
has_skewness = 'skewness' in question
has_kurtosis = 'kurtosis' in question
has_correlation = 'correlation_with_overall' in question
has_channel_performance = 'channel_performance' in question
print(f" - Statistical Analysis:")
print(f" ✓ Skewness: {has_skewness}")
print(f" ✓ Kurtosis: {has_kurtosis}")
print(f" ✓ Correlation: {has_correlation}")
print(f" ✓ Channel Performance: {has_channel_performance}")
print()
# Check for rankings
has_rankings = 'rankings' in template
if has_rankings:
rankings = template['rankings']
has_top5 = 'top_5_by_score' in rankings
has_bottom5 = 'bottom_5_by_score' in rankings
has_correlation_ranking = 'top_5_by_correlation' in rankings
has_skipped = 'most_skipped_5' in rankings
print(f" - Question Rankings:")
print(f" ✓ Top 5 by score: {has_top5}")
print(f" ✓ Bottom 5 by score: {has_bottom5}")
print(f" ✓ Top 5 by correlation: {has_correlation_ranking}")
print(f" ✓ Most skipped 5: {has_skipped}")
if has_top5:
print(f" - Top question: {rankings['top_5_by_score'][0]['question']}")
print()
# Check for insights
has_insights = 'insights' in template
if has_insights:
insights = template['insights']
print(f" - AI-Powered Insights:")
print(f" ✓ Insights generated: {len(insights)}")
for insight in insights[:3]: # Show first 3
print(f" - [{insight['severity']}] {insight['category']}: {insight['message'][:80]}...")
print()
print("=" * 80)
print("✓ All tests completed successfully!")
print("=" * 80)
print()
print(f"Output files generated in: {output_dir}/")
print(f" - survey_analytics_report.md")
print(f" - survey_analytics_data.json")
print(f" - survey_analytics_report.html")
print()
return True
if __name__ == '__main__':
success = test_survey_analytics()
sys.exit(0 if success else 1)