585 lines
22 KiB
Python
585 lines
22 KiB
Python
"""
|
|
AI Service - Base class for all AI interactions using LiteLLM
|
|
|
|
This module provides a unified interface for AI operations using LiteLLM
|
|
with OpenRouter as the provider. This replaces the stub AI engine.
|
|
|
|
Features:
|
|
- Complaint analysis (severity, priority classification)
|
|
- Chat completion for general AI tasks
|
|
- Sentiment analysis
|
|
- Entity extraction
|
|
- Language detection
|
|
"""
|
|
import os
|
|
import json
|
|
import logging
|
|
from typing import Dict, List, Optional, Any
|
|
|
|
from django.conf import settings
|
|
from django.core.cache import cache
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class AIServiceError(Exception):
|
|
"""Custom exception for AI service errors"""
|
|
pass
|
|
|
|
|
|
class AIService:
|
|
"""
|
|
Base AI Service class using LiteLLM with OpenRouter.
|
|
|
|
This is the single source of truth for all AI interactions in the application.
|
|
"""
|
|
|
|
OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
|
|
OPENROUTER_API_KEY = "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"
|
|
# Default configuration
|
|
DEFAULT_MODEL = "openrouter/xiaomi/mimo-v2-flash:free"
|
|
DEFAULT_TEMPERATURE = 0.3
|
|
DEFAULT_MAX_TOKENS = 500
|
|
DEFAULT_TIMEOUT = 30
|
|
|
|
# Severity choices
|
|
SEVERITY_CHOICES = ['low', 'medium', 'high', 'critical']
|
|
|
|
# Priority choices
|
|
PRIORITY_CHOICES = ['low', 'medium', 'high']
|
|
|
|
|
|
@classmethod
|
|
def _get_api_key(cls) -> str:
|
|
"""Get OpenRouter API key from settings"""
|
|
# Use 'or' operator to fall back to DEFAULT_API_KEY when setting is empty or not set
|
|
api_key = cls.OPENROUTER_API_KEY
|
|
os.environ["OPENROUTER_API_KEY"] = api_key
|
|
os.environ["OPENROUTER_API_BASE"] = cls.OPENROUTER_BASE_URL
|
|
return api_key
|
|
|
|
@classmethod
|
|
def _get_model(cls) -> str:
|
|
"""Get AI model from settings"""
|
|
return getattr(settings, 'AI_MODEL') or cls.DEFAULT_MODEL
|
|
|
|
@classmethod
|
|
def _get_temperature(cls) -> float:
|
|
"""Get AI temperature from settings"""
|
|
return float(getattr(settings, 'AI_TEMPERATURE')) or cls.DEFAULT_TEMPERATURE
|
|
|
|
@classmethod
|
|
def _get_max_tokens(cls) -> int:
|
|
"""Get max tokens from settings"""
|
|
return int(getattr(settings, 'AI_MAX_TOKENS')) or cls.DEFAULT_MAX_TOKENS
|
|
@classmethod
|
|
def _get_complaint_categories(cls) -> List[str]:
|
|
"""Get complaint categories from settings"""
|
|
from apps.complaints.models import ComplaintCategory
|
|
|
|
return ComplaintCategory.objects.all().values_list('name_en', flat=True)
|
|
|
|
@classmethod
|
|
def _get_complaint_sub_categories(cls, category) -> List[str]:
|
|
"""Get complaint subcategories for a given category name"""
|
|
from apps.complaints.models import ComplaintCategory
|
|
if category:
|
|
try:
|
|
# Find the category by name and get its subcategories
|
|
category_obj = ComplaintCategory.objects.filter(name_en=category).first()
|
|
if category_obj:
|
|
return ComplaintCategory.objects.filter(parent=category_obj).values_list('name_en', flat=True)
|
|
except Exception as e:
|
|
logger.error(f"Error fetching subcategories: {e}")
|
|
return []
|
|
|
|
@classmethod
|
|
def _get_all_categories_with_subcategories(cls) -> Dict[str, List[str]]:
|
|
"""Get all categories with their subcategories in a structured format"""
|
|
from apps.complaints.models import ComplaintCategory
|
|
|
|
result = {}
|
|
try:
|
|
# Get all parent categories (no parent or parent is null)
|
|
parent_categories = ComplaintCategory.objects.filter(parent__isnull=True).all()
|
|
|
|
for category in parent_categories:
|
|
# Get subcategories for this parent
|
|
subcategories = list(
|
|
ComplaintCategory.objects.filter(parent=category).values_list('name_en', flat=True)
|
|
)
|
|
result[category.name_en] = subcategories if subcategories else []
|
|
|
|
except Exception as e:
|
|
logger.error(f"Error fetching categories with subcategories: {e}")
|
|
|
|
return result
|
|
|
|
@classmethod
|
|
def _get_hospital_departments(cls, hospital_id: int) -> List[str]:
|
|
"""Get all departments for a specific hospital"""
|
|
from apps.organizations.models import Department
|
|
|
|
try:
|
|
departments = Department.objects.filter(
|
|
hospital_id=hospital_id,
|
|
status='active'
|
|
).values_list('name', flat=True)
|
|
return list(departments)
|
|
except Exception as e:
|
|
logger.error(f"Error fetching hospital departments: {e}")
|
|
return []
|
|
|
|
|
|
@classmethod
|
|
def chat_completion(
|
|
cls,
|
|
prompt: str,
|
|
model: Optional[str] = None,
|
|
temperature: Optional[float] = None,
|
|
max_tokens: Optional[int] = None,
|
|
system_prompt: Optional[str] = None,
|
|
response_format: Optional[str] = None
|
|
) -> str:
|
|
"""
|
|
Perform a chat completion using LiteLLM.
|
|
|
|
Args:
|
|
prompt: User prompt
|
|
model: AI model (uses default if not provided)
|
|
temperature: Temperature for randomness (uses default if not provided)
|
|
max_tokens: Maximum tokens to generate
|
|
system_prompt: System prompt to set context
|
|
response_format: Response format ('text' or 'json_object')
|
|
|
|
Returns:
|
|
Generated text response
|
|
|
|
Raises:
|
|
AIServiceError: If API call fails
|
|
"""
|
|
try:
|
|
from litellm import completion
|
|
|
|
api_key = cls._get_api_key()
|
|
|
|
model_name = model or cls._get_model()
|
|
temp = temperature if temperature is not None else cls._get_temperature()
|
|
max_tok = max_tokens or cls._get_max_tokens()
|
|
|
|
# Build messages
|
|
messages = []
|
|
if system_prompt:
|
|
messages.append({"role": "system", "content": system_prompt})
|
|
messages.append({"role": "user", "content": prompt})
|
|
|
|
# Build kwargs
|
|
kwargs = {
|
|
"model": "openrouter/xiaomi/mimo-v2-flash:free",
|
|
"messages": messages
|
|
}
|
|
|
|
if response_format:
|
|
kwargs["response_format"] = {"type": response_format}
|
|
|
|
logger.info(f"AI Request: model={model_name}, temp={temp}")
|
|
|
|
response = completion(**kwargs)
|
|
|
|
content = response.choices[0].message.content
|
|
logger.info(f"AI Response: length={len(content)}")
|
|
|
|
return content
|
|
|
|
except Exception as e:
|
|
logger.error(f"AI service error: {str(e)}")
|
|
raise AIServiceError(f"Failed to get AI response: {str(e)}")
|
|
|
|
@classmethod
|
|
def analyze_complaint(
|
|
cls,
|
|
title: Optional[str] = None,
|
|
description: str = "",
|
|
category: Optional[str] = None,
|
|
hospital_id: Optional[int] = None
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Analyze a complaint and determine title, severity, priority, category, subcategory, and department.
|
|
|
|
Args:
|
|
title: Complaint title (optional, will be generated if not provided)
|
|
description: Complaint description
|
|
category: Complaint category
|
|
hospital_id: Hospital ID to fetch departments
|
|
|
|
Returns:
|
|
Dictionary with analysis:
|
|
{
|
|
'title': str, # Generated or provided title
|
|
'short_description': str, # 2-3 sentence summary of the complaint
|
|
'severity': 'low' | 'medium' | 'high' | 'critical',
|
|
'priority': 'low' | 'medium' | 'high',
|
|
'category': str, # Name of the category
|
|
'subcategory': str, # Name of the subcategory
|
|
'department': str, # Name of the department
|
|
'reasoning': str # Explanation for the classification
|
|
}
|
|
"""
|
|
# Check cache first
|
|
cache_key = f"complaint_analysis:{hash(str(title) + description + str(hospital_id))}"
|
|
cached_result = cache.get(cache_key)
|
|
if cached_result:
|
|
logger.info("Using cached complaint analysis")
|
|
return cached_result
|
|
|
|
# Get categories with subcategories
|
|
categories_with_subcategories = cls._get_all_categories_with_subcategories()
|
|
|
|
# Format categories for the prompt
|
|
categories_text = ""
|
|
for cat, subcats in categories_with_subcategories.items():
|
|
if subcats:
|
|
categories_text += f"- {cat} (subcategories: {', '.join(subcats)})\n"
|
|
else:
|
|
categories_text += f"- {cat}\n"
|
|
|
|
# Get hospital departments if hospital_id is provided
|
|
departments_text = ""
|
|
if hospital_id:
|
|
departments = cls._get_hospital_departments(hospital_id)
|
|
if departments:
|
|
departments_text = f"\nAvailable Departments for this hospital:\n"
|
|
for dept in departments:
|
|
departments_text += f"- {dept}\n"
|
|
departments_text += "\n"
|
|
|
|
# Build prompt
|
|
title_text = f"Complaint Title: {title}\n" if title else ""
|
|
prompt = f"""Analyze this complaint and classify its severity, priority, category, subcategory, and department.
|
|
|
|
Complaint Description: {description}
|
|
{title_text}Current Category: {category or 'not specified'}{departments_text}Severity Classification (choose one):
|
|
- low: Minor issues, no impact on patient care, routine matters
|
|
- medium: Moderate issues, some patient dissatisfaction, not urgent
|
|
- high: Serious issues, significant patient impact, requires timely attention
|
|
- critical: Emergency, immediate threat to patient safety, requires instant action
|
|
|
|
Priority Classification (choose one):
|
|
- low: Can be addressed within 1-2 weeks
|
|
- medium: Should be addressed within 3-5 days
|
|
- high: Requires immediate attention (within 24 hours)
|
|
|
|
Available Categories and Subcategories:
|
|
{categories_text}
|
|
|
|
Instructions:
|
|
1. If no title is provided, generate a concise title (max 10 words) that summarizes the complaint in BOTH English and Arabic
|
|
2. Generate a short_description (2-3 sentences) that captures the main issue and context in BOTH English and Arabic
|
|
3. Select the most appropriate category from the list above
|
|
4. If the selected category has subcategories, choose the most relevant one
|
|
5. If a category has no subcategories, leave the subcategory field empty
|
|
6. Select the most appropriate department from the hospital's departments (if available)
|
|
7. If no departments are available or department is unclear, leave the department field empty
|
|
8. Generate a suggested_action (2-3 sentences) with specific, actionable steps to address this complaint in BOTH English and Arabic
|
|
|
|
IMPORTANT: ALL TEXT FIELDS MUST BE PROVIDED IN BOTH ENGLISH AND ARABIC
|
|
- title: Provide in both English and Arabic
|
|
- short_description: Provide in both English and Arabic
|
|
- suggested_action: Provide in both English and Arabic
|
|
- reasoning: Provide in both English and Arabic
|
|
|
|
Provide your analysis in JSON format:
|
|
{{
|
|
"title_en": "concise title in English summarizing the complaint (max 10 words)",
|
|
"title_ar": "العنوان بالعربية",
|
|
"short_description_en": "2-3 sentence summary in English of the complaint that captures the main issue and context",
|
|
"short_description_ar": "ملخص من 2-3 جمل بالعربية",
|
|
"severity": "low|medium|high|critical",
|
|
"priority": "low|medium|high",
|
|
"category": "exact category name from the list above",
|
|
"subcategory": "exact subcategory name from the chosen category, or empty string if not applicable",
|
|
"department": "exact department name from the hospital's departments, or empty string if not applicable",
|
|
"suggested_action_en": "2-3 specific, actionable steps in English to address this complaint",
|
|
"suggested_action_ar": "خطوات محددة وعمليه بالعربية",
|
|
"reasoning_en": "Brief explanation in English of your classification (2-3 sentences)",
|
|
"reasoning_ar": "شرح مختصر بالعربية"
|
|
}}"""
|
|
|
|
system_prompt = """You are a healthcare complaint analysis expert fluent in both English and Arabic.
|
|
Your job is to classify complaints based on their potential impact on patient care and safety.
|
|
Be conservative - when in doubt, choose a higher severity/priority.
|
|
Generate clear, concise titles that accurately summarize the complaint in BOTH English and Arabic.
|
|
Provide all text fields in both languages."""
|
|
|
|
try:
|
|
response = cls.chat_completion(
|
|
prompt=prompt,
|
|
system_prompt=system_prompt,
|
|
response_format="json_object",
|
|
temperature=0.2 # Lower temperature for consistent classification
|
|
)
|
|
|
|
# Parse JSON response
|
|
result = json.loads(response)
|
|
|
|
# Use provided title if available, otherwise use AI-generated title
|
|
if title:
|
|
result['title'] = title
|
|
|
|
# Validate severity
|
|
if result.get('severity') not in cls.SEVERITY_CHOICES:
|
|
result['severity'] = 'medium'
|
|
logger.warning(f"Invalid severity, defaulting to medium")
|
|
|
|
# Validate priority
|
|
if result.get('priority') not in cls.PRIORITY_CHOICES:
|
|
result['priority'] = 'medium'
|
|
logger.warning(f"Invalid priority, defaulting to medium")
|
|
|
|
# Validate category
|
|
if result.get('category') not in cls._get_complaint_categories():
|
|
result['category'] = 'other'
|
|
logger.warning(f"Invalid category, defaulting to 'Not specified'")
|
|
|
|
# Ensure title exists
|
|
if not result.get('title'):
|
|
result['title'] = 'Complaint'
|
|
|
|
# Cache result for 1 hour
|
|
cache.set(cache_key, result, timeout=3600)
|
|
|
|
logger.info(f"Complaint analyzed: title={result['title']}, severity={result['severity']}, priority={result['priority']}, department={result.get('department', 'N/A')}")
|
|
return result
|
|
|
|
except json.JSONDecodeError as e:
|
|
logger.error(f"Failed to parse AI response: {e}")
|
|
# Return defaults
|
|
return {
|
|
'title': title or 'Complaint',
|
|
'severity': 'medium',
|
|
'priority': 'medium',
|
|
'category': 'other',
|
|
'subcategory': '',
|
|
'department': '',
|
|
'reasoning': 'AI analysis failed, using default values'
|
|
}
|
|
except AIServiceError as e:
|
|
logger.error(f"AI service error: {e}")
|
|
return {
|
|
'title': title or 'Complaint',
|
|
'severity': 'medium',
|
|
'priority': 'medium',
|
|
'category': 'other',
|
|
'subcategory': '',
|
|
'department': '',
|
|
'reasoning': f'AI service unavailable: {str(e)}'
|
|
}
|
|
|
|
@classmethod
|
|
def classify_sentiment(
|
|
cls,
|
|
text: str
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Classify sentiment of text.
|
|
|
|
Args:
|
|
text: Text to analyze
|
|
|
|
Returns:
|
|
Dictionary with sentiment analysis:
|
|
{
|
|
'sentiment': 'positive' | 'neutral' | 'negative',
|
|
'score': float, # -1.0 to 1.0
|
|
'confidence': float # 0.0 to 1.0
|
|
}
|
|
"""
|
|
prompt = f"""Analyze the sentiment of this text:
|
|
|
|
{text}
|
|
|
|
Provide your analysis in JSON format:
|
|
{{
|
|
"sentiment": "positive|neutral|negative",
|
|
"score": float, # -1.0 (very negative) to 1.0 (very positive)
|
|
"confidence": float # 0.0 to 1.0
|
|
}}"""
|
|
|
|
system_prompt = """You are a sentiment analysis expert.
|
|
Analyze the emotional tone of the text accurately."""
|
|
|
|
try:
|
|
response = cls.chat_completion(
|
|
prompt=prompt,
|
|
system_prompt=system_prompt,
|
|
response_format="json_object",
|
|
temperature=0.1
|
|
)
|
|
|
|
result = json.loads(response)
|
|
return result
|
|
|
|
except (json.JSONDecodeError, AIServiceError) as e:
|
|
logger.error(f"Sentiment analysis failed: {e}")
|
|
return {
|
|
'sentiment': 'neutral',
|
|
'score': 0.0,
|
|
'confidence': 0.0
|
|
}
|
|
|
|
@classmethod
|
|
def analyze_emotion(
|
|
cls,
|
|
text: str
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Analyze emotion in text to identify primary emotion and intensity.
|
|
|
|
Args:
|
|
text: Text to analyze (supports English and Arabic)
|
|
|
|
Returns:
|
|
Dictionary with emotion analysis:
|
|
{
|
|
'emotion': 'anger' | 'sadness' | 'confusion' | 'fear' | 'neutral',
|
|
'intensity': float, # 0.0 to 1.0 (how strong the emotion is)
|
|
'confidence': float # 0.0 to 1.0 (how confident AI is)
|
|
}
|
|
"""
|
|
prompt = f"""Analyze the primary emotion in this text (supports English and Arabic):
|
|
|
|
{text}
|
|
|
|
Identify the PRIMARY emotion from these options:
|
|
- anger: Strong feelings of displeasure, hostility, or rage
|
|
- sadness: Feelings of sorrow, grief, or unhappiness
|
|
- confusion: Lack of understanding, bewilderment, or uncertainty
|
|
- fear: Feelings of anxiety, worry, or being afraid
|
|
- neutral: No strong emotion detected
|
|
|
|
Provide your analysis in JSON format:
|
|
{{
|
|
"emotion": "anger|sadness|confusion|fear|neutral",
|
|
"intensity": float, # 0.0 (very weak) to 1.0 (extremely strong)
|
|
"confidence": float # 0.0 to 1.0 (how confident you are)
|
|
}}
|
|
|
|
Examples:
|
|
- "This is unacceptable! I demand to speak to management!" -> emotion: "anger", intensity: 0.9
|
|
- "I'm very disappointed with the care my father received" -> emotion: "sadness", intensity: 0.7
|
|
- "I don't understand what happened, can you explain?" -> emotion: "confusion", intensity: 0.5
|
|
- "I'm worried about the side effects of this medication" -> emotion: "fear", intensity: 0.6
|
|
- "I would like to report a minor issue" -> emotion: "neutral", intensity: 0.2
|
|
"""
|
|
|
|
system_prompt = """You are an emotion analysis expert fluent in both English and Arabic.
|
|
Analyze the text to identify the PRIMARY emotion and its intensity.
|
|
Be accurate in distinguishing between different emotions.
|
|
Provide intensity scores that reflect how strongly the emotion is expressed (0.0 to 1.0)."""
|
|
|
|
try:
|
|
response = cls.chat_completion(
|
|
prompt=prompt,
|
|
system_prompt=system_prompt,
|
|
response_format="json_object",
|
|
temperature=0.1
|
|
)
|
|
|
|
result = json.loads(response)
|
|
|
|
# Validate emotion
|
|
valid_emotions = ['anger', 'sadness', 'confusion', 'fear', 'neutral']
|
|
if result.get('emotion') not in valid_emotions:
|
|
result['emotion'] = 'neutral'
|
|
logger.warning(f"Invalid emotion detected, defaulting to neutral")
|
|
|
|
# Validate intensity
|
|
intensity = float(result.get('intensity', 0.0))
|
|
if not (0.0 <= intensity <= 1.0):
|
|
intensity = max(0.0, min(1.0, intensity))
|
|
result['intensity'] = intensity
|
|
logger.warning(f"Intensity out of range, clamping to {intensity}")
|
|
|
|
# Validate confidence
|
|
confidence = float(result.get('confidence', 0.0))
|
|
if not (0.0 <= confidence <= 1.0):
|
|
confidence = max(0.0, min(1.0, confidence))
|
|
result['confidence'] = confidence
|
|
logger.warning(f"Confidence out of range, clamping to {confidence}")
|
|
|
|
logger.info(f"Emotion analysis: {result['emotion']}, intensity={intensity}, confidence={confidence}")
|
|
return result
|
|
|
|
except (json.JSONDecodeError, AIServiceError) as e:
|
|
logger.error(f"Emotion analysis failed: {e}")
|
|
return {
|
|
'emotion': 'neutral',
|
|
'intensity': 0.0,
|
|
'confidence': 0.0
|
|
}
|
|
|
|
@classmethod
|
|
def extract_entities(cls, text: str) -> List[Dict[str, str]]:
|
|
prompt = f"""Extract named entities from this text:
|
|
"{text}"
|
|
|
|
Focus heavily on PERSON names.
|
|
IMPORTANT: Extract the clean name only. Remove titles like 'Dr.', 'Nurse', 'Mr.', 'Professor', 'دكتور', 'ممرض'.
|
|
|
|
Provide entities in JSON format:
|
|
{{
|
|
"entities": [
|
|
{{"text": "Name", "type": "PERSON"}},
|
|
{{"text": "DepartmentName", "type": "ORGANIZATION"}}
|
|
]
|
|
}}"""
|
|
|
|
system_prompt = "You are an expert in bilingual NER (Arabic and English). Extract formal names for database lookup."
|
|
|
|
try:
|
|
response = cls.chat_completion(
|
|
prompt=prompt,
|
|
system_prompt=system_prompt,
|
|
response_format="json_object",
|
|
temperature=0.0
|
|
)
|
|
return json.loads(response).get('entities', [])
|
|
except (json.JSONDecodeError, AIServiceError):
|
|
return []
|
|
|
|
@classmethod
|
|
def generate_summary(cls, text: str, max_length: int = 200) -> str:
|
|
"""
|
|
Generate a summary of text.
|
|
|
|
Args:
|
|
text: Text to summarize
|
|
max_length: Maximum length of summary
|
|
|
|
Returns:
|
|
Summary text
|
|
"""
|
|
prompt = f"""Summarize this text in {max_length} characters or less:
|
|
|
|
{text}"""
|
|
|
|
system_prompt = """You are a text summarization expert.
|
|
Create a concise summary that captures the main points."""
|
|
|
|
try:
|
|
response = cls.chat_completion(
|
|
prompt=prompt,
|
|
system_prompt=system_prompt,
|
|
temperature=0.3,
|
|
max_tokens=150
|
|
)
|
|
|
|
return response.strip()
|
|
|
|
except AIServiceError as e:
|
|
logger.error(f"Summary generation failed: {e}")
|
|
return text[:max_length]
|
|
|
|
# Convenience singleton instance
|
|
ai_service = AIService()
|