This commit is contained in:
Marwan Alwali 2025-05-29 21:42:27 +03:00
parent fb0d7f0f20
commit 56cfbad80e
39 changed files with 3224 additions and 2662 deletions

BIN
.DS_Store vendored

Binary file not shown.

1
.gitignore vendored
View File

@ -14,6 +14,7 @@ media
car*.json
car_inventory/settings.py
car_inventory/__pycache__
haikalbot/temp_files_not_included
scripts/dsrpipe.py
def_venv
# Backup files #

BIN
haikalbot/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -1,204 +0,0 @@
# Optimizing Qwen3-8B for Arabic Language Support in Django AI Analyst
This guide provides specific recommendations for using Qwen3-8B with your Django AI Analyst application for Arabic language support.
## Qwen3-8B Overview
Qwen3-8B is a powerful multilingual large language model developed by Alibaba Cloud. It offers several advantages for Arabic language processing:
- **Strong multilingual capabilities**: Trained on diverse multilingual data including Arabic
- **Efficient performance**: 8B parameter size balances capability and resource requirements
- **Instruction following**: Excellent at following structured instructions in multiple languages
- **Context understanding**: Good comprehension of Arabic context and nuances
- **JSON formatting**: Reliable at generating structured JSON outputs
## Configuration Settings for Qwen3-8B
Update your Django settings to use Qwen3-8B:
```python
# In settings.py
OLLAMA_BASE_URL = "http://10.10.1.132:11434"
OLLAMA_MODEL = "qwen3:8b"
OLLAMA_TIMEOUT = 120 # Seconds
```
## Optimized Parameters for Arabic
When initializing the Ollama LLM with Qwen3-8B for Arabic, use these optimized parameters:
```python
def get_ollama_llm():
"""
Initialize and return an Ollama LLM instance configured for Arabic support with Qwen3-8B.
"""
try:
# Get settings from Django settings or use defaults
base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
# Configure Ollama with parameters optimized for Qwen3-8B with Arabic
return Ollama(
base_url=base_url,
model=model,
timeout=timeout,
# Parameters optimized for Qwen3-8B with Arabic
parameters={
"temperature": 0.2, # Lower temperature for more deterministic outputs
"top_p": 0.8, # Slightly reduced for more focused responses
"top_k": 40, # Standard value works well with Qwen3
"num_ctx": 4096, # Qwen3 supports larger context windows
"num_predict": 2048, # Maximum tokens to generate
"stop": ["```", "</s>"], # Stop sequences for JSON generation
"repeat_penalty": 1.1 # Slight penalty to avoid repetition
}
)
except Exception as e:
logger.error(f"Error initializing Ollama LLM: {str(e)}")
return None
```
## Prompt Template Optimization for Qwen3-8B
Qwen3-8B responds well to clear, structured prompts. For Arabic analysis, use this optimized template:
```python
def create_prompt_analyzer_chain(language='ar'):
"""
Create a LangChain for analyzing prompts in Arabic with Qwen3-8B.
"""
llm = get_ollama_llm()
if not llm:
return None
# Define the prompt template optimized for Qwen3-8B
if language == 'ar':
template = """
أنت مساعد ذكي متخصص في تحليل نماذج Django. مهمتك هي تحليل الاستعلام التالي وتحديد:
1. نوع التحليل المطلوب
2. نماذج البيانات المستهدفة
3. أي معلمات استعلام
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON فقط، بدون أي نص إضافي، كما يلي:
```json
{{
"analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
```
"""
else:
template = """
You are an intelligent assistant specialized in analyzing Django models. Your task is to analyze the following prompt and determine:
1. The type of analysis required
2. Target data models
3. Any query parameters
Prompt: {prompt}
Provide your answer in JSON format only, without any additional text, as follows:
```json
{{
"analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
```
"""
# Create the prompt template
prompt_template = PromptTemplate(
input_variables=["prompt"],
template=template
)
# Create and return the LLM chain
return LLMChain(llm=llm, prompt=prompt_template)
```
## Improved JSON Parsing for Qwen3-8B Responses
Qwen3-8B sometimes includes markdown formatting in its JSON responses. Use this improved parsing function:
```python
def _parse_llm_json_response(result):
"""
Parse JSON from Qwen3-8B response, handling markdown formatting.
"""
try:
# First try to extract JSON from markdown code blocks
json_match = re.search(r'```(?:json)?\s*([\s\S]*?)\s*```', result)
if json_match:
json_str = json_match.group(1).strip()
return json.loads(json_str)
# If no markdown blocks, try to find JSON object directly
json_match = re.search(r'({[\s\S]*})', result)
if json_match:
json_str = json_match.group(1).strip()
return json.loads(json_str)
# If still no match, try to parse the entire response as JSON
return json.loads(result.strip())
except Exception as e:
logger.warning(f"Failed to parse JSON from LLM response: {str(e)}")
return None
```
## Performance Considerations for Qwen3-8B
- **Memory Usage**: Qwen3-8B typically requires 8-16GB of RAM when running on Ollama
- **First Request Latency**: The first request may take 5-10 seconds as the model loads
- **Subsequent Requests**: Typically respond within 1-3 seconds
- **Batch Processing**: Consider batching multiple analyses for efficiency
## Handling Arabic-Specific Challenges with Qwen3-8B
1. **Diacritics**: Qwen3-8B handles Arabic diacritics well, but for consistency, consider normalizing input by removing diacritics
2. **Text Direction**: When displaying results in frontend, ensure proper RTL (right-to-left) support
3. **Dialectal Variations**: Qwen3-8B performs best with Modern Standard Arabic (MSA), but has reasonable support for major dialects
4. **Technical Terms**: For Django-specific technical terms, consider providing a glossary in both English and Arabic
## Example Arabic Prompts Optimized for Qwen3-8B
```
# Count query
كم عدد السيارات المتوفرة في النظام؟
# Relationship analysis
ما هي العلاقة بين نموذج المستخدم ونموذج الطلب؟
# Performance analysis
حدد مشاكل الأداء المحتملة في نموذج المنتج
# Statistical analysis
ما هو متوسط سعر السيارات المتوفرة؟
```
## Troubleshooting Qwen3-8B Specific Issues
1. **Incomplete JSON**: If Qwen3-8B returns incomplete JSON, try:
- Reducing the complexity of your prompt
- Lowering the temperature parameter to 0.1
- Adding explicit JSON formatting instructions
2. **Arabic Character Encoding**: If you see garbled Arabic text, ensure:
- Your database uses UTF-8 encoding
- All HTTP responses include proper content-type headers
- Frontend properly handles Arabic character rendering
3. **Slow Response Times**: If responses are slow:
- Consider using the quantized version: `qwen3:8b-q4_0`
- Reduce context window size if full 4096 context isn't needed
- Implement more aggressive caching
## Conclusion
Qwen3-8B is an excellent choice for Arabic language support in your Django AI Analyst application. With these optimized settings and techniques, you'll get reliable performance for analyzing Django models through Arabic natural language prompts.

View File

@ -1,163 +0,0 @@
# Django AI Analyst - README
This package provides a Django application that enables AI-powered analysis of Django models through natural language prompts. The AI agent can analyze model structures, relationships, and data to provide insights in JSON format.
## Features
- Natural language prompt processing for model analysis
- Support for various types of insights:
- Count queries (e.g., "How many cars do we have?")
- Relationship analysis between models
- Performance optimization suggestions
- Statistical analysis of model fields
- General model structure analysis
- Dealer-specific data access controls
- Caching mechanism for improved performance
- Visualization data generation for frontend display
- Comprehensive test suite
## Installation
1. Add 'ai_analyst' to your INSTALLED_APPS setting:
```python
INSTALLED_APPS = [
...
'ai_analyst',
]
```
2. Include the ai_analyst URLconf in your project urls.py:
```python
path('api/ai/', include('ai_analyst.urls')),
```
3. Run migrations to create the AnalysisCache model:
```bash
python manage.py makemigrations ai_analyst
python manage.py migrate
```
## Usage
Send POST requests to the `/api/ai/analyze/` endpoint with a JSON body containing a required `prompt` and an optional `dealer_id` (used for dealer-specific queries):
```json
{
"prompt": "How many cars do we have?",
"dealer_id": 1
}
```
The response will be a JSON object with insights based on the prompt:
```json
{
"status": "success",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"prompt": "How many cars do we have?",
"insights": [
{
"type": "count_analysis",
"results": [
{
"model": "Car",
"count": 42,
"filters_applied": {}
}
],
"visualization_data": {
"chart_type": "bar",
"labels": ["Car"],
"data": [42]
}
}
]
}
```
## Customization
### Cache Duration
You can customize the cache duration by setting the `CACHE_DURATION` class variable in the `ModelAnalystView` class:
```python
# In your settings.py
AI_ANALYST_CACHE_DURATION = 7200 # 2 hours in seconds
# Then in views.py
class ModelAnalystView(View):
CACHE_DURATION = getattr(settings, 'AI_ANALYST_CACHE_DURATION', 3600)
# ...
```
### Permission Logic
The `_check_permissions` method in `ModelAnalystView` can be customized to match your application's permission model:
```python
def _check_permissions(self, user, dealer_id):
# Your custom permission logic here
return user.has_perm('ai_analyst.can_analyze_models')
```
## Example Prompts
- "How many cars do we have?"
- "Show relationship between User and Order"
- "What is the average price of products?"
- "Count active users"
- "Identify performance issues in the Order model"
- "Show maximum age of customers"
## Frontend Integration
The JSON responses include visualization_data that can be used with charting libraries like Chart.js:
```javascript
// Example with Chart.js
fetch('/api/ai/analyze/', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt: 'How many cars do we have?',
dealer_id: 1
}),
})
.then(response => response.json())
.then(data => {
if (data.status === 'success' && data.insights.length > 0) {
const insight = data.insights[0];
const vizData = insight.visualization_data;
const ctx = document.getElementById('insightChart').getContext('2d');
new Chart(ctx, {
type: vizData.chart_type,
data: {
labels: vizData.labels,
datasets: [{
label: insight.type,
data: vizData.data,
backgroundColor: [
'rgba(255, 99, 132, 0.2)',
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)'
],
borderColor: [
'rgba(255, 99, 132, 1)',
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)'
],
borderWidth: 1
}]
}
});
}
});
```

788
haikalbot/ai_agent.py Normal file
View File

@ -0,0 +1,788 @@
from dataclasses import dataclass
from typing import List, Dict, Optional, Any, Union
from django.apps import apps
from django.db import models
from django.db.models import QuerySet, Q, F, Value, CharField, Sum, Avg, Count, Max, Min
from django.db.models.functions import Concat, Cast
from django.core.exceptions import FieldDoesNotExist
from django.core.serializers import serialize
from django.conf import settings
from langchain_ollama import ChatOllama
from langchain_core.messages import SystemMessage, HumanMessage
import json
import re
import logging
from functools import reduce
import operator
from sqlalchemy.orm import relationship
# Module-level logger for this analyzer module.
logger = logging.getLogger(__name__)
# Configuration settings
# Each value can be overridden in Django settings; the second getattr argument
# is the default used when the setting is absent.
LLM_MODEL = getattr(settings, 'MODEL_ANALYZER_LLM_MODEL', 'qwen:7b-chat')  # Ollama model tag
LLM_TEMPERATURE = getattr(settings, 'MODEL_ANALYZER_LLM_TEMPERATURE', 0.3)  # sampling temperature
LLM_MAX_TOKENS = getattr(settings, 'MODEL_ANALYZER_LLM_MAX_TOKENS', 2048)  # NOTE(review): appears unused in this file (not passed to ChatOllama) -- confirm
CACHE_TIMEOUT = getattr(settings, 'MODEL_ANALYZER_CACHE_TIMEOUT', 3600)  # seconds; NOTE(review): appears unused in this file -- confirm
# System prompt sent verbatim to the LLM by DjangoModelAnalyzer.analyze_prompt.
# It instructs the model to emit the JSON "analysis_requirements" structure
# that _enhance_analysis consumes. Runtime text -- do not reformat.
system_instruction = """
You are a specialized AI agent designed to analyze Django models and extract relevant information based on user input in Arabic or English. You must:
1. Model Analysis:
- Parse the user's natural language prompt to understand the analysis requirements
- Identify the relevant Django model(s) from the provided model structure
- Extract only the fields needed for the specific analysis
- Handle both direct fields and relationship fields appropriately
2. Field Selection:
- Determine relevant fields based on:
* Analysis type (count, average, sum, etc.)
* Explicit field mentions in the prompt
* Related fields needed for joins
* Common fields for the requested analysis type
3. Return Structure:
Return a JSON response with:
{
"status": "success",
"analysis_requirements": {
"app_label": "<django_app_name>",
"model_name": "<model_name>",
"fields": ["field1", "field2", ...],
"relationships": [{"field": "related_field", "type": "relation_type", "to": "related_model"}]
},
"language": "<ar|en>"
}
4. Analysis Types:
- COUNT queries: Return id field
- AGGREGATE queries (avg, sum): Return numeric fields
- DATE queries: Return date/timestamp fields
- RELATIONSHIP queries: Return foreign key and related fields
- TEXT queries: Return relevant text fields
5. Special Considerations:
- Handle both Arabic and English inputs
- Consider model relationships for joined queries
- Include only fields necessary for the requested analysis
- Support filtering and grouping requirements
"""
@dataclass
class FieldAnalysis:
    """Metadata about a single model field considered for an analysis."""
    # Field name as declared on the Django model.
    name: str
    # Django internal type name, e.g. "CharField" or "ForeignKey".
    field_type: str
    # True when the field is not nullable (null=False); defaults to True for
    # fields without a `null` attribute.
    is_required: bool
    # True for relational fields (FK, O2O, M2M and reverse relations).
    is_relation: bool
    # Class name of the related model when is_relation is True, else None.
    related_model: Optional[str] = None
    # Heuristic relevance score in [0, 1], filled in by DjangoModelAnalyzer.
    analysis_relevance: float = 0.0
@dataclass
class ModelAnalysis:
    """Resolved analysis plan for one Django model, produced by DjangoModelAnalyzer."""
    # Django app label the model belongs to.
    app_label: str
    # Class name of the target model.
    model_name: str
    # Fields to query, sorted by descending analysis_relevance.
    relevant_fields: List[FieldAnalysis]
    # Relationship descriptors: {"field": ..., "type": ..., "to": ...}.
    relationships: List[Dict[str, str]]
    # Mean analysis_relevance over relevant_fields (0.0 when empty).
    confidence_score: float
class DjangoModelAnalyzer:
    """Maps a natural-language prompt (Arabic or English) to a Django model
    analysis plan, using an Ollama-hosted LLM with a regex-keyword fallback.
    """

    def __init__(self):
        # Fallback keyword patterns used when the LLM call fails: each entry
        # maps an analysis type to trigger regexes, likely field names and a
        # relevance weight used when scoring fields.
        self.analysis_patterns = {
            'count': {
                'patterns': [r'\b(count|number|how many)\b'],
                'fields': ['id'],
                'weight': 1.0
            },
            'aggregate': {
                'patterns': [r'\b(average|avg|mean|sum|total)\b'],
                'fields': ['price', 'amount', 'value', 'cost', 'quantity'],
                'weight': 0.8
            },
            'temporal': {
                'patterns': [r'\b(date|time|when|period)\b'],
                'fields': ['created_at', 'updated_at', 'date', 'timestamp'],
                'weight': 0.7
            }
        }

    def analyze_prompt(self, prompt: str, model_structure: List) -> Optional[ModelAnalysis]:
        """Analyze *prompt* and return a ModelAnalysis plan.

        Falls back to pattern matching when the LLM is unavailable. Returns
        None when the requirements cannot be resolved to a known model.
        """
        # Initialize LLM (model name/temperature come from Django settings).
        llm = ChatOllama(
            model=LLM_MODEL,
            temperature=LLM_TEMPERATURE
        )
        messages = [
            SystemMessage(content=system_instruction),
            HumanMessage(content=prompt)
        ]
        try:
            response = llm.invoke(messages)
            if not response or not hasattr(response, 'content') or response.content is None:
                raise ValueError("Empty response from LLM")
            analysis_requirements = self._parse_llm_response(response.content)
        except Exception as e:
            logger.error(f"Error in LLM analysis: {e}")
            # Degrade gracefully to keyword matching when the LLM fails.
            analysis_requirements = self._pattern_based_analysis(prompt, model_structure)
        return self._enhance_analysis(analysis_requirements, model_structure)

    def _parse_llm_response(self, response: str) -> Dict:
        """Extract the first JSON object from the raw LLM output; {} on failure."""
        try:
            json_match = re.search(r'({.*})', response.replace('\n', ' '), re.DOTALL)
            if json_match:
                return json.loads(json_match.group(1))
            return {}
        except Exception as e:
            logger.error(f"Error parsing LLM response: {e}")
            return {}

    def _pattern_based_analysis(self, prompt: str, model_structure: List) -> Dict:
        """Keyword-based fallback: pick an analysis type and candidate fields.

        NOTE(review): the returned dict has no "analysis_requirements" key,
        so _enhance_analysis cannot resolve a model from it and returns None.
        """
        analysis_type = None
        relevant_fields = []
        for analysis_name, config in self.analysis_patterns.items():
            for pattern in config['patterns']:
                if re.search(pattern, prompt.lower()):
                    relevant_fields.extend(config['fields'])
                    analysis_type = analysis_name
                    break
            if analysis_type:
                break
        return {
            'analysis_type': analysis_type or 'basic',
            'fields': list(set(relevant_fields)) or ['id', 'name']
        }

    def _enhance_analysis(self, requirements: Dict, model_structure: List) -> Optional[ModelAnalysis]:
        """Validate requirements against the real model and score each field.

        Returns None when the requirements do not name a resolvable model.
        Raises ValueError when the "fields" entry is present but not a list.
        """
        reqs = requirements.get("analysis_requirements", {}) or {}
        app_label = reqs.get("app_label")
        model_name = reqs.get("model_name")
        fields = reqs.get("fields") or []
        if not isinstance(fields, list):
            raise ValueError(f"Invalid fields in analysis requirements: {fields}")
        # BUG FIX: the pattern-based fallback (and malformed LLM output)
        # yields no app_label/model_name; previously apps.get_model(None, None)
        # raised an uncaught non-LookupError exception here. Return None
        # instead -- callers already handle a None analysis.
        if not app_label or not model_name:
            logger.error("Analysis requirements missing app_label/model_name")
            return None
        try:
            model = apps.get_model(app_label, model_name)
        except LookupError as e:
            logger.error(f"Model lookup error: {e}")
            return None
        relevant_fields = []
        relationships = []
        for field_name in fields:
            try:
                field = model._meta.get_field(field_name)
                field_analysis = FieldAnalysis(
                    name=field_name,
                    field_type=field.get_internal_type(),
                    is_required=not field.null if hasattr(field, 'null') else True,
                    is_relation=field.is_relation,
                    related_model=field.related_model.__name__
                    if field.is_relation and hasattr(field, 'related_model') and field.related_model
                    else None
                )
                # NOTE(review): 'analysis_type' is only present in the
                # pattern-fallback dict, never in the LLM JSON -- confirm.
                field_analysis.analysis_relevance = self._calculate_field_relevance(
                    field_analysis,
                    requirements.get('analysis_type', 'basic')
                )
                relevant_fields.append(field_analysis)
                if field.is_relation:
                    relationships.append({
                        'field': field_name,
                        'type': field.get_internal_type(),
                        'to': field.related_model.__name__
                        if hasattr(field, 'related_model') and field.related_model
                        else ''
                    })
            except FieldDoesNotExist:
                # Unknown field names are skipped rather than failing the plan.
                logger.warning(f"Field {field_name} not found in {model_name}")
        return ModelAnalysis(
            app_label=app_label,
            model_name=model_name,
            relevant_fields=sorted(relevant_fields, key=lambda x: x.analysis_relevance, reverse=True),
            relationships=relationships,
            confidence_score=self._calculate_confidence_score(relevant_fields)
        )

    def _calculate_field_relevance(self, field: FieldAnalysis, analysis_type: str) -> float:
        """Heuristic relevance score in [0, 1] for one field and analysis type."""
        base_score = 0.5
        if analysis_type in self.analysis_patterns:
            if field.name in self.analysis_patterns[analysis_type]['fields']:
                base_score += self.analysis_patterns[analysis_type]['weight']
        if field.is_required:
            base_score += 0.2
        if field.is_relation:
            base_score += 0.1
        return min(base_score, 1.0)

    def _calculate_confidence_score(self, fields: List[FieldAnalysis]) -> float:
        """Mean field relevance; 0.0 for an empty field list."""
        if not fields:
            return 0.0
        return sum(field.analysis_relevance for field in fields) / len(fields)
def get_all_model_structures(filtered_apps: Optional[List[str]] = None) -> List[Dict]:
    """
    Describe every registered Django model as a plain dictionary.

    Args:
        filtered_apps: Optional list of app labels; models outside these
            apps are skipped when provided.

    Returns:
        One dict per model with "app_label", "model_name", a "fields"
        mapping of plain field names to internal types, and a
        "relationships" list of {"field", "type", "to"} descriptors.
    """
    structures = []
    for model in apps.get_models():
        label = model._meta.app_label
        if filtered_apps and label not in filtered_apps:
            continue
        plain_fields = {}
        relations = []
        for field in model._meta.get_fields():
            if not field.is_relation:
                plain_fields[field.name] = field.get_internal_type()
                continue
            # Resolve the related model name defensively: reverse relations
            # may expose it via `model` instead of `related_model`.
            target = None
            if getattr(field, 'related_model', None):
                target = field.related_model.__name__
            elif getattr(field, 'model', None):
                target = field.model.__name__
            if target:  # Only record relations with a resolvable target model
                relations.append({
                    "field": field.name,
                    "type": field.get_internal_type(),
                    "to": target
                })
        structures.append({
            "app_label": label,
            "model_name": model.__name__,
            "fields": plain_fields,
            "relationships": relations
        })
    return structures
def apply_joins(queryset: QuerySet, joins: List[Dict[str, str]]) -> QuerySet:
    """
    Attach the requested relation paths to a queryset.

    Args:
        queryset: The base queryset to extend
        joins: Join specifications, each with a "path" and optional "type"
            ("LEFT" uses select_related, anything else prefetch_related)

    Returns:
        The queryset with all applicable joins attached; specs without a
        path, or paths Django rejects, are skipped with a warning.
    """
    for spec in joins or []:
        path = spec.get("path")
        if not path:
            continue
        kind = spec.get("type", "LEFT").upper()
        try:
            if kind == "LEFT":
                queryset = queryset.select_related(path)
            else:
                queryset = queryset.prefetch_related(path)
        except Exception as e:
            logger.warning(f"Failed to apply join for {path}: {e}")
    return queryset
def apply_filters(queryset: QuerySet, filters: Dict[str, Any]) -> QuerySet:
    """
    Apply filters to queryset with advanced filter operations.

    Args:
        queryset: The base queryset to apply filters to
        filters: Dictionary of field:value pairs for simple exact matches,
            or field:{"operation": <op>, "value": <v>} dicts for complex
            lookups (contains, in, gt/gte/lt/lte, exact, iexact,
            startswith, endswith, between, isnull)

    Returns:
        Filtered queryset with all conditions ANDed together
    """
    if not filters:
        return queryset
    q_objects = []
    for key, value in filters.items():
        if isinstance(value, dict):
            # Handle complex filters
            operation = value.get('operation', 'exact')
            filter_value = value.get('value')
            # BUG FIX: this previously read `if not filter_value`, which also
            # skipped legitimate falsy values (0, 0.0, False, "") -- e.g.
            # {"price": {"operation": "gt", "value": 0}} was silently dropped.
            # Only skip when no value was supplied at all.
            if filter_value is None and operation != 'isnull':
                continue
            if operation == 'contains':
                q_objects.append(Q(**{f"{key}__icontains": filter_value}))
            elif operation == 'in':
                # Empty lists are skipped on purpose: `__in []` matches nothing.
                if isinstance(filter_value, list) and filter_value:
                    q_objects.append(Q(**{f"{key}__in": filter_value}))
            elif operation in ['gt', 'gte', 'lt', 'lte', 'exact', 'iexact', 'startswith', 'endswith']:
                q_objects.append(Q(**{f"{key}__{operation}": filter_value}))
            elif operation == 'between' and isinstance(filter_value, list) and len(filter_value) >= 2:
                q_objects.append(Q(**{
                    f"{key}__gte": filter_value[0],
                    f"{key}__lte": filter_value[1]
                }))
            elif operation == 'isnull':
                q_objects.append(Q(**{f"{key}__isnull": bool(filter_value)}))
        else:
            # Simple exact match
            q_objects.append(Q(**{key: value}))
    if not q_objects:
        return queryset
    return queryset.filter(reduce(operator.and_, q_objects))
def process_aggregation(
    queryset: QuerySet,
    aggregation: str,
    fields: List[str],
    group_by: Optional[List[str]] = None
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """
    Run an aggregation over a queryset, with optional grouping.

    Args:
        queryset: The base queryset to aggregate
        aggregation: One of "sum", "avg", "count", "max", "min"
        fields: Fields to aggregate; result keys are "<aggregation>_<field>"
        group_by: Optional fields to group by

    Returns:
        A dict of aggregate values, a list of grouped row dicts, or a
        {"error": ...} dict when the request is invalid or execution fails.
    """
    if not fields:
        return {"error": "No fields specified for aggregation"}
    supported = {"sum": Sum, "avg": Avg, "count": Count, "max": Max, "min": Min}
    agg_func = supported.get(aggregation.lower())
    if agg_func is None:
        return {"error": f"Unsupported aggregation: {aggregation}"}
    try:
        if not group_by:
            # Simple aggregation without grouping
            return queryset.aggregate(
                **{f"{aggregation}_{name}": agg_func(name) for name in fields}
            )
        # Grouped: aggregate every field that is not itself a grouping key.
        annotations = {
            f"{aggregation}_{name}": agg_func(name)
            for name in fields
            if name not in group_by
        }
        if not annotations:
            return {"error": "No valid fields for aggregation after excluding group_by fields"}
        return list(queryset.values(*group_by).annotate(**annotations))
    except Exception as e:
        logger.error(f"Aggregation error: {e}")
        return {"error": f"Aggregation failed: {str(e)}"}
def _chart_value(raw: Any) -> float:
    """Best-effort numeric coercion for chart data; non-numeric becomes 0.0.

    BUG FIX: the previous inline check `isinstance(value, (int, float))`
    silently turned Decimal values (the usual result of Django aggregates on
    a DecimalField) into 0. float() accepts Decimal and numeric strings.
    """
    try:
        return float(raw)
    except (TypeError, ValueError):
        return 0.0


def prepare_chart_data(data: Union[List[Dict], Dict], fields: List[str], chart_type: str) -> Optional[Dict[str, Any]]:
    """
    Prepare data for chart visualization.

    Args:
        data: List of row dicts, or a single dict of aggregation results
        fields: Fields to include in the chart (first field supplies labels,
            later fields supply values)
        chart_type: Type of chart (pie, bar, line, doughnut, radar, scatter)

    Returns:
        Dictionary with chart configuration, or None if input is unusable
    """
    if not data or not fields or not chart_type:
        return None
    # Validate chart type
    chart_type = chart_type.lower()
    if chart_type not in ["pie", "bar", "line", "doughnut", "radar", "scatter"]:
        chart_type = "bar"  # Default to bar chart for unsupported types
    try:
        palette = [
            "rgba(54, 162, 235, 0.6)",
            "rgba(255, 99, 132, 0.6)",
            "rgba(255, 206, 86, 0.6)",
            "rgba(75, 192, 192, 0.6)",
            "rgba(153, 102, 255, 0.6)",
            "rgba(255, 159, 64, 0.6)"
        ]
        # Aggregation results arrive as a single {name: value} dict.
        if isinstance(data, dict):
            labels = list(data.keys())
            values = list(data.values())
            return {
                "type": chart_type,
                # NOTE(review): this strips "<field>_" from labels, but the
                # aggregation keys are "<aggregation>_<field>" -- confirm
                # which prefix was meant to be removed.
                "labels": [str(label).replace(f"{fields[0]}_", "") for label in labels],
                "data": [_chart_value(value) for value in values],
                "backgroundColor": palette
            }
        # Regular query results: list of dicts; first field supplies labels.
        labels = [str(item.get(fields[0], "")) for item in data]
        if chart_type in ("pie", "doughnut"):
            # Pie charts need one data series: the second field supplies
            # values when present, otherwise every slice counts as 1.
            if len(fields) > 1:
                data_values = [_chart_value(item.get(fields[1], 0)) for item in data]
            else:
                data_values = [1] * len(data)
            return {
                "type": chart_type,
                "labels": labels,
                "data": data_values,
                "backgroundColor": palette * (len(data_values) // 6 + 1)  # Repeat colors as needed
            }
        # Other chart types: one dataset per field after the first.
        datasets = []
        for i, field in enumerate(fields[1:], 1):
            datasets.append({
                "label": field,
                # _chart_value also fixes the old behavior where one bad
                # value raised and dropped the entire dataset.
                "data": [_chart_value(item.get(field, 0) or 0) for item in data],
                "backgroundColor": f"rgba({50 + i * 50}, {100 + i * 40}, 235, 0.6)",
                "borderColor": f"rgba({50 + i * 50}, {100 + i * 40}, 235, 1.0)",
                "borderWidth": 1
            })
        return {
            "type": chart_type,
            "labels": labels,
            "datasets": datasets
        }
    except Exception as e:
        logger.error(f"Error preparing chart data: {e}")
        return None
def query_django_model(parsed: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute Django model queries based on parsed analysis requirements.

    Args:
        parsed: Dictionary containing query parameters:
            - app_label: Django app label
            - model_name: Model class name (key is "model_name", not "model")
            - fields: List of fields to query
            - filters: Query filters (see apply_filters for supported shapes)
            - aggregation: Aggregation type (sum, avg, count, max, min)
            - chart: Chart type for visualization
            - joins: List of joins to apply
            - group_by: Fields to group by
            - order_by: Fields to order by
            - limit: Maximum number of results (default 1000)
            - language: "ar" or "en", echoed back in every response

    Returns:
        Dictionary with query results; always contains "status" and
        "language", and on success "data"/"chart" (plus "count"/"metadata"
        for non-aggregation queries).
    """
    try:
        # Extract parameters with defaults
        app_label = parsed.get("app_label")
        model_name = parsed.get("model_name")
        fields = parsed.get("fields", [])
        filters = parsed.get("filters", {})
        aggregation = parsed.get("aggregation")
        chart = parsed.get("chart")
        joins = parsed.get("joins", [])
        group_by = parsed.get("group_by", [])
        order_by = parsed.get("order_by", [])
        limit = int(parsed.get("limit", 1000))
        language = parsed.get("language", "en")
        # Validate required parameters
        if not app_label or not model_name:
            return {
                "status": "error",
                # NOTE(review): message says "model" but the expected key is
                # "model_name" -- confirm which name API clients rely on.
                "error": "app_label and model are required",
                "language": language
            }
        # Get model class
        try:
            model = apps.get_model(app_label=app_label, model_name=model_name)
        except LookupError:
            return {
                "status": "error",
                "error": f"Model '{model_name}' not found in app '{app_label}'",
                "language": language
            }
        # Validate fields against model; unknown names are dropped with a
        # warning so a bad field degrades the query instead of failing it.
        if fields:
            model_fields = [f.name for f in model._meta.fields]
            invalid_fields = [f for f in fields if f not in model_fields]
            if invalid_fields:
                logger.warning(f"Invalid fields requested: {invalid_fields}")
                fields = [f for f in fields if f in model_fields]
        # Build queryset
        queryset = model.objects.all()
        # Apply joins
        queryset = apply_joins(queryset, joins)
        # Apply filters
        if filters:
            try:
                queryset = apply_filters(queryset, filters)
            except Exception as e:
                logger.error(f"Error applying filters: {e}")
                return {
                    "status": "error",
                    "error": f"Invalid filters: {str(e)}",
                    "language": language
                }
        # Handle aggregations (aggregation queries return early here)
        if aggregation:
            result = process_aggregation(queryset, aggregation, fields, group_by)
            if isinstance(result, dict) and "error" in result:
                return {
                    "status": "error",
                    "error": result["error"],
                    "language": language
                }
            chart_data = None
            if chart:
                chart_data = prepare_chart_data(result, fields, chart)
            return {
                "status": "success",
                "data": result,
                "chart": chart_data,
                "language": language
            }
        # Handle regular queries
        try:
            # Apply field selection
            if fields:
                queryset = queryset.values(*fields)
            # Apply ordering
            if order_by:
                queryset = queryset.order_by(*order_by)
            # Apply limit (with safety check; non-positive limits reset to 1000)
            if limit <= 0:
                limit = 1000
            queryset = queryset[:limit]
            # Convert queryset to list (this is where the query executes)
            data = list(queryset)
            # Prepare chart data if needed
            chart_data = None
            if chart and data and fields:
                chart_data = prepare_chart_data(data, fields, chart)
            return {
                "status": "success",
                "data": data,
                "count": len(data),
                "chart": chart_data,
                "metadata": {
                    "total_count": len(data),
                    "fields": fields,
                    "model": model_name,
                    "app": app_label
                },
                "language": language
            }
        except Exception as e:
            logger.error(f"Error executing query: {e}")
            return {
                "status": "error",
                "error": f"Query execution failed: {str(e)}",
                "language": language
            }
    except Exception as e:
        # Catch-all so callers always receive a structured error payload.
        logger.error(f"Unexpected error in query_django_model: {e}")
        return {
            "status": "error",
            "error": f"Unexpected error: {str(e)}",
            "language": parsed.get("language", "en")
        }
def determine_aggregation_type(prompt: str, fields: List[FieldAnalysis]) -> Optional[str]:
    """
    Pick an aggregation type from prompt keywords (English or Arabic).

    Args:
        prompt: User prompt text
        fields: List of field analysis objects

    Returns:
        One of "avg", "sum", "count", "max", "min", or None; numeric fields
        default to "sum" when no keyword matches.
    """
    text = prompt.lower()
    # Ordered keyword table: the first matching group wins.
    keyword_table = (
        ('avg', ('average', 'avg', 'mean', 'معدل', 'متوسط')),
        ('sum', ('sum', 'total', 'مجموع', 'إجمالي')),
        ('count', ('count', 'number', 'how many', 'عدد', 'كم')),
        ('max', ('maximum', 'max', 'highest', 'أقصى', 'أعلى')),
        ('min', ('minimum', 'min', 'lowest', 'أدنى', 'أقل')),
    )
    for agg_name, keywords in keyword_table:
        if any(word in text for word in keywords):
            return agg_name
    # No keyword matched: default to sum when any numeric field is present.
    has_numeric = any(
        field.field_type in ('DecimalField', 'FloatField', 'IntegerField')
        for field in fields
    )
    return 'sum' if has_numeric else None
def determine_chart_type(prompt: str, fields: List[FieldAnalysis]) -> Optional[str]:
    """
    Determine the appropriate chart type based on the prompt and fields.

    Args:
        prompt: User prompt text
        fields: List of field analysis objects

    Returns:
        Chart type name: 'line', 'bar', 'pie', 'doughnut' or 'radar'
        (always a string; 'bar' is the fallback)
    """
    # Explicit chart type mentions (English or Arabic) win outright.
    if any(term in prompt.lower() for term in ['line chart', 'time series', 'trend', 'رسم خطي', 'اتجاه']):
        return 'line'
    elif any(term in prompt.lower() for term in ['bar chart', 'histogram', 'column', 'رسم شريطي', 'أعمدة']):
        return 'bar'
    elif any(term in prompt.lower() for term in ['pie chart', 'circle chart', 'رسم دائري', 'فطيرة']):
        return 'pie'
    elif any(term in prompt.lower() for term in ['doughnut', 'دونات']):
        return 'doughnut'
    elif any(term in prompt.lower() for term in ['radar', 'spider', 'رادار']):
        return 'radar'
    # Otherwise infer from the field mix.
    date_fields = [field for field in fields if field.field_type in ['DateField', 'DateTimeField']]
    numeric_fields = [field for field in fields if field.field_type in ['DecimalField', 'FloatField', 'IntegerField']]
    if date_fields and numeric_fields:
        return 'line'  # Time series data
    elif len(fields) == 2 and len(numeric_fields) >= 1:
        return 'bar'  # Category and value
    elif len(fields) == 1:
        # DEAD-CODE FIX: the original condition also tested
        # "(len(fields) == 2 and len(numeric_fields) == 1)", but the branch
        # above already returns 'bar' for that case, so the disjunct was
        # unreachable. Behavior is unchanged.
        return 'pie'  # Single dimension data
    elif len(fields) > 2:
        return 'bar'  # Multi-dimensional data
    # Default (including zero fields)
    return 'bar'
def analyze_prompt(prompt: str) -> Dict[str, Any]:
    """
    Analyze a natural language prompt and run the matching Django model query.

    Args:
        prompt: Natural language prompt from the user (Arabic or English)

    Returns:
        Result dictionary from query_django_model, or an error payload with
        "status", a localized "message"/"error", and "language".
    """
    # Any character in the Arabic Unicode block marks the prompt as Arabic.
    language = "ar" if re.search(r'[\u0600-\u06FF]', prompt) else "en"
    # Only these apps are exposed to the analyzer.
    filtered_apps = ['inventory', 'django_ledger', 'appointments', 'plans']
    try:
        structures = get_all_model_structures(filtered_apps=filtered_apps)
        analysis = DjangoModelAnalyzer().analyze_prompt(prompt, structures)
        missing = not analysis or not analysis.app_label or not analysis.model_name
        if missing:
            return {
                "status": "error",
                "message": "تعذر العثور على النموذج المطلوب" if language == "ar" else "Missing model information",
                "language": language
            }
        params = {
            "app_label": analysis.app_label,
            "model_name": analysis.model_name,
            "fields": [fa.name for fa in analysis.relevant_fields],
            "joins": [{"path": rel["field"], "type": rel["type"]} for rel in analysis.relationships],
            "filters": {},
            "aggregation": determine_aggregation_type(prompt, analysis.relevant_fields),
            "chart": determine_chart_type(prompt, analysis.relevant_fields),
            "language": language
        }
        return query_django_model(params)
    except Exception as e:
        logger.error(f"Error analyzing prompt: {e}")
        return {
            "status": "error",
            "error": "حدث خطأ أثناء تحليل الاستعلام" if language == "ar" else f"Error analyzing prompt: {str(e)}",
            "language": language
        }

View File

@ -1,231 +0,0 @@
from django.db.models import Avg, Sum, Max, Min, ForeignKey, OneToOneField
import inspect
from django.db import models
from django.utils.translation import gettext_lazy as _
def _localized_keys(language):
if language == 'ar':
return {
'type': 'نوع', 'model': 'النموذج', 'count': 'العدد', 'filters': 'الفلاتر_المطبقة',
'error': 'خطأ', 'chart_type': 'نوع_الرسم_البياني', 'labels': 'التسميات', 'data': 'البيانات',
'visualization_data': 'بيانات_الرسم_البياني', 'field': 'الحقل', 'value': 'القيمة',
'statistic_type': 'نوع_الإحصاء', 'results': 'النتائج', 'title': 'العنوان'
}
else:
return {
'type': 'type', 'model': 'model', 'count': 'count', 'filters': 'filters_applied',
'error': 'error', 'chart_type': 'chart_type', 'labels': 'labels', 'data': 'data',
'visualization_data': 'visualization_data', 'field': 'field', 'value': 'value',
'statistic_type': 'statistic_type', 'results': 'results', 'title': 'title'
}
def generate_count_insight(models, query_params, dealer_id=None, language='ar'):
    """
    Count rows for each model, optionally dealer-scoped and filtered by
    query_params; returns a localized payload plus bar-chart data.

    Args:
        models: Iterable of Django model classes to count.
        query_params: Mapping of filter names to values; 'field'/'operation'
            are reserved for other insight helpers and skipped here.
        dealer_id: Optional dealer primary key used to scope each queryset.
        language: 'ar' for Arabic output keys, anything else for English.
    """
    keys = _localized_keys(language)
    results = []
    for model in models:
        try:
            queryset = model.objects.all()
            # Dealer scoping: prefer a concrete dealer_id column, otherwise
            # fall back to a 'dealer' relation.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {}
            for key, value in query_params.items():
                if key not in ['field', 'operation'] and hasattr(model, key):
                    try:
                        # Best-effort coercion of string parameters to the
                        # field's native type; failures keep the raw value.
                        field = model._meta.get_field(key)
                        if isinstance(field, models.IntegerField):
                            value = int(value)
                        elif isinstance(field, models.BooleanField):
                            value = value.lower() in ('true', '1', 'yes')
                    except Exception:
                        pass
                    filters[key] = value
            if filters:
                queryset = queryset.filter(**filters)
            results.append({
                keys['model']: model.__name__,
                keys['count']: queryset.count(),
                keys['filters']: filters
            })
        except Exception as e:
            # Per-model failures are reported inline instead of aborting the
            # whole insight.
            results.append({
                keys['model']: model.__name__,
                keys['error']: str(e)
            })
    return {
        # NOTE(review): the top-level key is the literal 'type' but the value
        # is built from the localized word — confirm consumers expect the mix.
        'type': keys['type'] + '_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [r[keys['model']] for r in results if keys['count'] in r],
            keys['data']: [r[keys['count']] for r in results if keys['count'] in r]
        }
    }
def generate_statistics_insight(models, query_params, dealer_id=None, language='ar'):
    """
    Aggregate a single field (average/sum/max/min) across the given models.

    Unknown operations degrade to a row count. Models lacking the requested
    field are skipped. Returns a localized payload with bar-chart data.
    """
    keys = _localized_keys(language)
    field = query_params.get('field')
    operation = query_params.get('operation', 'average')
    aggregators = {
        'average': Avg,
        'sum': Sum,
        'max': Max,
        'min': Min
    }
    results = []
    for model in models:
        try:
            # Skip models that do not expose the requested field at all.
            if not field or not hasattr(model, field):
                continue
            qs = model.objects.all()
            # Dealer scoping: raw dealer_id column first, 'dealer' relation second.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    qs = qs.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    qs = qs.filter(dealer=dealer_id)
            extra_filters = {k: v for k, v in query_params.items()
                             if k not in ['field', 'operation'] and hasattr(model, k)}
            if extra_filters:
                qs = qs.filter(**extra_filters)
            if operation in aggregators:
                value = qs.aggregate(val=aggregators[operation](field))['val']
            else:
                value = qs.count()
            results.append({
                keys['model']: model.__name__,
                keys['field']: field,
                keys['statistic_type']: operation,
                keys['value']: value,
                keys['filters']: extra_filters
            })
        except Exception as e:
            results.append({keys['model']: model.__name__, keys['error']: str(e)})
    return {
        'type': keys['type'] + '_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [f"{r[keys['model']]}.{r[keys['field']]}" for r in results if keys['value'] in r],
            keys['data']: [r[keys['value']] for r in results if keys['value'] in r],
            keys['title']: f"{operation} of {field}" if language != 'ar' else f"{field} ({operation})"
        }
    }
def generate_recommendations(model_classes, analysis_type, language='ar'):
    """Suggest up to five schema indexing improvements for the given models."""
    suggestions = []
    text_like = ['name', 'title', 'description', 'text']
    for model in model_classes:
        for field in model._meta.fields:
            # Unindexed foreign keys are a common join-performance smell.
            if isinstance(field, ForeignKey) and not field.db_index:
                if language == 'ar':
                    suggestions.append(f"أضف db_index=True إلى {model.__name__}.{field.name}")
                else:
                    suggestions.append(f"Add db_index=True to {model.__name__}.{field.name}")
            # Frequently-filtered text columns benefit from an index too.
            if (isinstance(field, models.CharField) and not field.db_index
                    and field.name in text_like):
                if language == 'ar':
                    suggestions.append(f"فكر في فهرسة الحقل النصي {model.__name__}.{field.name}")
                else:
                    suggestions.append(f"Consider indexing the text field {model.__name__}.{field.name}")
    return suggestions[:5]
def generate_model_insight(model, dealer_id=None, language='ar'):
    """Describe one model: per-field metadata and a (dealer-scoped) row count."""
    keys = _localized_keys(language)
    field_descriptions = []
    for fld in model._meta.fields:
        field_descriptions.append({
            'name': fld.name,
            'type': fld.__class__.__name__,
            'null': fld.null,
            'blank': fld.blank,
            'unique': fld.unique,
            'pk': fld.primary_key
        })
    try:
        queryset = model.objects.all()
        if dealer_id:
            # Prefer the raw dealer_id column; fall back to the relation.
            if hasattr(model, 'dealer_id'):
                queryset = queryset.filter(dealer_id=dealer_id)
            elif hasattr(model, 'dealer'):
                queryset = queryset.filter(dealer=dealer_id)
        row_count = queryset.count()
    except Exception:
        # Counting can fail (e.g. missing table); degrade to a marker string.
        row_count = "error"
    return {
        'type': keys['type'] + '_analysis',
        keys['model']: model.__name__,
        'fields': field_descriptions,
        'count': row_count
    }
def generate_relationship_insight(models, query_params=None, dealer_id=None, language='ar'):
    """List FK/O2O/M2M links between the given models, with localized keys."""
    key_from = "من" if language == 'ar' else "from"
    key_to = "إلى" if language == 'ar' else "to"
    key_kind = "نوع" if language == 'ar' else "type"
    links = []
    for model in models:
        # Forward relations declared as concrete columns.
        for fld in model._meta.fields:
            if isinstance(fld, (ForeignKey, OneToOneField)):
                links.append({
                    key_from: model.__name__,
                    key_to: fld.related_model.__name__,
                    key_kind: fld.__class__.__name__
                })
        # Many-to-many relations live on a separate meta collection.
        for m2m in model._meta.many_to_many:
            links.append({
                key_from: model.__name__,
                key_to: m2m.related_model.__name__,
                key_kind: 'ManyToManyField'
            })
    return {
        'type': 'تحليل_العلاقات' if language == 'ar' else 'relationship_analysis',
        'relationships': links
    }
def generate_performance_insight(models, query_params=None, dealer_id=None, language='ar'):
    """Flag schema-level performance smells (missing indexes) on the models."""
    findings = []
    for model in models:
        for fld in model._meta.fields:
            if isinstance(fld, ForeignKey) and not fld.db_index:
                findings.append({
                    'model': model.__name__,
                    'field': fld.name,
                    'issue': 'Missing index on ForeignKey'
                })
            is_hot_char = (isinstance(fld, models.CharField)
                           and not fld.db_index
                           and fld.name in ['name', 'title'])
            if is_hot_char:
                findings.append({
                    'model': model.__name__,
                    'field': fld.name,
                    'issue': 'Unindexed CharField used in filtering'
                })
    return {
        'type': 'تحليل_الأداء' if language == 'ar' else 'performance_analysis',
        'issues': findings
    }

View File

@ -1,62 +0,0 @@
from openai import OpenAI
from inventory import models
from car_inventory import settings
def fetch_data(dealer):
    """
    Return a human-readable summary of the dealer's car count.

    Args:
        dealer: Dealer instance used to scope the Car queryset.

    Returns:
        str: An Arabic summary sentence when cars exist, a "no cars" notice
        otherwise, or an error message if the query fails.
    """
    try:
        cars = models.Car.objects.filter(dealer=dealer).count()
        # Removed leftover debug print(cars).
        if cars:
            # BUGFIX: the message had an unbalanced ')' after the dealer name;
            # the name is now properly wrapped in parentheses.
            # NOTE(review): get_local_name is accessed without call parentheses —
            # presumably a property; confirm it is not a method.
            return f"إجمالي عدد السيارات في المخزون الخاص بـ ({dealer.get_local_name}) هو {cars}"
        else:
            return "No cars found in the inventory."
    except Exception as e:
        return f"An error occurred while fetching car data: {str(e)}"
def get_gpt4_response(user_input, dealer):
    """
    Generate a GPT-4o response for a Haikal car-inventory question.

    :param user_input: The text input or query provided by the user.
    :type user_input: str
    :param dealer: Dealer information or identifier. Currently unused in the
        request itself; kept for interface compatibility and future context
        injection (e.g. via fetch_data).
    :type dealer: Any
    :return: The model's reply, or an error message string if the API call
        fails (errors are caught here, not raised to the caller).
    :rtype: str
    """
    # Removed the no-op `dealer = dealer` self-assignment and the dead
    # commented-out context-injection block.
    client = OpenAI(api_key=settings.OPENAI_API_KEY)
    try:
        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are an assistant specialized in car inventory management for the Haikal system. "
                        "You can answer questions about the inventory, sales process, car transfers, invoices, "
                        "and other application-specific functionalities. Always provide responses aligned "
                        "with the Haikal system's structure and features."
                    )
                },
                {"role": "user", "content": user_input},
            ],
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        return f"An error occurred while generating the response: {str(e)}"

38
haikalbot/haikal_kb.yaml Normal file
View File

@ -0,0 +1,38 @@
metadata:
system_name: Haikal
version: 1.0
language: bilingual
roles:
- admin
- dealer
- branch
- supplier
features:
add_car:
description: Add a new car to inventory
steps:
- Navigate to the "Inventory" section
- Click "Add New Car"
- "Enter required fields: VIN, Make, Model, Year"
- "Optional: Upload custom card, add warranty or insurance"
- Click "Save"
permissions: ["admin", "dealer"]
related_terms: ["chassis", "هيكل", "السيارة"]
create_invoice:
description: Create a sale or purchase invoice
steps:
- Go to the "Invoices" page
- Click "New Invoice"
- "Choose Type: Sale or Purchase"
- Select invoice_from and invoice_to
- Link existing order(s)
- Confirm and save
permissions: ["admin", "dealer"]
notes: Use sale for customer transactions, purchase for supplier buys
glossary:
VIN: Vehicle Identification Number or chassis number (هيكل السيارة)
custom_card: Official car registration document (استمارة)
adjustment: Any cost added to or subtracted from an order

View File

@ -1,312 +0,0 @@
# Integrating Ollama with LangChain for Django AI Analyst
This guide provides step-by-step instructions for integrating Ollama with LangChain in your Django AI Analyst application, with specific focus on Arabic language support.
## Prerequisites
1. Ollama installed on your system
2. An Ollama model with Arabic support (preferably Jais-13B as recommended)
3. Django project with the AI Analyst application
## Installation Steps
### 1. Install Required Python Packages
```bash
pip install langchain langchain-community
```
### 2. Configure Django Settings
Add the following to your Django settings.py file:
```python
# Ollama and LangChain settings
OLLAMA_BASE_URL = "http://10.10.1.132:11434" # Default Ollama API URL
OLLAMA_MODEL = "qwen3:8b" # Or your preferred model
OLLAMA_TIMEOUT = 120 # Seconds
```
### 3. Create a LangChain Utility Module
Create a new file `ai_analyst/langchain_utils.py`:
```python
from langchain.llms import Ollama
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_ollama_llm():
"""
Initialize and return an Ollama LLM instance configured for Arabic support.
"""
try:
# Get settings from Django settings or use defaults
base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
# Configure Ollama with appropriate parameters for Arabic
return Ollama(
base_url=base_url,
model=model,
timeout=timeout,
# Parameters to improve Arabic language generation
parameters={
"temperature": 0.7,
"top_p": 0.9,
"top_k": 40,
"num_ctx": 2048, # Context window size
}
)
except Exception as e:
logger.error(f"Error initializing Ollama LLM: {str(e)}")
return None
def create_prompt_analyzer_chain(language='ar'):
"""
Create a LangChain for analyzing prompts in Arabic or English.
"""
llm = get_ollama_llm()
if not llm:
return None
# Define the prompt template based on language
if language == 'ar':
template = """
قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON كما يلي:
{{
"analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
else:
template = """
Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
Prompt: {prompt}
Provide your answer in JSON format as follows:
{{
  "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
  "target_models": ["ModelName1", "ModelName2"],
  "query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
# Create the prompt template
prompt_template = PromptTemplate(
input_variables=["prompt"],
template=template
)
# Create and return the LLM chain
return LLMChain(llm=llm, prompt=prompt_template)
```
### 4. Update Your View to Use LangChain
Modify your `ModelAnalystView` class to use the LangChain utilities:
```python
from .langchain_utils import create_prompt_analyzer_chain
import json
import re
class ModelAnalystView(View):
# ... existing code ...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We'll initialize chains on demand to avoid startup issues
self.prompt_analyzer_chains = {}
def _get_prompt_analyzer_chain(self, language='ar'):
"""
Get or create a prompt analyzer chain for the specified language.
"""
if language not in self.prompt_analyzer_chains:
self.prompt_analyzer_chains[language] = create_prompt_analyzer_chain(language)
return self.prompt_analyzer_chains[language]
def _analyze_prompt_with_llm(self, prompt, language='ar'):
"""
Use LangChain and Ollama to analyze the prompt.
"""
try:
# Get the appropriate chain for the language
chain = self._get_prompt_analyzer_chain(language)
if not chain:
# Fallback to rule-based analysis if chain creation failed
return self._analyze_prompt_rule_based(prompt, language)
# Run the chain
result = chain.run(prompt=prompt)
# Parse the JSON result
# Find JSON content within the response (in case the LLM adds extra text)
json_match = re.search(r'({.*})', result.replace('\n', ' '), re.DOTALL)
if json_match:
json_str = json_match.group(1)
return json.loads(json_str)
else:
# Fallback to rule-based analysis
return self._analyze_prompt_rule_based(prompt, language)
except Exception as e:
logger.error(f"Error in LLM prompt analysis: {str(e)}")
# Fallback to rule-based analysis
return self._analyze_prompt_rule_based(prompt, language)
def _analyze_prompt_rule_based(self, prompt, language='ar'):
"""
Rule-based fallback for prompt analysis.
"""
analysis_type, target_models, query_params = self._analyze_prompt(prompt, language)
return {
"analysis_type": analysis_type,
"target_models": target_models,
"query_params": query_params
}
def _process_prompt(self, prompt, user, dealer_id, language='ar'):
"""
Process the natural language prompt and generate insights.
"""
# ... existing code ...
# Use LLM for prompt analysis
analysis_result = self._analyze_prompt_with_llm(prompt, language)
analysis_type = analysis_result.get('analysis_type', 'general')
target_models = analysis_result.get('target_models', [])
query_params = analysis_result.get('query_params', {})
# ... rest of the method ...
```
## Testing the Integration
Create a test script to verify the Ollama and LangChain integration:
```python
# test_ollama.py
import os
import sys
import django
# Set up Django environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'your_project.settings')
django.setup()
from ai_analyst.langchain_utils import get_ollama_llm, create_prompt_analyzer_chain
def test_ollama_connection():
"""Test basic Ollama connection and response."""
llm = get_ollama_llm()
if not llm:
print("Failed to initialize Ollama LLM")
return
# Test with Arabic prompt
arabic_prompt = "مرحبا، كيف حالك؟"
print(f"Testing Arabic prompt: {arabic_prompt}")
try:
response = llm.invoke(arabic_prompt)
print(f"Response: {response}")
print("Ollama connection successful!")
except Exception as e:
print(f"Error: {str(e)}")
def test_prompt_analysis():
"""Test the prompt analyzer chain."""
chain = create_prompt_analyzer_chain('ar')
if not chain:
print("Failed to create prompt analyzer chain")
return
# Test with an Arabic analysis prompt
analysis_prompt = "كم عدد السيارات التي لدينا؟"
print(f"Testing analysis prompt: {analysis_prompt}")
try:
result = chain.run(prompt=analysis_prompt)
print(f"Analysis result: {result}")
except Exception as e:
print(f"Error: {str(e)}")
if __name__ == "__main__":
print("Testing Ollama and LangChain integration...")
test_ollama_connection()
print("\n---\n")
test_prompt_analysis()
```
Run the test script:
```bash
python test_ollama.py
```
## Troubleshooting
### Common Issues and Solutions
1. **Ollama Connection Error**
- Ensure Ollama is running: `ollama serve`
- Check if the model is downloaded: `ollama list`
- Verify the base URL in settings
2. **Model Not Found**
- Download the model: `ollama pull jais:13b`
- Check model name spelling in settings
3. **Timeout Errors**
- Increase the timeout setting for complex queries
- Consider using a smaller model if your hardware is limited
4. **Poor Arabic Analysis**
- Ensure you're using an Arabic-capable model like Jais-13B
- Check that your prompts are properly formatted in Arabic
- Adjust temperature and other parameters for better results
5. **JSON Parsing Errors**
- Improve the prompt template to emphasize strict JSON formatting
- Implement more robust JSON extraction from LLM responses
## Performance Optimization
For production use, consider these optimizations:
1. **Caching LLM Responses**
- Implement Redis or another caching system for LLM responses
- Cache common analysis patterns to reduce API calls
2. **Batch Processing**
- For bulk analysis, use batch processing to reduce overhead
3. **Model Quantization**
- If performance is slow, consider using a quantized version of the model
- Example: `ollama pull jais:13b-q4_0` for a 4-bit quantized version
4. **Asynchronous Processing**
- For long-running analyses, implement asynchronous processing with Celery
## Advanced Usage: Fine-tuning for Domain-Specific Analysis
For improved performance on your specific domain:
1. Create a dataset of example prompts and expected analyses
2. Use Ollama's fine-tuning capabilities to adapt the model
3. Update your application to use the fine-tuned model
## Conclusion
This integration enables your Django AI Analyst to leverage Ollama's powerful language models through LangChain, with specific optimizations for Arabic language support. The fallback to rule-based analysis ensures robustness, while the LLM-based approach provides more natural language understanding capabilities.

BIN
haikalbot/management/.DS_Store vendored Normal file

Binary file not shown.

View File

View File

@ -0,0 +1,67 @@
from django.core.management.base import BaseCommand
from django.apps import apps
import inspect
import importlib
import yaml
import os
from django.conf import settings
class Command(BaseCommand):
    """Management command that dumps docstrings from all installed apps'
    views and models into a YAML knowledge base file."""
    help = "Generate YAML support knowledge base from Django views and models"

    def handle(self, *args, **kwargs):
        """Collect documented views/models and write them to haikal_kb.yaml
        in the current working directory."""
        output_file = "haikal_kb.yaml"
        # Skeleton of the knowledge base; 'features' is filled below,
        # 'glossary' stays empty here (maintained by hand elsewhere).
        kb = {
            "metadata": {
                "system_name": "Haikal",
                "version": "1.0",
                "generated_from": "Django",
            },
            "features": {},
            "glossary": {}
        }

        def extract_doc(item):
            # Normalized docstring of `item`, or "" when it has none.
            doc = inspect.getdoc(item)
            return doc.strip() if doc else ""

        def get_all_views_modules():
            # Import "<app>.views" for every installed app; apps without a
            # views module are silently skipped.
            view_modules = []
            for app in settings.INSTALLED_APPS:
                try:
                    mod = importlib.import_module(f"{app}.views")
                    view_modules.append((app, mod))
                except ImportError:
                    continue
            return view_modules

        def get_all_model_classes():
            # (app_label, class name, docstring) for every registered model.
            all_models = []
            for model in apps.get_models():
                all_models.append((model._meta.app_label, model.__name__, extract_doc(model)))
            return all_models

        # Record only documented module-level view functions.
        for app, mod in get_all_views_modules():
            for name, obj in inspect.getmembers(mod, inspect.isfunction):
                doc = extract_doc(obj)
                if doc:
                    kb["features"][name] = {
                        "description": doc,
                        "source": f"{app}.views.{name}",
                        "type": "view_function"
                    }
        # Record only documented model classes.
        # NOTE(review): a model and a view sharing a name overwrite each other
        # in kb["features"] — confirm this is acceptable.
        for app, name, doc in get_all_model_classes():
            if doc:
                kb["features"][name] = {
                    "description": doc,
                    "source": f"{app}.models.{name}",
                    "type": "model_class"
                }
        with open(output_file, "w", encoding="utf-8") as f:
            yaml.dump(kb, f, allow_unicode=True, sort_keys=False)
        self.stdout.write(self.style.SUCCESS(f"✅ YAML knowledge base saved to {output_file}"))

Binary file not shown.

View File

@ -1,76 +0,0 @@
# Recommended Ollama Models for Arabic Language Support
## Top Recommendations
1. **Jais-13B** (Recommended)
- **Size**: 13 billion parameters
- **Strengths**: Specifically trained on Arabic content, excellent understanding of Arabic context and nuances
- **Command**: `ollama pull jais:13b`
- **Best for**: Production-quality Arabic language understanding and generation
2. **BLOOM-7B**
- **Size**: 7 billion parameters
- **Strengths**: Trained on 46 languages including Arabic, good multilingual capabilities
- **Command**: `ollama pull bloom:7b`
- **Best for**: Multilingual applications where Arabic is one of several languages
3. **Mistral-7B-Instruct**
- **Size**: 7 billion parameters
- **Strengths**: Strong general performance, good instruction following, reasonable Arabic support
- **Command**: `ollama pull mistral:7b-instruct`
- **Best for**: General purpose applications with moderate Arabic requirements
4. **Qwen2-7B**
- **Size**: 7 billion parameters
- **Strengths**: Good multilingual capabilities including Arabic
- **Command**: `ollama pull qwen2:7b`
- **Best for**: Applications requiring both Chinese and Arabic support
## Comparison Table
| Model | Size | Arabic Support | Instruction Following | Resource Requirements | Command |
|-------|------|---------------|----------------------|----------------------|---------|
| Jais-13B | 13B | Excellent | Very Good | High (16GB+ RAM) | `ollama pull jais:13b` |
| BLOOM-7B | 7B | Good | Good | Medium (8GB+ RAM) | `ollama pull bloom:7b` |
| Mistral-7B-Instruct | 7B | Moderate | Excellent | Medium (8GB+ RAM) | `ollama pull mistral:7b-instruct` |
| Qwen2-7B | 7B | Good | Very Good | Medium (8GB+ RAM) | `ollama pull qwen2:7b` |
## Justification for Jais-13B Recommendation
Jais-13B is specifically recommended for your Django AI Analyst application because:
1. **Arabic-First Design**: Unlike most models that treat Arabic as one of many languages, Jais was specifically designed for Arabic language understanding and generation.
2. **Cultural Context**: The model has better understanding of Arabic cultural contexts and nuances, which is important for analyzing domain-specific queries about your data models.
3. **Technical Terminology**: Better handling of technical terms in Arabic, which is crucial for a model analyzing Django models and database structures.
4. **Instruction Following**: Good ability to follow complex instructions in Arabic, which is essential for your prompt-based analysis system.
5. **Performance on Analytical Tasks**: Superior performance on analytical and reasoning tasks in Arabic compared to general multilingual models.
If your system has limited resources (less than 12GB RAM), Mistral-7B-Instruct would be the next best alternative, offering a good balance between performance and resource requirements.
## Installation Instructions
To install the recommended Jais-13B model:
```bash
ollama pull jais:13b
```
For systems with limited resources, install Mistral-7B-Instruct instead:
```bash
ollama pull mistral:7b-instruct
```
After installation, update the `OLLAMA_MODEL` setting in your Django view:
```python
# For Jais-13B
OLLAMA_MODEL = 'jais:13b'
# OR for Mistral-7B-Instruct if resources are limited
# OLLAMA_MODEL = 'mistral:7b-instruct'
```

View File

@ -0,0 +1,19 @@
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# Load YAML doc
# The YAML knowledge base is loaded as plain text; VectorstoreIndexCreator
# chunks and embeds it (uses its default embedding backend, which needs an
# OpenAI API key in the environment).
loader = TextLoader("haikal_kb.yaml")
index = VectorstoreIndexCreator().from_loaders([loader])
# Setup QA chain
# Retrieval-augmented QA: relevant chunks are fetched from the vector store
# and passed to the chat model; temperature=0 keeps answers deterministic.
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    retriever=index.vectorstore.as_retriever()
)
# Ask a question (sample query; this script is a manual smoke test).
query = "How do I add a new invoice?"
response = qa.run(query)
print("Answer:", response)

View File

@ -1,227 +0,0 @@
import inspect
import hashlib
from django.db import models
from django.db.models import Avg, Sum, Max, Min, ForeignKey, OneToOneField, Count
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
def _localized_keys(language):
return {
'type': 'نوع' if language == 'ar' else 'type',
'model': 'النموذج' if language == 'ar' else 'model',
'count': 'العدد' if language == 'ar' else 'count',
'filters': 'الفلاتر_المطبقة' if language == 'ar' else 'filters_applied',
'error': 'خطأ' if language == 'ar' else 'error',
'chart_type': 'نوع_الرسم_البياني' if language == 'ar' else 'chart_type',
'labels': 'التسميات' if language == 'ar' else 'labels',
'data': 'البيانات' if language == 'ar' else 'data',
'visualization_data': 'بيانات_الرسم_البياني' if language == 'ar' else 'visualization_data',
'field': 'الحقل' if language == 'ar' else 'field',
'value': 'القيمة' if language == 'ar' else 'value',
'statistic_type': 'نوع_الإحصاء' if language == 'ar' else 'statistic_type',
'results': 'النتائج' if language == 'ar' else 'results',
'title': 'العنوان' if language == 'ar' else 'title',
}
def generate_count_insight(models, query_params, dealer_id=None, language='en'):
    """
    Count rows for each model, optionally dealer-scoped and filtered by
    query_params; returns a fully localized payload plus bar-chart data.

    Args:
        models: Iterable of Django model classes to count.
        query_params: Mapping of filter names to values; 'field'/'operation'
            are meta-parameters for other helpers and skipped here.
        dealer_id: Optional dealer primary key used to scope each queryset.
        language: 'ar' for Arabic output keys, anything else for English.
    """
    keys = _localized_keys(language)
    results = []
    for model in models:
        try:
            queryset = model.objects.all()
            # Dealer scoping: raw dealer_id column first, relation fallback.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {}
            for key, value in query_params.items():
                if key in ['field', 'operation']:
                    continue
                if hasattr(model, key):
                    try:
                        # Best-effort coercion to the field's native type;
                        # failures keep the raw value.
                        field = model._meta.get_field(key)
                        if isinstance(field, models.IntegerField):
                            value = int(value)
                        elif isinstance(field, models.BooleanField):
                            value = value.lower() in ('true', '1', 'yes')
                    except Exception:
                        pass
                    filters[key] = value
            if filters:
                queryset = queryset.filter(**filters)
            results.append({
                keys['model']: model.__name__,
                keys['count']: queryset.count(),
                keys['filters']: filters,
            })
        except Exception as e:
            # Per-model failures are reported inline instead of aborting.
            results.append({
                keys['model']: model.__name__,
                keys['error']: str(e),
            })
    return {
        # Unlike the sibling implementation, here even the top-level 'type'
        # key is localized.
        keys['type']: keys['type'] + '_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [r[keys['model']] for r in results if keys['count'] in r],
            keys['data']: [r[keys['count']] for r in results if keys['count'] in r],
        }
    }
def generate_statistics_insight(models, query_params, dealer_id=None, language='en'):
    """
    Aggregate a single field (average/sum/max/min) across the given models.

    Models lacking the requested field are skipped. Unknown operations fall
    back to Count(field), i.e. a count of non-null values of that field.
    Returns a localized payload with bar-chart data.
    """
    keys = _localized_keys(language)
    results = []
    field = query_params.get('field')
    operation = query_params.get('operation', 'average')
    stat_map = {'average': Avg, 'sum': Sum, 'max': Max, 'min': Min}
    for model in models:
        try:
            if not field or not hasattr(model, field):
                continue
            queryset = model.objects.all()
            # Dealer scoping: raw dealer_id column first, relation fallback.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            # Remaining query_params become ORM filters (no type coercion
            # here, unlike generate_count_insight).
            filters = {
                k: v for k, v in query_params.items()
                if k not in ['field', 'operation'] and hasattr(model, k)
            }
            if filters:
                queryset = queryset.filter(**filters)
            value = queryset.aggregate(val=stat_map.get(operation, Count)(field))['val']
            results.append({
                keys['model']: model.__name__,
                keys['field']: field,
                keys['statistic_type']: operation,
                keys['value']: value,
                keys['filters']: filters,
            })
        except Exception as e:
            results.append({
                keys['model']: model.__name__,
                keys['error']: str(e),
            })
    return {
        keys['type']: keys['type'] + '_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [f"{r[keys['model']]}.{r[keys['field']]}" for r in results if keys['value'] in r],
            keys['data']: [r[keys['value']] for r in results if keys['value'] in r],
            keys['title']: f"{operation} of {field}" if language != 'ar' else f"{field} ({operation})"
        }
    }
def generate_recommendations(model_classes, analysis_type, language='en'):
    """
    Suggest up to five schema indexing improvements for the given models.

    Args:
        model_classes: Iterable of Django model classes to inspect.
        analysis_type: Unused here; kept for a uniform helper signature.
        language: 'ar' produces Arabic messages, otherwise English.

    Returns:
        list[str]: At most five recommendation messages.
    """
    recs = []
    for model in model_classes:
        for field in model._meta.fields:
            # Unindexed foreign keys are a common join-performance smell.
            if isinstance(field, ForeignKey) and not field.db_index:
                msg = f"أضف db_index=True إلى {model.__name__}.{field.name}" if language == 'ar' else f"Add db_index=True to {model.__name__}.{field.name}"
                recs.append(msg)
            # Frequently-filtered text columns benefit from an index too.
            if isinstance(field, models.CharField) and not field.db_index and field.name in ['name', 'title', 'description', 'text']:
                msg = f"فكر في فهرسة الحقل النصي {model.__name__}.{field.name}" if language == 'ar' else f"Consider indexing the text field {model.__name__}.{field.name}"
                recs.append(msg)
    return recs[:5]
def generate_model_insight(model, dealer_id=None, language='en'):
    """
    Describe one model: per-field metadata and a row count, optionally
    scoped to a dealer.

    Args:
        model: Django model class to describe.
        dealer_id: Optional dealer primary key used to scope the count.
        language: 'ar' for Arabic output keys, anything else for English.

    Returns:
        dict: Localized type/model keys, 'fields' metadata list, and 'count'
        (the string "error" if counting fails).
    """
    keys = _localized_keys(language)
    fields_info = [{
        'name': f.name,
        'type': f.__class__.__name__,
        'null': f.null,
        'blank': f.blank,
        'unique': f.unique,
        'pk': f.primary_key
    } for f in model._meta.fields]
    try:
        qs = model.objects.all()
        if dealer_id:
            # BUGFIX: both branches previously tested hasattr(model, 'dealer'),
            # making the elif unreachable and routing dealer_id through the
            # wrong probe. Probe 'dealer_id' first, matching the sibling
            # insight helpers.
            if hasattr(model, 'dealer_id'):
                qs = qs.filter(dealer_id=dealer_id)
            elif hasattr(model, 'dealer'):
                qs = qs.filter(dealer=dealer_id)
        count = qs.count()
    except Exception:
        # Counting can fail (e.g. missing table); degrade to a marker string.
        count = "error"
    return {
        keys['type']: keys['type'] + '_analysis',
        keys['model']: model.__name__,
        'fields': fields_info,
        'count': count
    }
def generate_relationship_insight(models, query_params=None, dealer_id=None, language='en'):
    """
    List FK/O2O/M2M links between the given models.

    query_params and dealer_id are unused; kept for a uniform helper
    signature. Keys inside each relationship entry are localized.
    """
    from_ = "من" if language == 'ar' else "from"
    to_ = "إلى" if language == 'ar' else "to"
    rel_type = "نوع" if language == 'ar' else "type"
    relationships = []
    for model in models:
        # Forward relations declared as concrete columns.
        for field in model._meta.fields:
            if isinstance(field, (ForeignKey, OneToOneField)):
                relationships.append({
                    from_: model.__name__,
                    to_: field.related_model.__name__,
                    rel_type: field.__class__.__name__,
                })
        # Many-to-many relations live on a separate meta collection.
        for field in model._meta.many_to_many:
            relationships.append({
                from_: model.__name__,
                to_: field.related_model.__name__,
                rel_type: 'ManyToManyField'
            })
    return {
        'type': 'تحليل_العلاقات' if language == 'ar' else 'relationship_analysis',
        'relationships': relationships
    }
def generate_performance_insight(models, query_params=None, dealer_id=None, language='en'):
    """
    Flag schema-level performance smells on the given models.

    Currently only reports unindexed ForeignKey columns; each issue entry
    contains 'field' and 'issue' keys.
    """
    issues = []
    for model in models:
        for field in model._meta.fields:
            if isinstance(field, ForeignKey) and not field.db_index:
                issues.append({
                    # NOTE(review): the 'model' key is commented out here,
                    # unlike the sibling implementation — confirm consumers
                    # do not need the model name per issue.
                    # 'model': model.__name__,
                    'field': field.name,
                    'issue': 'Missing index on ForeignKey'
                })
            # NOTE(review): the CharField check was disabled; restore if
            # unindexed text-column reporting is wanted again.
            # if isinstance(field, models.CharField) and not field.db_index and field.name in ['name', 'title']:
            #     issues.append({
            #         'model': model.__name__,
            #         'field': field.name,
            #         'issue': 'Unindexed CharField used in filtering'
            #     })
    return {
        'type': 'تحليل_الأداء' if language == 'ar' else 'performance_analysis',
        'issues': issues
    }

View File

@ -1,61 +0,0 @@
import hashlib
import logging
from django.utils import timezone
from django.db import models
from ..models import AnalysisCache
logger = logging.getLogger(__name__)
class CacheService:
    """Thin wrapper around the AnalysisCache model: hashes prompts and
    stores/retrieves analysis results with an expiry timestamp."""

    def generate_hash(self, prompt, dealer_id, language):
        """
        Generate a unique MD5 hash based on the prompt, dealer ID, and language.

        A missing dealer_id is normalized to the literal 'all' so the key is
        stable for dealer-wide lookups.
        """
        key = f"{prompt}:{dealer_id or 'all'}:{language}"
        return hashlib.md5(key.encode()).hexdigest()

    def get_cached_result(self, prompt_hash, user, dealer_id):
        """
        Retrieve a cached analysis result based on hash, dealer, and optionally user.

        Returns the cached result payload, or None when nothing unexpired is
        found or the lookup fails.
        """
        try:
            # Check for user-specific cache if authenticated
            if user and user.is_authenticated:
                user_cache = AnalysisCache.objects.filter(
                    prompt_hash=prompt_hash,
                    user=user,
                    expires_at__gt=timezone.now()
                ).first()
                if user_cache:
                    return user_cache.result
            # Otherwise check for dealer-wide cache
            dealer_cache = AnalysisCache.objects.filter(
                prompt_hash=prompt_hash,
                dealer_id=dealer_id,
                expires_at__gt=timezone.now()
            ).first()
            return dealer_cache.result if dealer_cache else None
        except Exception as e:
            # Cache misses must never break the caller; log and fall through.
            logger.warning(f"Cache retrieval failed: {str(e)}")
            return None

    def cache_result(self, prompt_hash, result, user, dealer_id, duration=3600):
        """
        Save or update a cached result with an expiration timestamp.

        duration is the time-to-live in seconds (default one hour). Anonymous
        users are stored with user=None so the entry acts as a dealer-wide cache.
        """
        try:
            expires_at = timezone.now() + timezone.timedelta(seconds=duration)
            AnalysisCache.objects.update_or_create(
                prompt_hash=prompt_hash,
                user=user if user and user.is_authenticated else None,
                dealer_id=dealer_id,
                defaults={
                    'result': result,
                    'expires_at': expires_at
                }
            )
        except Exception as e:
            # Best-effort cache: failures are logged, never raised.
            logger.warning(f"Cache saving failed: {str(e)}")

View File

@ -1,150 +0,0 @@
import json
import logging
from django.apps import apps
from django.http import JsonResponse
from django.db.models import Count, Avg, Max, Min
from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
logger = logging.getLogger(__name__)
def get_llm_instance():
    """
    Build an OllamaLLM client from Django settings, with sane fallbacks.

    Every sampling parameter can be overridden via settings
    (OLLAMA_BASE_URL, OLLAMA_MODEL, OLLAMA_TEMPERATURE, ...).

    :return: A configured OllamaLLM, or None (logged) on failure.
    """
    try:
        # Gather the configurable knobs in one place; getattr falls back to
        # the tuned defaults when a setting is absent.
        config = {
            'base_url': getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434'),
            'model': getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b'),
            'temperature': getattr(settings, 'OLLAMA_TEMPERATURE', 0.2),
            'top_p': getattr(settings, 'OLLAMA_TOP_P', 0.8),
            'top_k': getattr(settings, 'OLLAMA_TOP_K', 40),
            'num_ctx': getattr(settings, 'OLLAMA_NUM_CTX', 4096),
            'num_predict': getattr(settings, 'OLLAMA_NUM_PREDICT', 2048),
        }
        return OllamaLLM(
            stop=["```", "</s>"],
            repeat_penalty=1.1,
            **config,
        )
    except Exception as e:
        logger.error(f"Error initializing Ollama LLM: {str(e)}")
        return None
def get_llm_chain(language='en'):
    """
    Build a prompt-analysis chain (PromptTemplate | OllamaLLM).

    The template asks the model to classify the user's prompt and answer with
    a JSON object describing the requested analysis.

    :param language: 'ar' selects the Arabic template; anything else English.
    :return: A runnable chain, or None if the LLM could not be initialized.
    """
    llm = get_llm_instance()
    if not llm:
        return None
    if language == 'ar':
        template = """
        قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
        الاستعلام: {prompt}
        قم بتقديم إجابتك بتنسيق JSON كما يلي:
        {{
            "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    else:
        # BUGFIX: literal braces in the JSON example must be doubled ({{ }}),
        # otherwise PromptTemplate treats them as template variables and
        # raises a KeyError at invoke time. The Arabic branch already did
        # this; the English branch did not.
        template = """
        Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
        Prompt: {prompt}
        Provide your answer in JSON format as follows:
        {{
            "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template
    )
    return prompt_template | llm
def _resolve_model(model_name, app_label=None):
    """Find a model class by name; search every installed app when no label is given."""
    if app_label:
        try:
            return apps.get_model(app_label, model_name)
        except LookupError:
            return None
    for candidate in apps.get_models():
        if candidate.__name__ == model_name:
            return candidate
    return None


def analyze_models_with_orm(analysis_type, target_models, query_params, app_label=None):
    """
    Run the requested ORM analysis over the named models.

    :param analysis_type: One of 'count', 'statistics', 'relationship' or
        'performance'; anything else dumps the filtered rows as dicts.
    :param target_models: List of model class names to analyze.
    :param query_params: Field lookups applied as ``.filter(**query_params)``.
    :param app_label: Optional Django app label. When None (the default), all
        installed apps are searched by model name — the previous hard-coded
        placeholder ``'your_app_name'`` made every lookup fail.
    :return: Dict mapping model name -> analysis result (or error info).
    """
    results = {}
    for model_name in target_models:
        model = _resolve_model(model_name, app_label)
        if model is None:
            results[model_name] = {"error": f"Model '{model_name}' not found"}
            continue
        try:
            queryset = model.objects.filter(**query_params)
            if analysis_type == 'count':
                results[model_name] = {'count': queryset.count()}
            elif analysis_type == 'statistics':
                numeric_types = ('IntegerField', 'FloatField', 'DecimalField')
                stats = {}
                for field in model._meta.fields:
                    if field.get_internal_type() not in numeric_types:
                        continue
                    # One aggregate() call per field instead of three: the
                    # database computes avg/max/min in a single query.
                    stats[field.name] = queryset.aggregate(
                        avg=Avg(field.name),
                        max=Max(field.name),
                        min=Min(field.name),
                    )
                results[model_name] = stats
            elif analysis_type == 'relationship':
                related = {}
                for field in model._meta.get_fields():
                    if field.is_relation and field.many_to_one:
                        related[field.name] = queryset.values(field.name).annotate(count=Count(field.name)).count()
                results[model_name] = related
            elif analysis_type == 'performance':
                # Placeholder kept deliberately: performance analysis lives in
                # generate_performance_insight elsewhere in the app.
                results[model_name] = {'note': 'Performance analysis logic not implemented.'}
            else:
                results[model_name] = list(queryset.values())
        except Exception as e:
            results[model_name] = {'error': str(e)}
    return results
def analyze_prompt_and_return_json(request):
    """
    POST endpoint: have the LLM classify the prompt, then run the matching
    ORM analysis and return the results as JSON.

    Adds the safeguards the original lacked: a missing-prompt check, HTTP
    status codes on errors, and regex extraction of the JSON object from the
    raw LLM output (models often wrap JSON in prose/markdown, so a bare
    ``json.loads`` on the whole reply frequently failed).
    """
    import re  # local import: keeps this fix self-contained in the module

    language = request.POST.get('language', 'en')
    try:
        prompt = request.POST.get('prompt')
        if not prompt:
            return JsonResponse({'success': False, 'error': 'Prompt is required'}, status=400)
        chain = get_llm_chain(language)
        if not chain:
            return JsonResponse({'success': False, 'error': 'LLM not initialized'}, status=503)
        raw = chain.invoke({'prompt': prompt})
        # Pull the first {...} span out of the reply, same strategy the view
        # layer uses as its LLM-output fallback.
        match = re.search(r'({.*})', raw.replace('\n', ' '), re.DOTALL)
        parsed = json.loads(match.group(1)) if match else {}
        analysis_type = parsed.get('analysis_type')
        target_models = parsed.get('target_models', [])
        query_params = parsed.get('query_params', {})
        if not analysis_type or not target_models:
            return JsonResponse(
                {'success': False, 'error': 'Incomplete analysis instruction returned by LLM'},
                status=502,
            )
        orm_results = analyze_models_with_orm(analysis_type, target_models, query_params)
        return JsonResponse({'success': True, 'data': orm_results})
    except Exception as e:
        logger.exception("analyze_prompt_and_return_json failed")
        return JsonResponse({'success': False, 'error': str(e)}, status=500)

View File

@ -1,80 +0,0 @@
from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_ollama_llm():
    """
    Initialize and return an Ollama LLM instance configured for Arabic support.

    Generalized so every sampling parameter can be overridden via Django
    settings (OLLAMA_TEMPERATURE, OLLAMA_TOP_P, ...), matching the sibling
    ``get_llm_instance`` helper; the defaults are unchanged, so behavior is
    identical when no setting is defined.

    :return: A configured OllamaLLM, or None (logged) when construction fails.
    """
    try:
        return OllamaLLM(
            base_url=getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434'),
            model=getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b'),
            temperature=getattr(settings, 'OLLAMA_TEMPERATURE', 0.2),
            top_p=getattr(settings, 'OLLAMA_TOP_P', 0.8),
            top_k=getattr(settings, 'OLLAMA_TOP_K', 40),
            num_ctx=getattr(settings, 'OLLAMA_NUM_CTX', 4096),
            num_predict=getattr(settings, 'OLLAMA_NUM_PREDICT', 2048),
            stop=["```", "</s>"],
            repeat_penalty=1.1,
        )
    except Exception as e:
        logger.error(f"Error initializing Ollama LLM: {str(e)}")
        return None
def create_prompt_analyzer_chain(language='ar'):
    """
    Create a LangChain pipeline for analyzing prompts in Arabic or English.

    :param language: 'ar' selects the Arabic template (default); anything
        else selects English.
    :return: A runnable ``PromptTemplate | OllamaLLM`` chain, or None when
        the LLM could not be initialized.
    """
    llm = get_ollama_llm()
    if not llm:
        return None
    # Define the prompt template based on language
    if language == 'ar':
        template = """
        قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
        الاستعلام: {prompt}
        قم بتقديم إجابتك بتنسيق JSON كما يلي:
        {{
            "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    else:
        # BUGFIX: the literal braces of the JSON example are doubled ({{ }})
        # so PromptTemplate does not parse them as input variables — the
        # single-brace version raised at invoke time. The Arabic branch was
        # already escaped correctly.
        template = """
        Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
        Prompt: {prompt}
        Provide your answer in JSON format as follows:
        {{
            "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    # Create the prompt template
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template
    )
    # Create and return the LLM chain
    return prompt_template | llm

View File

@ -1,161 +0,0 @@
# Training Prompt for Django Model Analyst AI Agent
## Agent Purpose
You are a specialized AI agent designed to analyze Django models and provide insightful information to users. Your primary function is to interpret Django model structures, relationships, and metadata to generate meaningful insights that help developers and stakeholders understand their data models better.
## Core Capabilities
1. Parse and understand Django model definitions
2. Identify relationships between models (ForeignKey, ManyToMany, OneToOne)
3. Analyze model fields, types, constraints, and metadata
4. Generate statistics and insights about model usage and structure
5. Provide recommendations for model optimization
6. Respond to natural language queries about models
7. Format responses as structured JSON for integration with frontend applications
## Input Processing
You will receive inputs in the following format:
1. Django model code or references to model files
2. A natural language prompt specifying the type of analysis or insights requested
3. Optional context about the project or specific concerns
## Output Requirements
Your responses must:
1. Be formatted as valid JSON
2. Include a "status" field indicating success or failure
3. Provide an "insights" array containing the requested analysis
4. Include metadata about the analysis performed
5. Be structured in a way that's easy to parse and display in a frontend
## Analysis Types
You should be able to perform the following types of analysis:
### Structural Analysis
- Model count and complexity metrics
- Field type distribution
- Relationship mapping and visualization data
- Inheritance patterns
- Abstract models usage
### Performance Analysis
- Potential query bottlenecks
- Missing index recommendations
- Relationship optimization suggestions
- N+1 query vulnerability detection
### Security Analysis
- Sensitive field detection
- Permission model recommendations
- Data exposure risk assessment
### Data Integrity Analysis
- Constraint analysis
- Validation rule assessment
- Data consistency recommendations
## Example Interactions
### Example 1: Basic Model Analysis
**Input Prompt:**
"Analyze the User and Profile models and show me their relationship structure."
**Expected Response:**
```json
{
"status": "success",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"insights": [
{
"type": "relationship_analysis",
"models": ["User", "Profile"],
"relationships": [
{
"from": "Profile",
"to": "User",
"type": "OneToOne",
"field": "user",
"related_name": "profile",
"on_delete": "CASCADE"
}
],
"visualization_data": {
"nodes": [...],
"edges": [...]
}
}
],
"recommendations": [
"Consider adding an index to Profile.user for faster lookups"
]
}
```
### Example 2: Query Performance Analysis
**Input Prompt:**
"Identify potential performance issues in the Order and OrderItem models."
**Expected Response:**
```json
{
"status": "success",
"request_id": "e5f6g7h8",
"timestamp": "2025-05-25T23:22:30Z",
"insights": [
{
"type": "performance_analysis",
"models": ["Order", "OrderItem"],
"issues": [
{
"severity": "high",
"model": "OrderItem",
"field": "order",
"issue": "Missing database index on ForeignKey",
"impact": "Slow queries when filtering OrderItems by Order",
"solution": "Add db_index=True to order field"
},
{
"severity": "medium",
"model": "Order",
"issue": "No select_related in common queries",
"impact": "Potential N+1 query problems",
"solution": "Use select_related when querying Orders with OrderItems"
}
]
}
],
"code_suggestions": [
{
"model": "OrderItem",
"current": "order = models.ForeignKey(Order, on_delete=models.CASCADE)",
"suggested": "order = models.ForeignKey(Order, on_delete=models.CASCADE, db_index=True)"
}
]
}
```
## Limitations and Boundaries
1. You should not modify or execute code unless explicitly requested
2. You should indicate when you need additional information to provide accurate insights
3. You should acknowledge when a requested analysis is beyond your capabilities
4. You should not make assumptions about implementation details not present in the provided models
5. You should clearly distinguish between factual observations and recommendations
## Learning and Improvement
You should continuously improve your analysis capabilities by:
1. Learning from user feedback
2. Staying updated on Django best practices
3. Expanding your understanding of common model patterns
4. Refining your insight generation to be more relevant and actionable
## Ethical Considerations
1. Respect data privacy by not suggesting exposing sensitive information
2. Provide balanced recommendations that consider security, performance, and usability
3. Be transparent about the limitations of your analysis
4. Avoid making judgments about the quality of code beyond objective metrics
## Technical Integration
You will be integrated into a Django application as a service that:
1. Receives requests through a REST API
2. Has access to model definitions through Django's introspection capabilities
3. Returns JSON responses that can be directly used by frontend components
4. Maintains context across multiple related queries when session information is provided

View File

@ -1,161 +0,0 @@
# تدريب وكيل محلل نماذج Django بالعربية
## هدف الوكيل
أنت وكيل ذكاء اصطناعي متخصص مصمم لتحليل نماذج Django وتقديم معلومات مفيدة للمستخدمين. وظيفتك الأساسية هي تفسير هياكل نماذج Django والعلاقات والبيانات الوصفية لتوليد رؤى ذات معنى تساعد المطورين وأصحاب المصلحة على فهم نماذج البيانات الخاصة بهم بشكل أفضل.
## القدرات الأساسية
1. تحليل وفهم تعريفات نماذج Django
2. تحديد العلاقات بين النماذج (ForeignKey, ManyToMany, OneToOne)
3. تحليل حقول النموذج وأنواعها والقيود والبيانات الوصفية
4. توليد إحصائيات ورؤى حول استخدام النموذج وهيكله
5. تقديم توصيات لتحسين النموذج
6. الاستجابة للاستعلامات باللغة الطبيعية حول النماذج
7. تنسيق الردود كـ JSON منظم للتكامل مع تطبيقات الواجهة الأمامية
## معالجة المدخلات
ستتلقى المدخلات بالتنسيق التالي:
1. كود نموذج Django أو مراجع لملفات النموذج
2. استعلام باللغة الطبيعية يحدد نوع التحليل أو الرؤى المطلوبة
3. سياق اختياري حول المشروع أو مخاوف محددة
## متطلبات المخرجات
يجب أن تكون ردودك:
1. منسقة كـ JSON صالح
2. تتضمن حقل "status" يشير إلى النجاح أو الفشل
3. توفر مصفوفة "insights" تحتوي على التحليل المطلوب
4. تتضمن بيانات وصفية حول التحليل الذي تم إجراؤه
5. منظمة بطريقة يسهل تحليلها وعرضها في واجهة أمامية
## أنواع التحليل
يجب أن تكون قادرًا على إجراء الأنواع التالية من التحليل:
### التحليل الهيكلي
- عدد النماذج ومقاييس التعقيد
- توزيع أنواع الحقول
- رسم خرائط العلاقات وبيانات التصور
- أنماط الوراثة
- استخدام النماذج المجردة
### تحليل الأداء
- اختناقات الاستعلام المحتملة
- توصيات الفهرس المفقود
- اقتراحات تحسين العلاقة
- كشف ضعف استعلام N+1
### تحليل الأمان
- كشف الحقول الحساسة
- توصيات نموذج الإذن
- تقييم مخاطر التعرض للبيانات
### تحليل سلامة البيانات
- تحليل القيود
- تقييم قواعد التحقق
- توصيات اتساق البيانات
## أمثلة على التفاعلات
### مثال 1: تحليل النموذج الأساسي
**استعلام المدخلات:**
"قم بتحليل نماذج المستخدم والملف الشخصي وأظهر لي هيكل العلاقة بينهما."
**الرد المتوقع:**
```json
{
"status": "نجاح",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"insights": [
{
"type": "تحليل_العلاقات",
"models": ["User", "Profile"],
"relationships": [
{
"from": "Profile",
"to": "User",
"type": "OneToOne",
"field": "user",
"related_name": "profile",
"on_delete": "CASCADE"
}
],
"visualization_data": {
"nodes": [...],
"edges": [...]
}
}
],
"recommendations": [
"فكر في إضافة فهرس إلى Profile.user للبحث الأسرع"
]
}
```
### مثال 2: تحليل أداء الاستعلام
**استعلام المدخلات:**
"حدد مشاكل الأداء المحتملة في نماذج الطلب وعناصر الطلب."
**الرد المتوقع:**
```json
{
"status": "نجاح",
"request_id": "e5f6g7h8",
"timestamp": "2025-05-25T23:22:30Z",
"insights": [
{
"type": "تحليل_الأداء",
"models": ["Order", "OrderItem"],
"issues": [
{
"severity": "عالية",
"model": "OrderItem",
"field": "order",
"issue": "فهرس قاعدة بيانات مفقود على ForeignKey",
"impact": "استعلامات بطيئة عند تصفية OrderItems حسب Order",
"solution": "أضف db_index=True إلى حقل order"
},
{
"severity": "متوسطة",
"model": "Order",
"issue": "لا يوجد select_related في الاستعلامات الشائعة",
"impact": "مشاكل استعلام N+1 محتملة",
"solution": "استخدم select_related عند الاستعلام عن Orders مع OrderItems"
}
]
}
],
"code_suggestions": [
{
"model": "OrderItem",
"current": "order = models.ForeignKey(Order, on_delete=models.CASCADE)",
"suggested": "order = models.ForeignKey(Order, on_delete=models.CASCADE, db_index=True)"
}
]
}
```
## القيود والحدود
1. لا يجب عليك تعديل أو تنفيذ التعليمات البرمجية ما لم يُطلب منك ذلك صراحةً
2. يجب أن تشير عندما تحتاج إلى معلومات إضافية لتقديم رؤى دقيقة
3. يجب أن تعترف عندما يكون التحليل المطلوب خارج قدراتك
4. لا يجب أن تفترض تفاصيل التنفيذ غير الموجودة في النماذج المقدمة
5. يجب أن تميز بوضوح بين الملاحظات الواقعية والتوصيات
## التعلم والتحسين
يجب أن تحسن باستمرار قدرات التحليل الخاصة بك من خلال:
1. التعلم من تعليقات المستخدم
2. البقاء على اطلاع بأفضل ممارسات Django
3. توسيع فهمك لأنماط النموذج الشائعة
4. تحسين توليد الرؤى لتكون أكثر صلة وقابلية للتنفيذ
## الاعتبارات الأخلاقية
1. احترام خصوصية البيانات من خلال عدم اقتراح كشف المعلومات الحساسة
2. تقديم توصيات متوازنة تراعي الأمان والأداء وسهولة الاستخدام
3. الشفافية بشأن حدود تحليلك
4. تجنب إصدار أحكام حول جودة الكود بما يتجاوز المقاييس الموضوعية
## التكامل التقني
سيتم دمجك في تطبيق Django كخدمة:
1. تتلقى الطلبات من خلال واجهة برمجة تطبيقات REST
2. لديها إمكانية الوصول إلى تعريفات النموذج من خلال قدرات التفتيش الذاتي لـ Django
3. تعيد استجابات JSON التي يمكن استخدامها مباشرة بواسطة مكونات الواجهة الأمامية
4. تحافظ على السياق عبر استعلامات متعددة ذات صلة عند توفير معلومات الجلسة

View File

@ -1,8 +1,8 @@
from django.urls import path
from . import views
app_name = "haikalbot"
# app_name = "haikalbot"
urlpatterns = [
path("analyze/", views.ModelAnalystView.as_view(), name="haikalbot"),
path("bot/", views.HaikalBot.as_view(), name="haikalbot"),
]

66
haikalbot/utils/export.py Normal file
View File

@ -0,0 +1,66 @@
from django.http import HttpResponse
import pandas as pd
from io import BytesIO, StringIO
def export_to_excel(data, filename="model_analysis"):
    """
    Export data as an Excel (.xlsx) download response.

    Fixes two defects: the original module-level function took a spurious
    ``self`` parameter (so the one-argument call sites in the views raised a
    TypeError), and ``filename`` was never interpolated into the
    Content-Disposition header. ``filename`` now has a default so single-
    argument callers keep working.

    :param data: Rows convertible to a pandas DataFrame.
    :param filename: Base filename without extension.
    :return: HttpResponse carrying the Excel file.
    """
    df = pd.DataFrame(data)
    # Build the workbook entirely in memory.
    excel_file = BytesIO()
    with pd.ExcelWriter(excel_file, engine='xlsxwriter') as writer:
        df.to_excel(writer, sheet_name='Model Analysis', index=False)
        # Auto-size each column to its widest cell (plus padding).
        worksheet = writer.sheets['Model Analysis']
        for i, col in enumerate(df.columns):
            max_width = max(df[col].astype(str).map(len).max(), len(col)) + 2
            worksheet.set_column(i, i, max_width)
    excel_file.seek(0)
    response = HttpResponse(
        excel_file.read(),
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = f'attachment; filename="{filename}.xlsx"'
    return response
def export_to_csv(data, filename="model_analysis"):
    """
    Export data as a CSV download response.

    Fixes two defects: the original module-level function took a spurious
    ``self`` parameter (breaking the one-argument call sites in the views),
    and ``filename`` was never interpolated into the Content-Disposition
    header. ``filename`` now has a default so single-argument callers work.

    :param data: Rows convertible to a pandas DataFrame.
    :param filename: Base filename without extension.
    :return: HttpResponse carrying the CSV file.
    """
    df = pd.DataFrame(data)
    # Render the CSV entirely in memory.
    csv_file = StringIO()
    df.to_csv(csv_file, index=False)
    response = HttpResponse(csv_file.getvalue(), content_type='text/csv')
    response['Content-Disposition'] = f'attachment; filename="{filename}.csv"'
    return response

View File

@ -1,253 +1,63 @@
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.views import View
from django.http import JsonResponse
from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
import json
import hashlib
from django.shortcuts import render
from django.utils.translation import gettext as _
from django.views import View
import logging
import uuid
import re
from inventory import models as inventory_models
from inventory.utils import get_user_type
from .models import AnalysisCache
from .services.llm_service import get_llm_chain
from .services.analysis_service import (
generate_model_insight,
generate_count_insight,
generate_relationship_insight,
generate_performance_insight,
generate_statistics_insight,
generate_recommendations
)
from .services.cache_service import CacheService
from .utils.response_formatter import format_response
from .ai_agent import analyze_prompt
from .utils.export import export_to_excel, export_to_csv
logger = logging.getLogger(__name__)
@method_decorator(csrf_exempt, name='dispatch')
class ModelAnalystView(View):
"""
View for handling model analysis requests and rendering the chatbot interface.
This view provides both GET and POST methods:
- GET: Renders the chatbot interface
- POST: Processes analysis requests and returns JSON responses
The view includes caching, permission checking, and multilingual support.
"""
# Configuration settings (can be moved to Django settings)
CACHE_DURATION = getattr(settings, 'ANALYSIS_CACHE_DURATION', 3600)
DEFAULT_LANGUAGE = getattr(settings, 'DEFAULT_LANGUAGE', 'en')
class HaikalBot(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
"""
Render the chatbot interface.
:param request: The HTTP request
:return: Rendered chatbot.html template
Render the chat interface.
"""
context = {
'dark_mode': request.session.get('dark_mode', False)
'dark_mode': request.session.get('dark_mode', False),
'page_title': _('AI Assistant')
}
return render(request, "haikalbot/chatbot.html", context)
return render(request, "haikalbot/chat.html", context)
def post(self, request, *args, **kwargs):
"""
Process analysis requests and return JSON responses.
:param request: The HTTP request containing the prompt
:return: JsonResponse with analysis results
Process the prompt and return results.
"""
prompt = request.POST.get("prompt")
export = request.POST.get("export")
language = request.POST.get("language", request.LANGUAGE_CODE)
if not prompt:
error_msg = _("Prompt is required.") if language != "ar" else "الاستعلام مطلوب."
return JsonResponse({"status": "error", "error": error_msg}, status=400)
try:
# Parse request data
data = json.loads(request.body)
prompt = data.get('prompt')
language = data.get('language', self.DEFAULT_LANGUAGE)
dealer = get_user_type(request)
result = analyze_prompt(prompt)
# Validate request
if not prompt:
error_msg = "الاستعلام مطلوب" if language == 'ar' else "Prompt is required"
return self._error_response(error_msg, 400)
# Handle export requests if data is available
if export and result.get("status") == "success" and result.get("data"):
try:
if export == "excel":
return export_to_excel(result["data"])
elif export == "csv":
return export_to_csv(result["data"])
except Exception as e:
logger.error(f"Export error: {e}")
result["export_error"] = str(e)
if not self._check_permissions(dealer.id):
error_msg = "تم رفض الإذن" if language == 'ar' else "Permission denied"
return self._error_response(error_msg, 403)
return JsonResponse(result, safe=False)
# Check cache
cache_service = CacheService()
prompt_hash = cache_service.generate_hash(prompt, dealer.id, language)
cached_result = cache_service.get_cached_result(prompt_hash, request.user, dealer.id)
if cached_result:
return JsonResponse(cached_result)
# Process prompt and generate insights
insights = self._process_prompt(prompt, dealer, language)
# Cache results
cache_service.cache_result(
prompt_hash,
insights,
request.user,
dealer.id,
self.CACHE_DURATION
)
return JsonResponse(insights)
except json.JSONDecodeError:
error_msg = "بيانات JSON غير صالحة في نص الطلب" if language == 'ar' else "Invalid JSON in request body"
return self._error_response(error_msg, 400)
except Exception as e:
logger.exception("Error processing model analysis request")
error_msg = f"حدث خطأ: {str(e)}" if language == 'ar' else f"An error occurred: {str(e)}"
return self._error_response(error_msg, 500)
logger.exception(f"Error processing prompt: {e}")
error_msg = _("An error occurred while processing your request.")
if language == "ar":
error_msg = "حدث خطأ أثناء معالجة طلبك."
def _error_response(self, message, status):
"""
Create a standardized error response.
:param message: Error message
:param status: HTTP status code
:return: JsonResponse with error details
"""
return JsonResponse({"status": "error", "message": message}, status=status)
def _check_permissions(self, dealer_id):
"""
Check if the dealer has permissions to access the analysis.
:param dealer_id: ID of the dealer
:return: True if dealer has permissions, False otherwise
"""
try:
return inventory_models.Dealer.objects.filter(id=dealer_id).exists()
except Exception:
logger.exception("Error checking permissions")
return False
def _process_prompt(self, prompt, dealer, language):
"""
Process the prompt and generate insights.
:param prompt: User's prompt text
:param dealer: Dealer object
:param language: Language code (e.g., 'en', 'ar')
:return: Dictionary with analysis results
"""
# Initialize response structure
response = format_response(
prompt=prompt,
language=language,
request_id=str(uuid.uuid4()),
timestamp=timezone.now().isoformat()
)
# Get LLM chain for prompt analysis
chain = get_llm_chain(language=language)
# Parse prompt using LLM
if chain:
try:
result = chain.invoke({"prompt": prompt})
json_match = re.search(r'({.*})', result.replace('\n', ' '), re.DOTALL)
result = json.loads(json_match.group(1)) if json_match else {}
except Exception as e:
logger.error(f"LLM error fallback: {e}")
result = {}
else:
result = {}
# Extract analysis parameters
analysis_type = result.get('analysis_type', 'general')
target_models = result.get('target_models', [])
query_params = result.get('query_params', {})
# Get models to analyze
all_models = list(apps.get_models())
models_to_analyze = self._filter_models(all_models, target_models)
if dealer:
models_to_analyze = self._filter_by_dealer(models_to_analyze, dealer.id)
# Select analysis method based on type
analysis_method = {
'count': generate_count_insight,
'relationship': generate_relationship_insight,
'performance': generate_performance_insight,
'statistics': generate_statistics_insight
}.get(analysis_type, self._generate_model_insight_all)
# Generate insights
insights = analysis_method(models_to_analyze, query_params, dealer.id if dealer else None, language)
# Add insights to response
insights_key = "التحليلات" if language == 'ar' else "insights"
if isinstance(insights, list):
response[insights_key].extend(insights)
else:
response[insights_key].append(insights)
# Generate recommendations
recommendations = generate_recommendations(models_to_analyze, analysis_type, language)
if recommendations:
recs_key = "التوصيات" if language == 'ar' else "recommendations"
response[recs_key] = recommendations
# Add plain text summary for response
summary_lines = []
for insight in response[insights_key]:
if isinstance(insight, dict):
summary_lines.append(insight.get('type', 'Insight'))
else:
summary_lines.append(str(insight))
response['response'] = "\n".join(summary_lines)
return response
def _filter_models(self, all_models, target_models):
"""
Filter models based on target model names.
:param all_models: List of all available models
:param target_models: List of target model names
:return: Filtered list of models
"""
if not target_models:
return all_models
return [m for m in all_models if m.__name__ in target_models or
m.__name__.lower() in [t.lower() for t in target_models]]
def _filter_by_dealer(self, models, dealer_id):
"""
Filter models that are relevant to the dealer.
:param models: List of models
:param dealer_id: ID of the dealer
:return: Filtered list of models
"""
dealer_models = [m for m in models if any(f.name in ('dealer', 'dealer_id')
for f in m._meta.fields)]
return dealer_models if dealer_models else models
def _generate_model_insight_all(self, models, query_params, dealer_id, language):
"""
Generate insights for all models.
:param models: List of models
:param query_params: Query parameters
:param dealer_id: ID of the dealer
:param language: Language code
:return: List of insights
"""
return [generate_model_insight(m, dealer_id, language) for m in models]
return JsonResponse({
"status": "error",
"error": error_msg,
"details": str(e) if request.user.is_staff else None
}, status=500)

View File

@ -6,7 +6,7 @@ import requests
import json
from pyvin import VIN
from vin import VIN
from django.conf import settings
from .models import CarMake
@ -111,9 +111,9 @@ def decode_vin(vin):
data = {}
if v:
data = {
"maker": v.Make,
"model": v.Model,
"modelYear": v.ModelYear,
"maker": v.make,
"model": v.model,
"modelYear": v.model_year,
}
return data if all([x for x in data.values()]) else None

View File

@ -1028,8 +1028,7 @@ class CarFinanceCalculator:
def calculate_totals(self):
total_price = sum(
Decimal(self._get_nested_value(item, self.CAR_FINANCE_KEY, 'selling_price')) *
int(self._get_quantity(item))
Decimal(self._get_nested_value(item, self.CAR_FINANCE_KEY, 'selling_price')) * int(self._get_quantity(item))
for item in self.item_transactions
)
total_additionals = sum(Decimal(x.get('price_')) for x in self._get_additional_services())
@ -1068,7 +1067,6 @@ class CarFinanceCalculator:
def get_item_transactions(txs):
"""
Extracts and compiles relevant transaction details from a list of transactions,

View File

@ -658,40 +658,40 @@ class AjaxHandlerView(LoginRequiredMixin, View):
vin_no = vin_no.strip()
vin_data = {}
decoding_method = ""
if result := decodevin(vin_no):
manufacturer_name, model_name, year_model = result.values()
car_make = get_make(manufacturer_name)
car_model = get_model(model_name, car_make)
logger.info(
f"VIN decoded using {decoding_method}: Make={manufacturer_name}, Model={model_name}, Year={year_model}"
)
if not car_make:
return JsonResponse(
{
"success": False,
"error": _("Manufacturer not found in the database"),
},
status=404,
)
vin_data["make_id"] = car_make.id_car_make
vin_data["name"] = car_make.name
vin_data["arabic_name"] = car_make.arabic_name
if not car_model:
vin_data["model_id"] = ""
else:
vin_data["model_id"] = car_model.id_car_model
vin_data["year"] = year_model
return JsonResponse({"success": True, "data": vin_data})
# manufacturer_name = model_name = year_model = None
if not (result := decodevin(vin_no)):
return JsonResponse(
{"success": False, "error": _("VIN not found in all sources")},
status=404,
)
manufacturer_name, model_name, year_model = result.values()
car_make = get_make(manufacturer_name)
car_model = get_model(model_name, car_make)
logger.info(
f"VIN decoded using {decoding_method}: Make={manufacturer_name}, Model={model_name}, Year={year_model}"
return JsonResponse(
{"success": False, "error": _("VIN not found in all sources")},
status=404,
)
if not car_make:
return JsonResponse(
{
"success": False,
"error": _("Manufacturer not found in the database"),
},
status=404,
)
vin_data["make_id"] = car_make.id_car_make
vin_data["name"] = car_make.name
vin_data["arabic_name"] = car_make.arabic_name
if not car_model:
vin_data["model_id"] = ""
else:
vin_data["model_id"] = car_model.id_car_model
vin_data["year"] = year_model
return JsonResponse({"success": True, "data": vin_data})
def get_models(self, request):
make_id = request.GET.get("make_id")
car_models = (
@ -7896,6 +7896,7 @@ def submit_plan(request):
def payment_callback(request):
message = request.GET.get("message")
dealer = get_user_type(request)
payment_id = request.GET.get("id")
history = models.PaymentHistory.objects.filter(transaction_id=payment_id).first()
@ -7933,7 +7934,7 @@ def payment_callback(request):
elif payment_status == "failed":
history.status = "failed"
history.save()
message = request.GET.get("message")
return render(request, "payment_failed.html", {"message": message})

Binary file not shown.

File diff suppressed because it is too large Load Diff

BIN
static/.DS_Store vendored

Binary file not shown.

1356
static/icons/HaikalAi.ai Normal file

File diff suppressed because one or more lines are too long

Binary file not shown.

BIN
templates/.DS_Store vendored

Binary file not shown.

BIN
templates/haikalbot/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -0,0 +1,188 @@
{% extends 'base.html' %}
{% load i18n static %}
{% block title %}Haikal Bot{% endblock %}
{% block content %}
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/PapaParse/5.3.2/papaparse.min.js"></script>
<div class="container mt-5">
<div class="card shadow-sm">
<div class="card-header d-flex justify-content-between align-items-center">
<h5 class="mb-0"><i class="fas fa-robot me-2"></i>{% trans "HaikalBot" %}</h5>
<div>
<button id="export-btn" class="btn btn-sm btn-outline-secondary" style="display:none;">
{% trans "Export CSV" %}
</button>
</div>
</div>
<div class="card-body" style="max-height: 60vh; overflow-y: auto;" id="chat-history"></div>
<div class="card-footer bg-white border-top">
<form id="chat-form" class="d-flex align-items-center gap-2">
<button type="button" class="btn btn-light" id="mic-btn"><i class="fas fa-microphone"></i></button>
<input type="text" class="form-control" id="chat-input" placeholder="{% trans 'Type your question...' %}" required />
<button type="submit" class="btn btn-primary"><i class="fas fa-paper-plane"></i></button>
</form>
</div>
<div id="chart-container" style="display:none;" class="p-4 border-top">
<canvas id="chart-canvas" height="200px"></canvas>
</div>
</div>
</div>
<script>
// Cached DOM handles for the chat transcript, the chart area, and the CSV export button.
const chatHistory = document.getElementById('chat-history');
const chartContainer = document.getElementById('chart-container');
const chartCanvas = document.getElementById('chart-canvas');
const exportBtn = document.getElementById('export-btn');
// Active Chart.js instance (destroyed before each new answer is rendered).
let chartInstance = null;
// Last tabular result, kept so the export button can serialise it to CSV.
let latestDataTable = null;
// Return the decoded value of the named cookie, or null if it is not set.
function getCookie(name) {
    if (!document.cookie) {
        return null;
    }
    const prefix = name + "=";
    for (const raw of document.cookie.split(";")) {
        const candidate = raw.trim();
        if (candidate.startsWith(prefix)) {
            return decodeURIComponent(candidate.slice(prefix.length));
        }
    }
    return null;
}
// Speak the given text aloud via the Web Speech synthesis API,
// using the page's declared language (falling back to English).
function speak(text) {
    const utter = new SpeechSynthesisUtterance(text);
    utter.lang = document.documentElement.lang || "en";
    window.speechSynthesis.speak(utter);
}
// Render a non-empty array of plain objects as a Bootstrap table.
// Side effects: remembers the data in latestDataTable (for CSV export)
// and reveals the export button. Returns the table HTML as a string.
// Columns are taken from the first row's keys.
function renderTable(data) {
    latestDataTable = data;
    exportBtn.style.display = 'inline-block';
    // Escape header/cell text so values containing <, >, & or quotes
    // cannot break the markup or inject HTML into the chat transcript.
    const esc = (v) => String(v)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;');
    const headers = Object.keys(data[0]);
    let html = '<div class="table-responsive"><table class="table table-bordered table-striped"><thead><tr>';
    headers.forEach(h => html += `<th>${esc(h)}</th>`);
    html += '</tr></thead><tbody>';
    data.forEach(row => {
        html += '<tr>' + headers.map(h => `<td>${esc(row[h])}</td>`).join('') + '</tr>';
    });
    html += '</tbody></table></div>';
    return html;
}
// Append one chat bubble to the transcript and keep it scrolled to the bottom.
// role: display label ('You' or 'AI'); htmlContent: trusted, pre-escaped HTML.
function appendMessage(role, htmlContent) {
    const align = role === 'AI' ? 'bg-secondary-light' : 'bg-primary-light';
    // insertAdjacentHTML appends in place; `innerHTML +=` would re-parse the
    // whole transcript on every message, rebuilding all existing nodes and
    // dropping any listeners/state attached to them.
    chatHistory.insertAdjacentHTML('beforeend', `
        <div class="mb-3 p-3 rounded ${align}">
            <strong>${role}:</strong><br>${htmlContent}
        </div>
    `);
    chatHistory.scrollTop = chatHistory.scrollHeight;
}
// Handle chat form submission: echo the user's prompt, POST it to the
// HaikalBot endpoint, then render the JSON reply as a chart, a table,
// or plain text depending on its shape.
document.getElementById('chat-form').addEventListener('submit', async function(e) {
    e.preventDefault();
    const input = document.getElementById('chat-input');
    const prompt = input.value.trim();
    const csrfToken = getCookie("csrftoken");
    if (!prompt) return;
    // appendMessage injects its argument as HTML, so escape the raw user
    // text first; otherwise a prompt containing <, > or & is parsed as
    // markup (broken rendering / self-XSS).
    const safePrompt = prompt
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;');
    appendMessage('You', safePrompt);
    input.value = "";
    // Clear chart/export state left over from the previous answer.
    chartContainer.style.display = 'none';
    exportBtn.style.display = 'none';
    if (chartInstance) {
        chartInstance.destroy();
        chartInstance = null;
    }
    const response = await fetch("{% url 'haikalbot' %}", {
        method: "POST",
        headers: {
            "Content-Type": "application/x-www-form-urlencoded",
            "X-CSRFToken": csrfToken
        },
        body: new URLSearchParams({ prompt })
    });
    const result = await response.json();
    // Chart answer: the server supplied a complete Chart.js spec.
    if (result.chart && result.chart.type && result.chart.labels && result.chart.data) {
        chartInstance = new Chart(chartCanvas, {
            type: result.chart.type,
            data: {
                labels: result.chart.labels,
                datasets: [{
                    label: result.chart.labels.join(", "),
                    data: result.chart.data,
                    backgroundColor: result.chart.backgroundColor || []
                }]
            },
            options: {
                responsive: true,
                plugins: {
                    title: {
                        display: true,
                        text: result.chart.type.toUpperCase()
                    }
                }
            }
        });
        chartContainer.style.display = 'block';
        appendMessage('AI', `{% trans "Chart displayed below." %}`);
        return;
    }
    // Tabular answer: a non-empty list of objects becomes an HTML table.
    if (Array.isArray(result.data) && result.data.length && typeof result.data[0] === 'object') {
        const tableHTML = renderTable(result.data);
        appendMessage('AI', tableHTML);
    } else {
        // Fallback: pretty-print objects, show everything else as text.
        const content = typeof result.data === 'object'
            ? `<pre>${JSON.stringify(result.data, null, 2)}</pre>`
            : `<p>${result.data}</p>`;
        appendMessage('AI', content);
    }
});
// Export the most recently rendered table as a CSV download
// (no-op if no table has been rendered yet).
document.getElementById('export-btn').addEventListener('click', () => {
    if (!latestDataTable) return;
    const csv = Papa.unparse(latestDataTable);
    const blob = new Blob([csv], { type: 'text/csv;charset=utf-8;' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = 'haikal_data.csv';
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    // Release the object URL; without this every export leaks the blob
    // for the lifetime of the page.
    URL.revokeObjectURL(url);
});
// Voice input (speech-to-text)
// Voice input: fill the chat box from one utterance of speech-to-text.
document.getElementById('mic-btn').addEventListener('click', () => {
    const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
    // The Web Speech API is unavailable in some browsers (e.g. Firefox);
    // without this guard `new undefined()` throws an unhandled TypeError.
    if (!SpeechRecognitionImpl) {
        console.error('Speech recognition is not supported in this browser');
        return;
    }
    const recognition = new SpeechRecognitionImpl();
    recognition.lang = document.documentElement.lang || "en";
    recognition.interimResults = false;
    recognition.maxAlternatives = 1;
    recognition.onresult = (event) => {
        const speech = event.results[0][0].transcript;
        document.getElementById('chat-input').value = speech;
    };
    recognition.onerror = (e) => {
        console.error('Speech recognition error', e);
    };
    recognition.start();
});
</script>
{% endblock %}

View File

@ -500,7 +500,6 @@ $(document).ready(function() {
return isArabic ? (obj[arabicKey] || obj[englishKey] || '') : (obj[englishKey] || obj[arabicKey] || '');
}
// Copy message functionality
$(document).on('click', '.copy-btn', function() {
const text = $(this).closest('.d-flex').find('.chat-message').text().trim();
navigator.clipboard.writeText(text).then(() => {
@ -516,7 +515,6 @@ $(document).ready(function() {
}, 1500);
}
// Initialize
scrollToBottom();
});
</script>

View File

@ -34,17 +34,13 @@
{{ service.pk }}
</td>
<td class="align-middle product white-space-nowrap">
{{ service.get_local_name }}
{{ service.get_local_name|default:service.name }}
</td>
<td class="align-middle product white-space-nowrap">
{{ service.uom }}
{{ service.get_uom_display }}
</td>
<td class="align-middle product white-space-nowrap">
{% if service.taxable %}
Yes
{% else %}
No
{% endif %}
{{ service.taxable|yesno }}
</td>
<td class="align-middle product white-space-nowrap">
{{ service.item.co }}