This commit is contained in:
Marwan Alwali 2025-05-26 15:17:10 +03:00
parent f5c15feca1
commit 250e0aa7bb
59 changed files with 6032 additions and 2264 deletions

View File

@ -1,4 +1,4 @@
# Generated by Django 5.1.7 on 2025-05-04 16:07 # Generated by Django 5.2.1 on 2025-05-25 23:01
from django.db import migrations, models from django.db import migrations, models

View File

@ -24,7 +24,7 @@ urlpatterns += i18n_patterns(
# path('prometheus/', include('django_prometheus.urls')), # path('prometheus/', include('django_prometheus.urls')),
path('', include('inventory.urls')), path('', include('inventory.urls')),
path('ledger/', include('django_ledger.urls', namespace='django_ledger')), path('ledger/', include('django_ledger.urls', namespace='django_ledger')),
# path("haikalbot/", include("haikalbot.urls")), path("haikalbot/", include("haikalbot.urls")),
path('appointment/', include('appointment.urls')), path('appointment/', include('appointment.urls')),
path('plans/', include('plans.urls')), path('plans/', include('plans.urls')),
path("schema/", Schema.as_view()), path("schema/", Schema.as_view()),

BIN
haikalbot.zip Normal file

Binary file not shown.

View File

@ -0,0 +1,204 @@
# Optimizing Qwen3-8B for Arabic Language Support in Django AI Analyst
This guide provides specific recommendations for using Qwen3-8B with your Django AI Analyst application for Arabic language support.
## Qwen3-8B Overview
Qwen3-8B is a powerful multilingual large language model developed by Alibaba Cloud. It offers several advantages for Arabic language processing:
- **Strong multilingual capabilities**: Trained on diverse multilingual data including Arabic
- **Efficient performance**: 8B parameter size balances capability and resource requirements
- **Instruction following**: Excellent at following structured instructions in multiple languages
- **Context understanding**: Good comprehension of Arabic context and nuances
- **JSON formatting**: Reliable at generating structured JSON outputs
## Configuration Settings for Qwen3-8B
Update your Django settings to use Qwen3-8B:
```python
# In settings.py
OLLAMA_BASE_URL = "http://localhost:11434"
OLLAMA_MODEL = "qwen3:8b"
OLLAMA_TIMEOUT = 120 # Seconds
```
## Optimized Parameters for Arabic
When initializing the Ollama LLM with Qwen3-8B for Arabic, use these optimized parameters:
```python
def get_ollama_llm():
"""
Initialize and return an Ollama LLM instance configured for Arabic support with Qwen3-8B.
"""
try:
# Get settings from Django settings or use defaults
base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
# Configure Ollama with parameters optimized for Qwen3-8B with Arabic
return Ollama(
base_url=base_url,
model=model,
timeout=timeout,
# Parameters optimized for Qwen3-8B with Arabic
parameters={
"temperature": 0.2, # Lower temperature for more deterministic outputs
"top_p": 0.8, # Slightly reduced for more focused responses
"top_k": 40, # Standard value works well with Qwen3
"num_ctx": 4096, # Qwen3 supports larger context windows
"num_predict": 2048, # Maximum tokens to generate
"stop": ["```", "</s>"], # Stop sequences for JSON generation
"repeat_penalty": 1.1 # Slight penalty to avoid repetition
}
)
except Exception as e:
logger.error(f"Error initializing Ollama LLM: {str(e)}")
return None
```
## Prompt Template Optimization for Qwen3-8B
Qwen3-8B responds well to clear, structured prompts. For Arabic analysis, use this optimized template:
```python
def create_prompt_analyzer_chain(language='ar'):
"""
Create a LangChain for analyzing prompts in Arabic with Qwen3-8B.
"""
llm = get_ollama_llm()
if not llm:
return None
# Define the prompt template optimized for Qwen3-8B
if language == 'ar':
template = """
أنت مساعد ذكي متخصص في تحليل نماذج Django. مهمتك هي تحليل الاستعلام التالي وتحديد:
1. نوع التحليل المطلوب
2. نماذج البيانات المستهدفة
3. أي معلمات استعلام
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON فقط، بدون أي نص إضافي، كما يلي:
```json
{{
"analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
```
"""
else:
template = """
You are an intelligent assistant specialized in analyzing Django models. Your task is to analyze the following prompt and determine:
1. The type of analysis required
2. Target data models
3. Any query parameters
Prompt: {prompt}
Provide your answer in JSON format only, without any additional text, as follows:
```json
{
"analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {"field1": "value1", "field2": "value2"}
}
```
"""
# Create the prompt template
prompt_template = PromptTemplate(
input_variables=["prompt"],
template=template
)
# Create and return the LLM chain
return LLMChain(llm=llm, prompt=prompt_template)
```
## Improved JSON Parsing for Qwen3-8B Responses
Qwen3-8B sometimes includes markdown formatting in its JSON responses. Use this improved parsing function:
```python
def _parse_llm_json_response(result):
"""
Parse JSON from Qwen3-8B response, handling markdown formatting.
"""
try:
# First try to extract JSON from markdown code blocks
json_match = re.search(r'```(?:json)?\s*([\s\S]*?)\s*```', result)
if json_match:
json_str = json_match.group(1).strip()
return json.loads(json_str)
# If no markdown blocks, try to find JSON object directly
json_match = re.search(r'({[\s\S]*})', result)
if json_match:
json_str = json_match.group(1).strip()
return json.loads(json_str)
# If still no match, try to parse the entire response as JSON
return json.loads(result.strip())
except Exception as e:
logger.warning(f"Failed to parse JSON from LLM response: {str(e)}")
return None
```
## Performance Considerations for Qwen3-8B
- **Memory Usage**: Qwen3-8B typically requires 8-16GB of RAM when running on Ollama
- **First Request Latency**: The first request may take 5-10 seconds as the model loads
- **Subsequent Requests**: Typically respond within 1-3 seconds
- **Batch Processing**: Consider batching multiple analyses for efficiency
## Handling Arabic-Specific Challenges with Qwen3-8B
1. **Diacritics**: Qwen3-8B handles Arabic diacritics well, but for consistency, consider normalizing input by removing diacritics
2. **Text Direction**: When displaying results in frontend, ensure proper RTL (right-to-left) support
3. **Dialectal Variations**: Qwen3-8B performs best with Modern Standard Arabic (MSA), but has reasonable support for major dialects
4. **Technical Terms**: For Django-specific technical terms, consider providing a glossary in both English and Arabic
## Example Arabic Prompts Optimized for Qwen3-8B
```
# Count query
كم عدد السيارات المتوفرة في النظام؟
# Relationship analysis
ما هي العلاقة بين نموذج المستخدم ونموذج الطلب؟
# Performance analysis
حدد مشاكل الأداء المحتملة في نموذج المنتج
# Statistical analysis
ما هو متوسط سعر السيارات المتوفرة؟
```
## Troubleshooting Qwen3-8B Specific Issues
1. **Incomplete JSON**: If Qwen3-8B returns incomplete JSON, try:
- Reducing the complexity of your prompt
- Lowering the temperature parameter to 0.1
- Adding explicit JSON formatting instructions
2. **Arabic Character Encoding**: If you see garbled Arabic text, ensure:
- Your database uses UTF-8 encoding
- All HTTP responses include proper content-type headers
- Frontend properly handles Arabic character rendering
3. **Slow Response Times**: If responses are slow:
- Consider using the quantized version: `qwen3:8b-q4_0`
- Reduce context window size if full 4096 context isn't needed
- Implement more aggressive caching
## Conclusion
Qwen3-8B is an excellent choice for Arabic language support in your Django AI Analyst application. With these optimized settings and techniques, you'll get reliable performance for analyzing Django models through Arabic natural language prompts.

163
haikalbot/README.md Normal file
View File

@ -0,0 +1,163 @@
# Django AI Analyst - README
This package provides a Django application that enables AI-powered analysis of Django models through natural language prompts. The AI agent can analyze model structures, relationships, and data to provide insights in JSON format.
## Features
- Natural language prompt processing for model analysis
- Support for various types of insights:
- Count queries (e.g., "How many cars do we have?")
- Relationship analysis between models
- Performance optimization suggestions
- Statistical analysis of model fields
- General model structure analysis
- Dealer-specific data access controls
- Caching mechanism for improved performance
- Visualization data generation for frontend display
- Comprehensive test suite
## Installation
1. Add 'ai_analyst' to your INSTALLED_APPS setting:
```python
INSTALLED_APPS = [
...
'ai_analyst',
]
```
2. Include the ai_analyst URLconf in your project urls.py:
```python
path('api/ai/', include('ai_analyst.urls')),
```
3. Run migrations to create the AnalysisCache model:
```bash
python manage.py makemigrations ai_analyst
python manage.py migrate
```
## Usage
Send POST requests to the `/api/ai/analyze/` endpoint with a JSON body containing:
```json
{
  "prompt": "How many cars do we have?",
  "dealer_id": 1
}
```
The `dealer_id` field is optional and scopes the analysis to a specific dealer. (Note: JSON does not permit comments, so keep the request body free of `//` annotations.)
The response will be a JSON object with insights based on the prompt:
```json
{
"status": "success",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"prompt": "How many cars do we have?",
"insights": [
{
"type": "count_analysis",
"results": [
{
"model": "Car",
"count": 42,
"filters_applied": {}
}
],
"visualization_data": {
"chart_type": "bar",
"labels": ["Car"],
"data": [42]
}
}
]
}
```
## Customization
### Cache Duration
You can customize the cache duration by setting the `CACHE_DURATION` class variable in the `ModelAnalystView` class:
```python
# In your settings.py
AI_ANALYST_CACHE_DURATION = 7200 # 2 hours in seconds
# Then in views.py
class ModelAnalystView(View):
CACHE_DURATION = getattr(settings, 'AI_ANALYST_CACHE_DURATION', 3600)
# ...
```
### Permission Logic
The `_check_permissions` method in `ModelAnalystView` can be customized to match your application's permission model:
```python
def _check_permissions(self, user, dealer_id):
# Your custom permission logic here
return user.has_perm('ai_analyst.can_analyze_models')
```
## Example Prompts
- "How many cars do we have?"
- "Show relationship between User and Order"
- "What is the average price of products?"
- "Count active users"
- "Identify performance issues in the Order model"
- "Show maximum age of customers"
## Frontend Integration
The JSON responses include visualization_data that can be used with charting libraries like Chart.js:
```javascript
// Example with Chart.js
fetch('/api/ai/analyze/', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt: 'How many cars do we have?',
dealer_id: 1
}),
})
.then(response => response.json())
.then(data => {
if (data.status === 'success' && data.insights.length > 0) {
const insight = data.insights[0];
const vizData = insight.visualization_data;
const ctx = document.getElementById('insightChart').getContext('2d');
new Chart(ctx, {
type: vizData.chart_type,
data: {
labels: vizData.labels,
datasets: [{
label: insight.type,
data: vizData.data,
backgroundColor: [
'rgba(255, 99, 132, 0.2)',
'rgba(54, 162, 235, 0.2)',
'rgba(255, 206, 86, 0.2)'
],
borderColor: [
'rgba(255, 99, 132, 1)',
'rgba(54, 162, 235, 1)',
'rgba(255, 206, 86, 1)'
],
borderWidth: 1
}]
}
});
}
});
```

View File

@ -1,2 +1,15 @@
from django.contrib import admin
from .models import AnalysisCache
# Register your models here.
@admin.register(AnalysisCache)
class AnalysisCacheAdmin(admin.ModelAdmin):
    """Admin listing for cached AI analysis results."""
    list_display = ('prompt_hash', 'dealer_id', 'created_at', 'expires_at', 'is_expired')
    list_filter = ('dealer_id', 'created_at')
    search_fields = ('prompt_hash',)
    # Cache entries are immutable bookkeeping; keep identifying fields read-only.
    readonly_fields = ('prompt_hash', 'created_at', 'updated_at')

    # Modern replacement for `is_expired.boolean = True`: also supplies a
    # readable column header, which the original omitted.
    @admin.display(boolean=True, description='Expired')
    def is_expired(self, obj):
        # Delegate to the model's own expiry check.
        return obj.is_expired()

231
haikalbot/analysis_utils.py Normal file
View File

@ -0,0 +1,231 @@
from django.db.models import Avg, Sum, Max, Min, ForeignKey, OneToOneField
import inspect
from django.db import models
from django.utils.translation import gettext_lazy as _
def _localized_keys(language):
if language == 'ar':
return {
'type': 'نوع', 'model': 'النموذج', 'count': 'العدد', 'filters': 'الفلاتر_المطبقة',
'error': 'خطأ', 'chart_type': 'نوع_الرسم_البياني', 'labels': 'التسميات', 'data': 'البيانات',
'visualization_data': 'بيانات_الرسم_البياني', 'field': 'الحقل', 'value': 'القيمة',
'statistic_type': 'نوع_الإحصاء', 'results': 'النتائج', 'title': 'العنوان'
}
else:
return {
'type': 'type', 'model': 'model', 'count': 'count', 'filters': 'filters_applied',
'error': 'error', 'chart_type': 'chart_type', 'labels': 'labels', 'data': 'data',
'visualization_data': 'visualization_data', 'field': 'field', 'value': 'value',
'statistic_type': 'statistic_type', 'results': 'results', 'title': 'title'
}
def generate_count_insight(models, query_params, dealer_id=None, language='ar'):
    """Build a row-count insight for each model class in ``models``.

    Bug fixes vs. the original:
    - the ``models`` parameter shadowed the module-level ``django.db.models``
      import, so ``models.IntegerField``/``models.BooleanField`` raised
      ``AttributeError`` (silently swallowed) and value coercion never ran;
      a local alias restores the intended coercion.
    - the insight ``type`` was built from the localized word for "type"
      (producing ``type_analysis``/``نوع_analysis``); it now uses the
      ``count_analysis`` label documented in the README and consistent with
      the relationship/performance generators.

    :param models: iterable of Django model classes to count.
    :param query_params: prompt-derived parameters; keys other than ``field``
        and ``operation`` that exist on the model are applied as filters.
    :param dealer_id: optional dealer scope (via ``dealer_id`` or ``dealer``).
    :param language: ``'ar'`` for Arabic output keys, otherwise English.
    :return: dict with per-model counts plus bar-chart visualization data.
    """
    from django.db import models as dj_models  # local alias: the `models` parameter shadows the module

    keys = _localized_keys(language)
    results = []
    for model in models:
        try:
            queryset = model.objects.all()
            # Scope to a single dealer when requested and the model supports it.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {}
            for key, value in query_params.items():
                if key not in ['field', 'operation'] and hasattr(model, key):
                    try:
                        # Coerce raw string values to the field's Python type so
                        # e.g. "1" matches an IntegerField and "true" a BooleanField.
                        field = model._meta.get_field(key)
                        if isinstance(field, dj_models.IntegerField):
                            value = int(value)
                        elif isinstance(field, dj_models.BooleanField):
                            value = value.lower() in ('true', '1', 'yes')
                    except Exception:
                        pass  # unknown field / bad value: filter with the raw value
                    filters[key] = value
            if filters:
                queryset = queryset.filter(**filters)
            results.append({
                keys['model']: model.__name__,
                keys['count']: queryset.count(),
                keys['filters']: filters
            })
        except Exception as e:
            # One failing model must not abort the whole insight.
            results.append({
                keys['model']: model.__name__,
                keys['error']: str(e)
            })
    return {
        'type': 'تحليل_العدد' if language == 'ar' else 'count_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [r[keys['model']] for r in results if keys['count'] in r],
            keys['data']: [r[keys['count']] for r in results if keys['count'] in r]
        }
    }
def generate_statistics_insight(models, query_params, dealer_id=None, language='ar'):
    """Aggregate a single field across each model (average/sum/max/min).

    Bug fix vs. the original: the insight ``type`` was built from the
    localized word for "type" (yielding ``type_analysis``/``نوع_analysis``);
    it now uses the ``statistics_analysis`` naming consistent with the
    relationship/performance generators.

    :param models: iterable of Django model classes to aggregate over.
    :param query_params: must contain ``field``; optional ``operation``
        (``average``/``sum``/``max``/``min``, default ``average``); any other
        keys that exist on the model are applied as filters. An unknown
        operation falls back to a plain row count.
    :param dealer_id: optional dealer scope (via ``dealer_id`` or ``dealer``).
    :param language: ``'ar'`` for Arabic output keys, otherwise English.
    :return: dict with per-model statistic values plus bar-chart data.
    """
    keys = _localized_keys(language)
    results = []
    field = query_params.get('field')
    operation = query_params.get('operation', 'average')
    # Loop-invariant: map operation names to Django aggregate classes once.
    stat_map = {
        'average': Avg,
        'sum': Sum,
        'max': Max,
        'min': Min
    }
    for model in models:
        try:
            if not field or not hasattr(model, field):
                continue  # skip models that do not expose the requested field
            queryset = model.objects.all()
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {
                k: v for k, v in query_params.items()
                if k not in ['field', 'operation'] and hasattr(model, k)
            }
            if filters:
                queryset = queryset.filter(**filters)
            if operation in stat_map:
                value = queryset.aggregate(val=stat_map[operation](field))['val']
            else:
                value = queryset.count()  # unknown operation: fall back to count
            results.append({
                keys['model']: model.__name__,
                keys['field']: field,
                keys['statistic_type']: operation,
                keys['value']: value,
                keys['filters']: filters
            })
        except Exception as e:
            # One failing model must not abort the whole insight.
            results.append({keys['model']: model.__name__, keys['error']: str(e)})
    return {
        'type': 'تحليل_الإحصاءات' if language == 'ar' else 'statistics_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [f"{r[keys['model']]}.{r[keys['field']]}" for r in results if keys['value'] in r],
            keys['data']: [r[keys['value']] for r in results if keys['value'] in r],
            keys['title']: f"{operation} of {field}" if language != 'ar' else f"{field} ({operation})"
        }
    }
def generate_recommendations(model_classes, analysis_type, language='ar'):
    """Suggest up to five simple indexing improvements for the given models.

    Flags un-indexed ForeignKey fields and un-indexed CharFields whose names
    suggest they are commonly filtered on. ``analysis_type`` is accepted for
    interface compatibility but does not influence the suggestions.
    """
    arabic = language == 'ar'
    text_field_names = ('name', 'title', 'description', 'text')
    recommendations = []
    for model_cls in model_classes:
        for fld in model_cls._meta.fields:
            target = f"{model_cls.__name__}.{fld.name}"
            if isinstance(fld, ForeignKey) and not fld.db_index:
                recommendations.append(
                    f"أضف db_index=True إلى {target}" if arabic
                    else f"Add db_index=True to {target}"
                )
            if (isinstance(fld, models.CharField) and not fld.db_index
                    and fld.name in text_field_names):
                recommendations.append(
                    f"فكر في فهرسة الحقل النصي {target}" if arabic
                    else f"Consider indexing the text field {target}"
                )
    # Keep the list short and actionable.
    return recommendations[:5]
def generate_model_insight(model, dealer_id=None, language='ar'):
    """Describe a single model: its field structure and (scoped) row count.

    Bug fix vs. the original: the insight ``type`` concatenated the localized
    word for "type" with ``_analysis`` (e.g. ``نوع_analysis``); it now uses a
    proper ``model_analysis`` label consistent with the other generators.

    :param model: a Django model class to describe.
    :param dealer_id: optional dealer scope (via ``dealer_id`` or ``dealer``).
    :param language: ``'ar'`` for Arabic output keys, otherwise English.
    :return: dict with one descriptor per concrete field plus a row count
        (``"error"`` when counting fails — structure info is still returned).
    """
    keys = _localized_keys(language)
    # Static structure: one descriptor per concrete field.
    fields_info = [
        {
            'name': f.name,
            'type': f.__class__.__name__,
            'null': f.null,
            'blank': f.blank,
            'unique': f.unique,
            'pk': f.primary_key
        } for f in model._meta.fields
    ]
    try:
        qs = model.objects.all()
        if dealer_id:
            if hasattr(model, 'dealer_id'):
                qs = qs.filter(dealer_id=dealer_id)
            elif hasattr(model, 'dealer'):
                qs = qs.filter(dealer=dealer_id)
        count = qs.count()
    except Exception:
        count = "error"  # counting is best-effort; keep the structure info
    return {
        'type': 'تحليل_النموذج' if language == 'ar' else 'model_analysis',
        keys['model']: model.__name__,
        'fields': fields_info,
        'count': count
    }
def generate_relationship_insight(models, query_params=None, dealer_id=None, language='ar'):
    """List ForeignKey / OneToOne / ManyToMany links out of each model.

    ``query_params`` and ``dealer_id`` are accepted for interface parity with
    the other insight generators but are not used here.
    """
    if language == 'ar':
        key_from, key_to, key_type = "من", "إلى", "نوع"
    else:
        key_from, key_to, key_type = "from", "to", "type"
    relationships = []
    for model_cls in models:
        # Forward relations in declaration order, then many-to-many relations.
        links = [(f, f.__class__.__name__) for f in model_cls._meta.fields
                 if isinstance(f, (ForeignKey, OneToOneField))]
        links += [(f, 'ManyToManyField') for f in model_cls._meta.many_to_many]
        for fld, relation_name in links:
            relationships.append({
                key_from: model_cls.__name__,
                key_to: fld.related_model.__name__,
                key_type: relation_name,
            })
    return {
        'type': 'تحليل_العلاقات' if language == 'ar' else 'relationship_analysis',
        'relationships': relationships
    }
def generate_performance_insight(models, query_params=None, dealer_id=None, language='ar'):
    """Flag likely index-related performance issues on the given models.

    Bug fix vs. the original: the ``models`` parameter shadowed the
    module-level ``django.db.models`` import, so the CharField check called
    ``models.CharField`` on the list of model classes and raised an uncaught
    ``AttributeError``; a local alias restores the intended behavior.

    :param models: iterable of Django model classes to inspect.
    :param query_params: unused; accepted for interface parity.
    :param dealer_id: unused; accepted for interface parity.
    :param language: ``'ar'`` for an Arabic ``type`` label, otherwise English.
    :return: dict with one issue record per un-indexed FK or hot CharField.
    """
    from django.db import models as dj_models  # local alias: the `models` parameter shadows the module

    issues = []
    for model in models:
        for field in model._meta.fields:
            if isinstance(field, ForeignKey) and not field.db_index:
                issues.append({
                    'model': model.__name__,
                    'field': field.name,
                    'issue': 'Missing index on ForeignKey'
                })
            # Text fields named like common filter targets deserve an index too.
            if isinstance(field, dj_models.CharField) and not field.db_index and field.name in ['name', 'title']:
                issues.append({
                    'model': model.__name__,
                    'field': field.name,
                    'issue': 'Unindexed CharField used in filtering'
                })
    return {
        'type': 'تحليل_الأداء' if language == 'ar' else 'performance_analysis',
        'issues': issues
    }

View File

@ -3,20 +3,7 @@ from inventory import models
from car_inventory import settings from car_inventory import settings
def fetch_data(dealer): def fetch_data(dealer):
"""
Fetches the total number of cars in the inventory for the specified dealer. If no cars are
found, returns a message indicating that fact. If an error occurs during the operation,
it returns an error message with details.
:param dealer: The dealer object for which the inventory information is required.
The dealer object must be an instance of a model that includes a
`get_local_name` method for formatting localized dealer names.
:type dealer: Dealer
:return: A string indicating either the total number of cars in the dealer's inventory,
that no cars exist in the inventory, or an error message detailing what went
wrong during the operation.
:rtype: str
"""
try: try:
# Annotate total cars by make, model, and trim # Annotate total cars by make, model, and trim
cars = models.Car.objects.filter(dealer=dealer).count() cars = models.Car.objects.filter(dealer=dealer).count()

BIN
haikalbot/haikalbot_01.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

View File

@ -0,0 +1,312 @@
# Integrating Ollama with LangChain for Django AI Analyst
This guide provides step-by-step instructions for integrating Ollama with LangChain in your Django AI Analyst application, with specific focus on Arabic language support.
## Prerequisites
1. Ollama installed on your system
2. An Ollama model with Arabic support (preferably Jais-13B as recommended)
3. Django project with the AI Analyst application
## Installation Steps
### 1. Install Required Python Packages
```bash
pip install langchain langchain-community
```
### 2. Configure Django Settings
Add the following to your Django settings.py file:
```python
# Ollama and LangChain settings
OLLAMA_BASE_URL = "http://localhost:11434" # Default Ollama API URL
OLLAMA_MODEL = "jais:13b" # Or your preferred model
OLLAMA_TIMEOUT = 120 # Seconds
```
### 3. Create a LangChain Utility Module
Create a new file `ai_analyst/langchain_utils.py`:
```python
from langchain.llms import Ollama
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_ollama_llm():
"""
Initialize and return an Ollama LLM instance configured for Arabic support.
"""
try:
# Get settings from Django settings or use defaults
base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
model = getattr(settings, 'OLLAMA_MODEL', 'jais:13b')
timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
# Configure Ollama with appropriate parameters for Arabic
return Ollama(
base_url=base_url,
model=model,
timeout=timeout,
# Parameters to improve Arabic language generation
parameters={
"temperature": 0.7,
"top_p": 0.9,
"top_k": 40,
"num_ctx": 2048, # Context window size
}
)
except Exception as e:
logger.error(f"Error initializing Ollama LLM: {str(e)}")
return None
def create_prompt_analyzer_chain(language='ar'):
"""
Create a LangChain for analyzing prompts in Arabic or English.
"""
llm = get_ollama_llm()
if not llm:
return None
# Define the prompt template based on language
if language == 'ar':
template = """
قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON كما يلي:
{{
"analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
else:
template = """
Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
Prompt: {prompt}
Provide your answer in JSON format as follows:
{
"analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {"field1": "value1", "field2": "value2"}
}
"""
# Create the prompt template
prompt_template = PromptTemplate(
input_variables=["prompt"],
template=template
)
# Create and return the LLM chain
return LLMChain(llm=llm, prompt=prompt_template)
```
### 4. Update Your View to Use LangChain
Modify your `ModelAnalystView` class to use the LangChain utilities:
```python
from .langchain_utils import create_prompt_analyzer_chain
import json
import re
class ModelAnalystView(View):
# ... existing code ...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We'll initialize chains on demand to avoid startup issues
self.prompt_analyzer_chains = {}
def _get_prompt_analyzer_chain(self, language='ar'):
"""
Get or create a prompt analyzer chain for the specified language.
"""
if language not in self.prompt_analyzer_chains:
self.prompt_analyzer_chains[language] = create_prompt_analyzer_chain(language)
return self.prompt_analyzer_chains[language]
def _analyze_prompt_with_llm(self, prompt, language='ar'):
"""
Use LangChain and Ollama to analyze the prompt.
"""
try:
# Get the appropriate chain for the language
chain = self._get_prompt_analyzer_chain(language)
if not chain:
# Fallback to rule-based analysis if chain creation failed
return self._analyze_prompt_rule_based(prompt, language)
# Run the chain
result = chain.run(prompt=prompt)
# Parse the JSON result
# Find JSON content within the response (in case the LLM adds extra text)
json_match = re.search(r'({.*})', result.replace('\n', ' '), re.DOTALL)
if json_match:
json_str = json_match.group(1)
return json.loads(json_str)
else:
# Fallback to rule-based analysis
return self._analyze_prompt_rule_based(prompt, language)
except Exception as e:
logger.error(f"Error in LLM prompt analysis: {str(e)}")
# Fallback to rule-based analysis
return self._analyze_prompt_rule_based(prompt, language)
def _analyze_prompt_rule_based(self, prompt, language='ar'):
"""
Rule-based fallback for prompt analysis.
"""
analysis_type, target_models, query_params = self._analyze_prompt(prompt, language)
return {
"analysis_type": analysis_type,
"target_models": target_models,
"query_params": query_params
}
def _process_prompt(self, prompt, user, dealer_id, language='ar'):
"""
Process the natural language prompt and generate insights.
"""
# ... existing code ...
# Use LLM for prompt analysis
analysis_result = self._analyze_prompt_with_llm(prompt, language)
analysis_type = analysis_result.get('analysis_type', 'general')
target_models = analysis_result.get('target_models', [])
query_params = analysis_result.get('query_params', {})
# ... rest of the method ...
```
## Testing the Integration
Create a test script to verify the Ollama and LangChain integration:
```python
# test_ollama.py
import os
import sys
import django
# Set up Django environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'your_project.settings')
django.setup()
from ai_analyst.langchain_utils import get_ollama_llm, create_prompt_analyzer_chain
def test_ollama_connection():
"""Test basic Ollama connection and response."""
llm = get_ollama_llm()
if not llm:
print("Failed to initialize Ollama LLM")
return
# Test with Arabic prompt
arabic_prompt = "مرحبا، كيف حالك؟"
print(f"Testing Arabic prompt: {arabic_prompt}")
try:
response = llm.invoke(arabic_prompt)
print(f"Response: {response}")
print("Ollama connection successful!")
except Exception as e:
print(f"Error: {str(e)}")
def test_prompt_analysis():
"""Test the prompt analyzer chain."""
chain = create_prompt_analyzer_chain('ar')
if not chain:
print("Failed to create prompt analyzer chain")
return
# Test with an Arabic analysis prompt
analysis_prompt = "كم عدد السيارات التي لدينا؟"
print(f"Testing analysis prompt: {analysis_prompt}")
try:
result = chain.run(prompt=analysis_prompt)
print(f"Analysis result: {result}")
except Exception as e:
print(f"Error: {str(e)}")
if __name__ == "__main__":
print("Testing Ollama and LangChain integration...")
test_ollama_connection()
print("\n---\n")
test_prompt_analysis()
```
Run the test script:
```bash
python test_ollama.py
```
## Troubleshooting
### Common Issues and Solutions
1. **Ollama Connection Error**
- Ensure Ollama is running: `ollama serve`
- Check if the model is downloaded: `ollama list`
- Verify the base URL in settings
2. **Model Not Found**
- Download the model: `ollama pull jais:13b`
- Check model name spelling in settings
3. **Timeout Errors**
- Increase the timeout setting for complex queries
- Consider using a smaller model if your hardware is limited
4. **Poor Arabic Analysis**
- Ensure you're using an Arabic-capable model like Jais-13B
- Check that your prompts are properly formatted in Arabic
- Adjust temperature and other parameters for better results
5. **JSON Parsing Errors**
- Improve the prompt template to emphasize strict JSON formatting
- Implement more robust JSON extraction from LLM responses
## Performance Optimization
For production use, consider these optimizations:
1. **Caching LLM Responses**
- Implement Redis or another caching system for LLM responses
- Cache common analysis patterns to reduce API calls
2. **Batch Processing**
- For bulk analysis, use batch processing to reduce overhead
3. **Model Quantization**
- If performance is slow, consider using a quantized version of the model
- Example: `ollama pull jais:13b-q4_0` for a 4-bit quantized version
4. **Asynchronous Processing**
- For long-running analyses, implement asynchronous processing with Celery
## Advanced Usage: Fine-tuning for Domain-Specific Analysis
For improved performance on your specific domain:
1. Create a dataset of example prompts and expected analyses
2. Use Ollama's fine-tuning capabilities to adapt the model
3. Update your application to use the fine-tuned model
## Conclusion
This integration enables your Django AI Analyst to leverage Ollama's powerful language models through LangChain, with specific optimizations for Arabic language support. The fallback to rule-based analysis ensures robustness, while the LLM-based approach provides more natural language understanding capabilities.

View File

@ -1,4 +1,4 @@
# Generated by Django 5.1.7 on 2025-05-04 16:07 # Generated by Django 5.2.1 on 2025-05-25 23:01
from django.db import migrations, models from django.db import migrations, models

View File

@ -1,4 +1,4 @@
# Generated by Django 5.1.7 on 2025-05-04 16:07 # Generated by Django 5.2.1 on 2025-05-25 23:01
import django.db.models.deletion import django.db.models.deletion
from django.db import migrations, models from django.db import migrations, models

View File

@ -0,0 +1,33 @@
# Generated by Django 5.2.1 on 2025-05-26 00:28
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the AnalysisCache table used to memoize AI analysis results."""

    dependencies = [
        ('haikalbot', '0002_initial'),
        # A cache row optionally references the requesting user.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AnalysisCache',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Hash of the prompt + dealer_id + language — the cache lookup key.
                ('prompt_hash', models.CharField(db_index=True, max_length=64)),
                ('dealer_id', models.IntegerField(blank=True, db_index=True, null=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('expires_at', models.DateTimeField()),
                # The cached analysis payload, stored as JSON.
                ('result', models.JSONField()),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Composite index for lookups; expires_at index — presumably for
                # expiry-based eviction queries (confirm against the view code).
                'indexes': [models.Index(fields=['prompt_hash', 'dealer_id'], name='haikalbot_a_prompt__b98e1e_idx'), models.Index(fields=['expires_at'], name='haikalbot_a_expires_e790cd_idx')],
            },
        ),
    ]

View File

@ -0,0 +1,36 @@
# Generated by Django 5.2.1 on 2025-05-26 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add default ordering and index tuning for ChatLog and AnalysisCache."""

    dependencies = [
        ('haikalbot', '0003_analysiscache'),
        ('inventory', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='analysiscache',
            options={'verbose_name_plural': 'Analysis caches'},
        ),
        # Newest chat messages first by default.
        migrations.AlterModelOptions(
            name='chatlog',
            options={'ordering': ['-timestamp']},
        ),
        # Index expires_at — presumably to keep expiry checks cheap (confirm).
        migrations.AlterField(
            model_name='analysiscache',
            name='expires_at',
            field=models.DateTimeField(db_index=True),
        ),
        migrations.AlterField(
            model_name='chatlog',
            name='timestamp',
            field=models.DateTimeField(auto_now_add=True, db_index=True),
        ),
        # Composite index matching the model's Meta.indexes declaration.
        migrations.AddIndex(
            model_name='chatlog',
            index=models.Index(fields=['dealer', 'timestamp'], name='haikalbot_c_dealer__6f8d63_idx'),
        ),
    ]

View File

@ -1,5 +1,8 @@
from django.db import models from django.db import models
from inventory.models import Dealer from inventory.models import Dealer
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
class ChatLog(models.Model): class ChatLog(models.Model):
@ -21,10 +24,67 @@ class ChatLog(models.Model):
:ivar timestamp: The date and time when the chat log entry was created. :ivar timestamp: The date and time when the chat log entry was created.
:type timestamp: datetime :type timestamp: datetime
""" """
dealer = models.ForeignKey(Dealer, on_delete=models.CASCADE, related_name='chatlogs') dealer = models.ForeignKey(Dealer, on_delete=models.CASCADE, related_name='chatlogs', db_index=True)
user_message = models.TextField() user_message = models.TextField()
chatbot_response = models.TextField() chatbot_response = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True) timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
ordering = ['-timestamp']
indexes = [
models.Index(fields=['dealer', 'timestamp']),
]
def __str__(self): def __str__(self):
return self.user_message return f"{self.dealer.name}: {self.user_message[:50]}..."
class AnalysisCache(models.Model):
    """
    Cache of analysis results, keyed by a hash of the request.

    Stores the JSON result of a model-analysis run together with the request
    fingerprint (prompt hash, optional user, optional dealer) and an expiry
    timestamp, so repeated identical queries can be answered without
    re-running the analysis.

    :ivar prompt_hash: MD5 hash of the prompt + dealer_id + language
        (produced by ``CacheService.generate_hash``)
    :type prompt_hash: str
    :ivar user: The user who made the request (optional; None for shared entries)
    :type user: User
    :ivar dealer_id: ID of the dealer associated with this cache entry
    :type dealer_id: int
    :ivar created_at: When the cache entry was created
    :type created_at: datetime
    :ivar updated_at: When the cache entry was last updated
    :type updated_at: datetime
    :ivar expires_at: When the cache entry expires
    :type expires_at: datetime
    :ivar result: The cached analysis result
    :type result: dict
    """
    # 32-char MD5 hex digest; 64 leaves headroom if the hash algorithm changes.
    prompt_hash = models.CharField(max_length=64, db_index=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)
    # Plain integer (not a FK) so entries survive dealer deletion and stay app-local.
    dealer_id = models.IntegerField(null=True, blank=True, db_index=True)
    created_at = models.DateTimeField(default=timezone.now)
    updated_at = models.DateTimeField(auto_now=True)
    expires_at = models.DateTimeField(db_index=True)
    result = models.JSONField()

    class Meta:
        indexes = [
            # Matches the lookup pattern in CacheService.get_cached_result.
            models.Index(fields=['prompt_hash', 'dealer_id']),
            # Supports purging / filtering by expiry.
            models.Index(fields=['expires_at']),
        ]
        verbose_name_plural = "Analysis caches"

    def is_expired(self):
        """
        Check if the cache entry has expired.

        :return: True if the cache entry has expired, False otherwise
        :rtype: bool
        """
        return timezone.now() > self.expires_at

    def __str__(self):
        return f"Cache: {self.prompt_hash[:10]}... (Dealer: {self.dealer_id})"

View File

@ -0,0 +1,76 @@
# Recommended Ollama Models for Arabic Language Support
## Top Recommendations
1. **Jais-13B** (Recommended)
- **Size**: 13 billion parameters
- **Strengths**: Specifically trained on Arabic content, excellent understanding of Arabic context and nuances
- **Command**: `ollama pull jais:13b`
- **Best for**: Production-quality Arabic language understanding and generation
2. **BLOOM-7B**
- **Size**: 7 billion parameters
- **Strengths**: Trained on 46 languages including Arabic, good multilingual capabilities
- **Command**: `ollama pull bloom:7b`
- **Best for**: Multilingual applications where Arabic is one of several languages
3. **Mistral-7B-Instruct**
- **Size**: 7 billion parameters
- **Strengths**: Strong general performance, good instruction following, reasonable Arabic support
- **Command**: `ollama pull mistral:7b-instruct`
- **Best for**: General purpose applications with moderate Arabic requirements
4. **Qwen2-7B**
- **Size**: 7 billion parameters
- **Strengths**: Good multilingual capabilities including Arabic
- **Command**: `ollama pull qwen2:7b`
- **Best for**: Applications requiring both Chinese and Arabic support
## Comparison Table
| Model | Size | Arabic Support | Instruction Following | Resource Requirements | Command |
|-------|------|---------------|----------------------|----------------------|---------|
| Jais-13B | 13B | Excellent | Very Good | High (16GB+ RAM) | `ollama pull jais:13b` |
| BLOOM-7B | 7B | Good | Good | Medium (8GB+ RAM) | `ollama pull bloom:7b` |
| Mistral-7B-Instruct | 7B | Moderate | Excellent | Medium (8GB+ RAM) | `ollama pull mistral:7b-instruct` |
| Qwen2-7B | 7B | Good | Very Good | Medium (8GB+ RAM) | `ollama pull qwen2:7b` |
## Justification for Jais-13B Recommendation
Jais-13B is specifically recommended for your Django AI Analyst application because:
1. **Arabic-First Design**: Unlike most models that treat Arabic as one of many languages, Jais was specifically designed for Arabic language understanding and generation.
2. **Cultural Context**: The model has better understanding of Arabic cultural contexts and nuances, which is important for analyzing domain-specific queries about your data models.
3. **Technical Terminology**: Better handling of technical terms in Arabic, which is crucial for a model analyzing Django models and database structures.
4. **Instruction Following**: Good ability to follow complex instructions in Arabic, which is essential for your prompt-based analysis system.
5. **Performance on Analytical Tasks**: Superior performance on analytical and reasoning tasks in Arabic compared to general multilingual models.
If your system has limited resources (less than 16GB RAM, the minimum listed above for Jais-13B), Mistral-7B-Instruct would be the next best alternative, offering a good balance between performance and resource requirements.
## Installation Instructions
To install the recommended Jais-13B model:
```bash
ollama pull jais:13b
```
For systems with limited resources, install Mistral-7B-Instruct instead:
```bash
ollama pull mistral:7b-instruct
```
After installation, update the `OLLAMA_MODEL` setting in your Django view:
```python
# For Jais-13B
OLLAMA_MODEL = 'jais:13b'
# OR for Mistral-7B-Instruct if resources are limited
# OLLAMA_MODEL = 'mistral:7b-instruct'
```

View File

@ -0,0 +1,319 @@
from django.db.models import Avg, Sum, Max, Min, ForeignKey, OneToOneField
import inspect
from django.db import models
from django.utils.translation import gettext_lazy as _
def _localized_keys(language):
"""
Get localized key names based on language.
:param language: Language code ('en' or 'ar')
:type language: str
:return: Dictionary of localized keys
:rtype: dict
"""
if language == 'ar':
return {
'type': 'نوع', 'model': 'النموذج', 'count': 'العدد', 'filters': 'الفلاتر_المطبقة',
'error': 'خطأ', 'chart_type': 'نوع_الرسم_البياني', 'labels': 'التسميات', 'data': 'البيانات',
'visualization_data': 'بيانات_الرسم_البياني', 'field': 'الحقل', 'value': 'القيمة',
'statistic_type': 'نوع_الإحصاء', 'results': 'النتائج', 'title': 'العنوان'
}
else:
return {
'type': 'type', 'model': 'model', 'count': 'count', 'filters': 'filters_applied',
'error': 'error', 'chart_type': 'chart_type', 'labels': 'labels', 'data': 'data',
'visualization_data': 'visualization_data', 'field': 'field', 'value': 'value',
'statistic_type': 'statistic_type', 'results': 'results', 'title': 'title'
}
def generate_count_insight(models, query_params, dealer_id=None, language='en'):
    """
    Generate record-count insights for the specified models.

    :param models: List of model classes to analyze
    :type models: list
    :param query_params: Query parameters used as field filters
    :type query_params: dict
    :param dealer_id: Optional dealer ID to scope the counts
    :type dealer_id: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Count insights with bar-chart visualization data
    :rtype: dict
    """
    # The `models` parameter shadows django.db.models, so the field classes
    # must be imported locally; previously `models.IntegerField` raised an
    # AttributeError that was silently swallowed, disabling type coercion.
    from django.db.models import BooleanField, IntegerField

    keys = _localized_keys(language)
    results = []
    for model in models:
        try:
            queryset = model.objects.all()
            # Scope to a single dealer when requested.
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {}
            for key, value in query_params.items():
                if key not in ['field', 'operation'] and hasattr(model, key):
                    try:
                        field = model._meta.get_field(key)
                        # Coerce string query params to the field's Python type.
                        if isinstance(field, IntegerField):
                            value = int(value)
                        elif isinstance(field, BooleanField):
                            value = value.lower() in ('true', '1', 'yes')
                    except Exception:
                        # Unknown field or bad cast: fall back to the raw value.
                        pass
                    filters[key] = value
            if filters:
                queryset = queryset.filter(**filters)
            results.append({
                keys['model']: model.__name__,
                keys['count']: queryset.count(),
                keys['filters']: filters,
            })
        except Exception as e:
            results.append({
                keys['model']: model.__name__,
                keys['error']: str(e),
            })
    return {
        # Identify the analysis kind; previously this emitted the meaningless
        # 'type_analysis' (and a mixed-script value in Arabic).
        keys['type']: 'تحليل_العدد' if language == 'ar' else 'count_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [r[keys['model']] for r in results if keys['count'] in r],
            keys['data']: [r[keys['count']] for r in results if keys['count'] in r],
        },
    }
def generate_statistics_insight(models, query_params, dealer_id=None, language='en'):
    """
    Aggregate a numeric field (average/sum/max/min) across the specified models.

    :param models: List of model classes to analyze
    :type models: list
    :param query_params: Must contain 'field'; may contain 'operation'
        ('average', 'sum', 'max', 'min' — anything else falls back to a row
        count) plus extra field filters.
    :type query_params: dict
    :param dealer_id: Optional dealer ID to scope the statistics
    :type dealer_id: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Statistical insights with bar-chart visualization data
    :rtype: dict
    """
    keys = _localized_keys(language)
    results = []
    field = query_params.get('field')
    operation = query_params.get('operation', 'average')
    # Loop-invariant: map operation names to aggregate classes once.
    stat_map = {'average': Avg, 'sum': Sum, 'max': Max, 'min': Min}
    for model in models:
        try:
            # Skip models that do not expose the requested field.
            if not field or not hasattr(model, field):
                continue
            queryset = model.objects.all()
            if dealer_id:
                if hasattr(model, 'dealer_id'):
                    queryset = queryset.filter(dealer_id=dealer_id)
                elif hasattr(model, 'dealer'):
                    queryset = queryset.filter(dealer=dealer_id)
            filters = {}
            for k, v in query_params.items():
                if k not in ['field', 'operation'] and hasattr(model, k):
                    filters[k] = v
            if filters:
                queryset = queryset.filter(**filters)
            if operation in stat_map:
                value = queryset.aggregate(val=stat_map[operation](field))['val']
            else:
                # Unknown operation: fall back to a row count.
                value = queryset.count()
            results.append({
                keys['model']: model.__name__,
                keys['field']: field,
                keys['statistic_type']: operation,
                keys['value']: value,
                keys['filters']: filters,
            })
        except Exception as e:
            results.append({keys['model']: model.__name__, keys['error']: str(e)})
    return {
        # Identify the analysis kind; previously this emitted the meaningless
        # 'type_analysis' (and a mixed-script value in Arabic).
        keys['type']: 'تحليل_الإحصاءات' if language == 'ar' else 'statistics_analysis',
        keys['results']: results,
        keys['visualization_data']: {
            keys['chart_type']: 'bar',
            keys['labels']: [f"{r[keys['model']]}.{r[keys['field']]}" for r in results if keys['value'] in r],
            keys['data']: [r[keys['value']] for r in results if keys['value'] in r],
            keys['title']: f"{operation} of {field}" if language != 'ar' else f"{field} ({operation})",
        },
    }
def generate_recommendations(model_classes, analysis_type, language='en'):
    """
    Suggest up to five indexing improvements across the given models.

    :param model_classes: Model classes to inspect
    :type model_classes: list
    :param analysis_type: Type of analysis (not used by the current heuristics)
    :type analysis_type: str
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: At most five human-readable recommendation strings
    :rtype: list
    """
    arabic = language == 'ar'
    text_field_names = ('name', 'title', 'description', 'text')
    suggestions = []
    for model_cls in model_classes:
        for field in model_cls._meta.fields:
            target = f"{model_cls.__name__}.{field.name}"
            # Un-indexed foreign keys are the most common join bottleneck.
            if isinstance(field, ForeignKey) and not field.db_index:
                if arabic:
                    suggestions.append(f"أضف db_index=True إلى {target}")
                else:
                    suggestions.append(f"Add db_index=True to {target}")
            # Commonly-filtered text columns benefit from an index too.
            if (isinstance(field, models.CharField) and not field.db_index
                    and field.name in text_field_names):
                if arabic:
                    suggestions.append(f"فكر في فهرسة الحقل النصي {target}")
                else:
                    suggestions.append(f"Consider indexing the text field {target}")
    return suggestions[:5]
def generate_model_insight(model, dealer_id=None, language='en'):
    """
    Generate structural insights (field metadata and row count) for one model.

    :param model: Model class to analyze
    :type model: Model class
    :param dealer_id: Optional dealer ID to scope the row count
    :type dealer_id: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Model insights ('fields' and 'count' keys are intentionally
        unlocalized, as in previous responses)
    :rtype: dict
    """
    keys = _localized_keys(language)
    fields_info = [
        {
            'name': f.name,
            'type': f.__class__.__name__,
            'null': f.null,
            'blank': f.blank,
            'unique': f.unique,
            'pk': f.primary_key,
        } for f in model._meta.fields
    ]
    try:
        qs = model.objects.all()
        if dealer_id:
            if hasattr(model, 'dealer_id'):
                qs = qs.filter(dealer_id=dealer_id)
            elif hasattr(model, 'dealer'):
                qs = qs.filter(dealer=dealer_id)
        count = qs.count()
    except Exception:
        # Counting is best-effort; the field metadata is still useful.
        count = "error"
    return {
        # Identify the analysis kind; previously this emitted the meaningless
        # 'type_analysis' (and a mixed-script value in Arabic).
        keys['type']: 'تحليل_النموذج' if language == 'ar' else 'model_analysis',
        keys['model']: model.__name__,
        'fields': fields_info,
        'count': count,
    }
def generate_relationship_insight(models, query_params=None, dealer_id=None, language='en'):
    """
    Map the ForeignKey/OneToOne/ManyToMany links between the given models.

    :param models: List of model classes to analyze
    :type models: list
    :param query_params: Query parameters (unused)
    :type query_params: dict
    :param dealer_id: Dealer ID (unused)
    :type dealer_id: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Relationship insights
    :rtype: dict
    """
    if language == 'ar':
        key_from, key_to, key_kind = "من", "إلى", "نوع"
    else:
        key_from, key_to, key_kind = "from", "to", "type"
    links = []
    for model in models:
        # Forward FK / one-to-one links declared on this model.
        for field in model._meta.fields:
            if isinstance(field, (ForeignKey, OneToOneField)):
                links.append({
                    key_from: model.__name__,
                    key_to: field.related_model.__name__,
                    key_kind: field.__class__.__name__,
                })
        # Many-to-many links declared on this model.
        for m2m in model._meta.many_to_many:
            links.append({
                key_from: model.__name__,
                key_to: m2m.related_model.__name__,
                key_kind: 'ManyToManyField',
            })
    return {
        'type': 'تحليل_العلاقات' if language == 'ar' else 'relationship_analysis',
        'relationships': links,
    }
def generate_performance_insight(models, query_params=None, dealer_id=None, language='en'):
    """
    Flag likely performance issues (missing indexes) in the given models.

    :param models: List of model classes to analyze
    :type models: list
    :param query_params: Query parameters (unused)
    :type query_params: dict
    :param dealer_id: Dealer ID (unused)
    :type dealer_id: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Performance insights
    :rtype: dict
    """
    # The `models` parameter shadows django.db.models, so CharField must be
    # imported locally; previously `models.CharField` raised AttributeError
    # on a list, crashing the whole analysis.
    from django.db.models import CharField

    issues = []
    for model in models:
        for field in model._meta.fields:
            if isinstance(field, ForeignKey) and not field.db_index:
                issues.append({
                    'model': model.__name__,
                    'field': field.name,
                    'issue': 'Missing index on ForeignKey',
                })
            if isinstance(field, CharField) and not field.db_index and field.name in ['name', 'title']:
                issues.append({
                    'model': model.__name__,
                    'field': field.name,
                    'issue': 'Unindexed CharField used in filtering',
                })
    return {
        'type': 'تحليل_الأداء' if language == 'ar' else 'performance_analysis',
        'issues': issues,
    }

View File

@ -0,0 +1,101 @@
from django.utils import timezone
from django.db import models
from ..models import AnalysisCache
import hashlib
import logging
logger = logging.getLogger(__name__)
class CacheService:
    """
    Service encapsulating read/write access to the AnalysisCache table.

    Provides cache-key generation, retrieval of unexpired results, and
    storage of new results with a configurable time-to-live.
    """

    def generate_hash(self, prompt, dealer_id, language):
        """
        Return the MD5 cache key for a (prompt, dealer, language) triple.

        :param prompt: The user's prompt text
        :type prompt: str
        :param dealer_id: The dealer's ID (None/0 is normalized to 'all')
        :type dealer_id: int
        :return: MD5 hash string
        :rtype: str
        """
        raw_key = "{}:{}:{}".format(prompt, dealer_id or 'all', language)
        return hashlib.md5(raw_key.encode()).hexdigest()

    def get_cached_result(self, prompt_hash, user, dealer_id):
        """
        Return a cached, unexpired result, or None when there is no hit.

        A user-specific entry, when present, takes precedence over the
        dealer-wide entry.

        :param prompt_hash: The hash key for the cache entry
        :type prompt_hash: str
        :param user: The user making the request (may be None/anonymous)
        :param dealer_id: The dealer's ID
        :type dealer_id: int
        :return: Cached result or None if not found
        :rtype: dict or None
        """
        try:
            if user and user.is_authenticated:
                personal = AnalysisCache.objects.filter(
                    prompt_hash=prompt_hash,
                    user=user,
                    expires_at__gt=timezone.now(),
                ).first()
                if personal:
                    return personal.result
            # NOTE(review): this lookup does not exclude user-bound entries
            # (no user=None filter) — confirm that is intended.
            shared = AnalysisCache.objects.filter(
                prompt_hash=prompt_hash,
                dealer_id=dealer_id,
                expires_at__gt=timezone.now(),
            ).first()
            return shared.result if shared else None
        except Exception as exc:
            logger.warning(f"Error retrieving cache: {str(exc)}")
            return None

    def cache_result(self, prompt_hash, result, user, dealer_id, duration=3600):
        """
        Store a result in the cache, replacing any entry with the same key.

        :param prompt_hash: The hash key for the cache entry
        :type prompt_hash: str
        :param result: The result to cache
        :type result: dict
        :param user: The user making the request (anonymous users share one slot)
        :param dealer_id: The dealer's ID
        :type dealer_id: int
        :param duration: Cache duration in seconds
        :type duration: int
        :return: None
        """
        try:
            expiry = timezone.now() + timezone.timedelta(seconds=duration)
            owner = user if user and user.is_authenticated else None
            AnalysisCache.objects.update_or_create(
                prompt_hash=prompt_hash,
                user=owner,
                dealer_id=dealer_id,
                defaults={'result': result, 'expires_at': expiry},
            )
        except Exception as exc:
            logger.warning(f"Error caching result: {str(exc)}")

View File

@ -0,0 +1,99 @@
from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_llm_instance():
    """
    Initialize and return an Ollama LLM instance configured for Arabic support.

    All generation parameters are read from Django settings, falling back to
    defaults tuned for deterministic, structured (JSON) output in Arabic and
    English.

    :return: Configured OllamaLLM instance or None if initialization fails
    :rtype: OllamaLLM or None
    """
    try:
        return OllamaLLM(
            base_url=getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434'),
            model=getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b'),
            # Low temperature keeps the structured-JSON output stable.
            temperature=getattr(settings, 'OLLAMA_TEMPERATURE', 0.2),
            top_p=getattr(settings, 'OLLAMA_TOP_P', 0.8),
            top_k=getattr(settings, 'OLLAMA_TOP_K', 40),
            num_ctx=getattr(settings, 'OLLAMA_NUM_CTX', 4096),
            num_predict=getattr(settings, 'OLLAMA_NUM_PREDICT', 2048),
            # Stop before the model opens a code fence or ends the turn.
            stop=["```", "</s>"],
            repeat_penalty=1.1,
        )
    except Exception as exc:
        logger.error(f"Error initializing Ollama LLM: {str(exc)}")
        return None
def get_llm_chain(language='en'):
    """
    Create a LangChain runnable for analyzing prompts in Arabic or English.

    The chain extracts structured information (analysis type, target models,
    query parameters) from a user prompt as JSON.

    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: PromptTemplate | OllamaLLM chain, or None if initialization fails
    """
    llm = get_llm_instance()
    if not llm:
        return None
    # Literal braces in the JSON examples must be doubled ({{ }}): a
    # PromptTemplate treats single braces as template variables. The English
    # template previously used single braces, which broke template parsing.
    if language == 'ar':
        template = """
        قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
        الاستعلام: {prompt}
        قم بتقديم إجابتك بتنسيق JSON كما يلي:
        {{
        "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
        "target_models": ["ModelName1", "ModelName2"],
        "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    else:
        template = """
        Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
        Prompt: {prompt}
        Provide your answer in JSON format as follows:
        {{
        "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
        "target_models": ["ModelName1", "ModelName2"],
        "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template,
    )
    # LCEL composition: template output feeds straight into the LLM.
    return prompt_template | llm

80
haikalbot/temp.txt Normal file
View File

@ -0,0 +1,80 @@
from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_ollama_llm():
    """
    Initialize and return an Ollama LLM instance configured for Arabic support.

    Reads OLLAMA_BASE_URL / OLLAMA_MODEL from Django settings, falling back
    to a local Ollama server and the qwen3:8b model.

    :return: OllamaLLM instance, or None if initialization fails
    :rtype: OllamaLLM or None
    """
    try:
        return OllamaLLM(
            base_url=getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434'),
            model=getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b'),
            # Low temperature keeps the structured-JSON output stable.
            temperature=0.2,
            top_p=0.8,
            top_k=40,
            num_ctx=4096,
            num_predict=2048,
            # Stop before the model opens a code fence or ends the turn.
            stop=["```", "</s>"],
            repeat_penalty=1.1,
        )
    except Exception as exc:
        logger.error(f"Error initializing Ollama LLM: {str(exc)}")
        return None
def create_prompt_analyzer_chain(language='ar'):
    """
    Create a LangChain runnable for analyzing prompts in Arabic or English.

    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: PromptTemplate | OllamaLLM chain, or None if initialization fails
    """
    llm = get_ollama_llm()
    if not llm:
        return None
    # Literal braces in the JSON examples must be doubled ({{ }}): a
    # PromptTemplate treats single braces as template variables. The English
    # template previously used single braces, which broke template parsing.
    if language == 'ar':
        template = """
        قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
        الاستعلام: {prompt}
        قم بتقديم إجابتك بتنسيق JSON كما يلي:
        {{
        "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
        "target_models": ["ModelName1", "ModelName2"],
        "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    else:
        template = """
        Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
        Prompt: {prompt}
        Provide your answer in JSON format as follows:
        {{
        "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
        "target_models": ["ModelName1", "ModelName2"],
        "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template,
    )
    # LCEL composition: template output feeds straight into the LLM.
    return prompt_template | llm

View File

@ -0,0 +1,161 @@
# Training Prompt for Django Model Analyst AI Agent
## Agent Purpose
You are a specialized AI agent designed to analyze Django models and provide insightful information to users. Your primary function is to interpret Django model structures, relationships, and metadata to generate meaningful insights that help developers and stakeholders understand their data models better.
## Core Capabilities
1. Parse and understand Django model definitions
2. Identify relationships between models (ForeignKey, ManyToMany, OneToOne)
3. Analyze model fields, types, constraints, and metadata
4. Generate statistics and insights about model usage and structure
5. Provide recommendations for model optimization
6. Respond to natural language queries about models
7. Format responses as structured JSON for integration with frontend applications
## Input Processing
You will receive inputs in the following format:
1. Django model code or references to model files
2. A natural language prompt specifying the type of analysis or insights requested
3. Optional context about the project or specific concerns
## Output Requirements
Your responses must:
1. Be formatted as valid JSON
2. Include a "status" field indicating success or failure
3. Provide an "insights" array containing the requested analysis
4. Include metadata about the analysis performed
5. Be structured in a way that's easy to parse and display in a frontend
## Analysis Types
You should be able to perform the following types of analysis:
### Structural Analysis
- Model count and complexity metrics
- Field type distribution
- Relationship mapping and visualization data
- Inheritance patterns
- Abstract models usage
### Performance Analysis
- Potential query bottlenecks
- Missing index recommendations
- Relationship optimization suggestions
- N+1 query vulnerability detection
### Security Analysis
- Sensitive field detection
- Permission model recommendations
- Data exposure risk assessment
### Data Integrity Analysis
- Constraint analysis
- Validation rule assessment
- Data consistency recommendations
## Example Interactions
### Example 1: Basic Model Analysis
**Input Prompt:**
"Analyze the User and Profile models and show me their relationship structure."
**Expected Response:**
```json
{
"status": "success",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"insights": [
{
"type": "relationship_analysis",
"models": ["User", "Profile"],
"relationships": [
{
"from": "Profile",
"to": "User",
"type": "OneToOne",
"field": "user",
"related_name": "profile",
"on_delete": "CASCADE"
}
],
"visualization_data": {
"nodes": [...],
"edges": [...]
}
}
],
"recommendations": [
"Consider adding an index to Profile.user for faster lookups"
]
}
```
### Example 2: Query Performance Analysis
**Input Prompt:**
"Identify potential performance issues in the Order and OrderItem models."
**Expected Response:**
```json
{
"status": "success",
"request_id": "e5f6g7h8",
"timestamp": "2025-05-25T23:22:30Z",
"insights": [
{
"type": "performance_analysis",
"models": ["Order", "OrderItem"],
"issues": [
{
"severity": "high",
"model": "OrderItem",
"field": "order",
"issue": "Missing database index on ForeignKey",
"impact": "Slow queries when filtering OrderItems by Order",
"solution": "Add db_index=True to order field"
},
{
"severity": "medium",
"model": "Order",
"issue": "No select_related in common queries",
"impact": "Potential N+1 query problems",
"solution": "Use select_related when querying Orders with OrderItems"
}
]
}
],
"code_suggestions": [
{
"model": "OrderItem",
"current": "order = models.ForeignKey(Order, on_delete=models.CASCADE)",
"suggested": "order = models.ForeignKey(Order, on_delete=models.CASCADE, db_index=True)"
}
]
}
```
## Limitations and Boundaries
1. You should not modify or execute code unless explicitly requested
2. You should indicate when you need additional information to provide accurate insights
3. You should acknowledge when a requested analysis is beyond your capabilities
4. You should not make assumptions about implementation details not present in the provided models
5. You should clearly distinguish between factual observations and recommendations
## Learning and Improvement
You should continuously improve your analysis capabilities by:
1. Learning from user feedback
2. Staying updated on Django best practices
3. Expanding your understanding of common model patterns
4. Refining your insight generation to be more relevant and actionable
## Ethical Considerations
1. Respect data privacy by not suggesting exposing sensitive information
2. Provide balanced recommendations that consider security, performance, and usability
3. Be transparent about the limitations of your analysis
4. Avoid making judgments about the quality of code beyond objective metrics
## Technical Integration
You will be integrated into a Django application as a service that:
1. Receives requests through a REST API
2. Has access to model definitions through Django's introspection capabilities
3. Returns JSON responses that can be directly used by frontend components
4. Maintains context across multiple related queries when session information is provided

View File

@ -0,0 +1,161 @@
# تدريب وكيل محلل نماذج Django بالعربية
## هدف الوكيل
أنت وكيل ذكاء اصطناعي متخصص مصمم لتحليل نماذج Django وتقديم معلومات مفيدة للمستخدمين. وظيفتك الأساسية هي تفسير هياكل نماذج Django والعلاقات والبيانات الوصفية لتوليد رؤى ذات معنى تساعد المطورين وأصحاب المصلحة على فهم نماذج البيانات الخاصة بهم بشكل أفضل.
## القدرات الأساسية
1. تحليل وفهم تعريفات نماذج Django
2. تحديد العلاقات بين النماذج (ForeignKey, ManyToMany, OneToOne)
3. تحليل حقول النموذج وأنواعها والقيود والبيانات الوصفية
4. توليد إحصائيات ورؤى حول استخدام النموذج وهيكله
5. تقديم توصيات لتحسين النموذج
6. الاستجابة للاستعلامات باللغة الطبيعية حول النماذج
7. تنسيق الردود كـ JSON منظم للتكامل مع تطبيقات الواجهة الأمامية
## معالجة المدخلات
ستتلقى المدخلات بالتنسيق التالي:
1. كود نموذج Django أو مراجع لملفات النموذج
2. استعلام باللغة الطبيعية يحدد نوع التحليل أو الرؤى المطلوبة
3. سياق اختياري حول المشروع أو مخاوف محددة
## متطلبات المخرجات
يجب أن تكون ردودك:
1. منسقة كـ JSON صالح
2. تتضمن حقل "status" يشير إلى النجاح أو الفشل
3. توفر مصفوفة "insights" تحتوي على التحليل المطلوب
4. تتضمن بيانات وصفية حول التحليل الذي تم إجراؤه
5. منظمة بطريقة يسهل تحليلها وعرضها في واجهة أمامية
## أنواع التحليل
يجب أن تكون قادرًا على إجراء الأنواع التالية من التحليل:
### التحليل الهيكلي
- عدد النماذج ومقاييس التعقيد
- توزيع أنواع الحقول
- رسم خرائط العلاقات وبيانات التصور
- أنماط الوراثة
- استخدام النماذج المجردة
### تحليل الأداء
- اختناقات الاستعلام المحتملة
- توصيات الفهرس المفقود
- اقتراحات تحسين العلاقة
- كشف ضعف استعلام N+1
### تحليل الأمان
- كشف الحقول الحساسة
- توصيات نموذج الإذن
- تقييم مخاطر التعرض للبيانات
### تحليل سلامة البيانات
- تحليل القيود
- تقييم قواعد التحقق
- توصيات اتساق البيانات
## أمثلة على التفاعلات
### مثال 1: تحليل النموذج الأساسي
**استعلام المدخلات:**
"قم بتحليل نماذج المستخدم والملف الشخصي وأظهر لي هيكل العلاقة بينهما."
**الرد المتوقع:**
```json
{
"status": "نجاح",
"request_id": "a1b2c3d4",
"timestamp": "2025-05-25T23:21:56Z",
"insights": [
{
"type": "تحليل_العلاقات",
"models": ["User", "Profile"],
"relationships": [
{
"from": "Profile",
"to": "User",
"type": "OneToOne",
"field": "user",
"related_name": "profile",
"on_delete": "CASCADE"
}
],
"visualization_data": {
"nodes": [...],
"edges": [...]
}
}
],
"recommendations": [
"فكر في إضافة فهرس إلى Profile.user للبحث الأسرع"
]
}
```
### مثال 2: تحليل أداء الاستعلام
**استعلام المدخلات:**
"حدد مشاكل الأداء المحتملة في نماذج الطلب وعناصر الطلب."
**الرد المتوقع:**
```json
{
"status": "نجاح",
"request_id": "e5f6g7h8",
"timestamp": "2025-05-25T23:22:30Z",
"insights": [
{
"type": "تحليل_الأداء",
"models": ["Order", "OrderItem"],
"issues": [
{
"severity": "عالية",
"model": "OrderItem",
"field": "order",
"issue": "فهرس قاعدة بيانات مفقود على ForeignKey",
"impact": "استعلامات بطيئة عند تصفية OrderItems حسب Order",
"solution": "أضف db_index=True إلى حقل order"
},
{
"severity": "متوسطة",
"model": "Order",
"issue": "لا يوجد select_related في الاستعلامات الشائعة",
"impact": "مشاكل استعلام N+1 محتملة",
"solution": "استخدم select_related عند الاستعلام عن Orders مع OrderItems"
}
]
}
],
"code_suggestions": [
{
"model": "OrderItem",
"current": "order = models.ForeignKey(Order, on_delete=models.CASCADE)",
"suggested": "order = models.ForeignKey(Order, on_delete=models.CASCADE, db_index=True)"
}
]
}
```
## القيود والحدود
1. لا يجب عليك تعديل أو تنفيذ التعليمات البرمجية ما لم يُطلب منك ذلك صراحةً
2. يجب أن تشير عندما تحتاج إلى معلومات إضافية لتقديم رؤى دقيقة
3. يجب أن تعترف عندما يكون التحليل المطلوب خارج قدراتك
4. لا يجب أن تفترض تفاصيل التنفيذ غير الموجودة في النماذج المقدمة
5. يجب أن تميز بوضوح بين الملاحظات الواقعية والتوصيات
## التعلم والتحسين
يجب أن تحسن باستمرار قدرات التحليل الخاصة بك من خلال:
1. التعلم من تعليقات المستخدم
2. البقاء على اطلاع بأفضل ممارسات Django
3. توسيع فهمك لأنماط النموذج الشائعة
4. تحسين توليد الرؤى لتكون أكثر صلة وقابلية للتنفيذ
## الاعتبارات الأخلاقية
1. احترام خصوصية البيانات من خلال عدم اقتراح كشف المعلومات الحساسة
2. تقديم توصيات متوازنة تراعي الأمان والأداء وسهولة الاستخدام
3. الشفافية بشأن حدود تحليلك
4. تجنب إصدار أحكام حول جودة الكود بما يتجاوز المقاييس الموضوعية
## التكامل التقني
سيتم دمجك في تطبيق Django كخدمة:
1. تتلقى الطلبات من خلال واجهة برمجة تطبيقات REST
2. لديها إمكانية الوصول إلى تعريفات النموذج من خلال قدرات التفتيش الذاتي لـ Django
3. تعيد استجابات JSON التي يمكن استخدامها مباشرة بواسطة مكونات الواجهة الأمامية
4. تحافظ على السياق عبر استعلامات متعددة ذات صلة عند توفير معلومات الجلسة

View File

@ -1,7 +1,8 @@
from django.urls import path from django.urls import path
from . import views from . import views
app_name = "haikalbot"
urlpatterns = [ urlpatterns = [
path("", views.ChatbotView.as_view(), name="chatbot"), path("analyze/", views.ModelAnalystView.as_view(), name="haikalbot"),
] ]

View File

@ -0,0 +1,119 @@
def format_response(prompt, language, request_id, timestamp):
    """
    Build the empty skeleton of a successful analysis response.

    The key names (and the success marker) are localized according to the
    requested language; the insights list starts empty and is filled by the
    caller.

    :param prompt: The original user prompt
    :type prompt: str
    :param language: Language code ('en' or 'ar')
    :type language: str
    :param request_id: Unique identifier for the request
    :type request_id: str
    :param timestamp: ISO-formatted timestamp
    :type timestamp: str
    :return: Formatted response structure
    :rtype: dict
    """
    arabic = language == 'ar'
    status_key, id_key, ts_key, prompt_key, insights_key = (
        ('حالة', 'معرف_الطلب', 'الطابع_الزمني', 'الاستعلام', 'التحليلات')
        if arabic else
        ('status', 'request_id', 'timestamp', 'prompt', 'insights')
    )
    return {
        status_key: "نجاح" if arabic else "success",
        id_key: request_id,
        ts_key: timestamp,
        prompt_key: prompt,
        insights_key: [],
    }
def format_error_response(message, status_code, language='en'):
    """
    Format a standardized error response with localized keys.

    :param message: Error message
    :type message: str
    :param status_code: HTTP status code
    :type status_code: int
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Formatted error response
    :rtype: dict
    """
    if language == 'ar':
        return {
            'حالة': "خطأ",
            'رسالة': message,
            'رمز_الحالة': status_code
        }
    # Default (English) key set; any non-'ar' language falls through here.
    return {
        'status': "error",
        'message': message,
        'status_code': status_code
    }
def format_insights_for_display(insights, language='en'):
    """
    Format insights for human-readable (Markdown) display.

    Renders each insight as a ``###`` section with one bullet per result,
    followed by an optional recommendations section. Keys are looked up
    under their localized names ('ar') with English fallbacks.

    :param insights: Raw insights data (the dict built by format_response)
    :type insights: dict
    :param language: Language code ('en' or 'ar')
    :type language: str
    :return: Human-readable formatted insights
    :rtype: str
    """
    formatted_text = ""

    # Localized top-level keys.
    insights_key = 'التحليلات' if language == 'ar' else 'insights'
    recs_key = 'التوصيات' if language == 'ar' else 'recommendations'

    # Localized per-insight keys, computed once (not per insight).
    type_key = 'نوع' if language == 'ar' else 'type'
    results_key = 'النتائج' if language == 'ar' else 'results'
    model_key = 'النموذج' if language == 'ar' else 'model'
    error_key = 'خطأ' if language == 'ar' else 'error'
    count_key = 'العدد' if language == 'ar' else 'count'

    # Format insights.
    if insights_key in insights and insights[insights_key]:
        formatted_text += "## نتائج التحليل\n\n" if language == 'ar' else "## Analysis Results\n\n"
        for insight in insights[insights_key]:
            if not isinstance(insight, dict):
                continue
            # Section header from the insight type, accepting either
            # the localized or the English/Arabic fallback key.
            if 'type' in insight or 'نوع' in insight:
                insight_type = insight.get(type_key, insight.get('type', insight.get('نوع', '')))
                formatted_text += f"### {insight_type}\n\n"
            # One bullet per result: error text when present, else the count.
            if results_key in insight:
                for result in insight[results_key]:
                    model_name = result.get(model_key, result.get('model', ''))
                    if error_key in result:
                        formatted_text += f"- **{model_name}**: {result[error_key]}\n"
                    elif count_key in result:
                        formatted_text += f"- **{model_name}**: {result[count_key]}\n"
            # Blank line between insight sections.
            formatted_text += "\n"

    # Format recommendations.
    if recs_key in insights and insights[recs_key]:
        formatted_text += "## التوصيات\n\n" if language == 'ar' else "## Recommendations\n\n"
        for rec in insights[recs_key]:
            formatted_text += f"- {rec}\n"

    return formatted_text

View File

@ -1,38 +1,253 @@
from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.shortcuts import render from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.views import View
from django.http import JsonResponse from django.http import JsonResponse
from .chatbot_logic import get_gpt4_response from django.apps import apps
from django.db import models
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
import json import json
import hashlib
import logging
import uuid
import re
class ChatbotView(LoginRequiredMixin, View): from inventory import models as inventory_models
from inventory.utils import get_user_type
from .models import AnalysisCache
from .services.llm_service import get_llm_chain
from .services.analysis_service import (
generate_model_insight,
generate_count_insight,
generate_relationship_insight,
generate_performance_insight,
generate_statistics_insight,
generate_recommendations
)
from .services.cache_service import CacheService
from .utils.response_formatter import format_response
logger = logging.getLogger(__name__)
@method_decorator(csrf_exempt, name='dispatch')
class ModelAnalystView(View):
""" """
Represents a view for handling chatbot interactions. View for handling model analysis requests and rendering the chatbot interface.
This class handles GET and POST requests for a chatbot interface. It leverages This view provides both GET and POST methods:
`LoginRequiredMixin` to ensure that only authenticated users can access it. On GET - GET: Renders the chatbot interface
requests, it renders the chatbot interface, while on POST requests, it interacts - POST: Processes analysis requests and returns JSON responses
with a chatbot backend to process user messages and return responses.
:ivar request: The HTTP request object, providing metadata about the The view includes caching, permission checking, and multilingual support.
current session and user.
:type request: HttpRequest
""" """
def get(self, request): # Configuration settings (can be moved to Django settings)
return render(request, "haikalbot/chatbot.html") CACHE_DURATION = getattr(settings, 'ANALYSIS_CACHE_DURATION', 3600)
DEFAULT_LANGUAGE = getattr(settings, 'DEFAULT_LANGUAGE', 'en')
def post(self, request): def get(self, request, *args, **kwargs):
dealer = request.user.dealer """
Render the chatbot interface.
:param request: The HTTP request
:return: Rendered chatbot.html template
"""
context = {
'dark_mode': request.session.get('dark_mode', False)
}
return render(request, "haikalbot/chatbot.html", context)
def post(self, request, *args, **kwargs):
"""
Process analysis requests and return JSON responses.
:param request: The HTTP request containing the prompt
:return: JsonResponse with analysis results
"""
try: try:
# Parse request data
data = json.loads(request.body) data = json.loads(request.body)
user_message = data.get("message", "").strip() prompt = data.get('prompt')
language = data.get('language', self.DEFAULT_LANGUAGE)
dealer = get_user_type(request)
if not user_message: # Validate request
return JsonResponse({"error": "Message cannot be empty."}, status=400) if not prompt:
error_msg = "الاستعلام مطلوب" if language == 'ar' else "Prompt is required"
return self._error_response(error_msg, 400)
chatbot_response = get_gpt4_response(user_message, dealer) if not self._check_permissions(dealer.id):
error_msg = "تم رفض الإذن" if language == 'ar' else "Permission denied"
return self._error_response(error_msg, 403)
# Check cache
cache_service = CacheService()
prompt_hash = cache_service.generate_hash(prompt, dealer.id, language)
cached_result = cache_service.get_cached_result(prompt_hash, request.user, dealer.id)
if cached_result:
return JsonResponse(cached_result)
# Process prompt and generate insights
insights = self._process_prompt(prompt, dealer, language)
# Cache results
cache_service.cache_result(
prompt_hash,
insights,
request.user,
dealer.id,
self.CACHE_DURATION
)
return JsonResponse(insights)
return JsonResponse({"response": chatbot_response}, status=200)
except json.JSONDecodeError: except json.JSONDecodeError:
return JsonResponse({"error": "Invalid JSON format."}, status=400) error_msg = "بيانات JSON غير صالحة في نص الطلب" if language == 'ar' else "Invalid JSON in request body"
return self._error_response(error_msg, 400)
except Exception as e:
logger.exception("Error processing model analysis request")
error_msg = f"حدث خطأ: {str(e)}" if language == 'ar' else f"An error occurred: {str(e)}"
return self._error_response(error_msg, 500)
def _error_response(self, message, status):
    """
    Create a standardized JSON error response.

    NOTE(review): keys are always English ('status'/'message') even for
    Arabic messages, unlike utils.format_error_response which localizes
    keys — confirm whether the front-end expects English keys here.

    :param message: Error message (already localized by the caller)
    :param status: HTTP status code
    :return: JsonResponse with error details
    """
    return JsonResponse({"status": "error", "message": message}, status=status)
def _check_permissions(self, dealer_id):
    """
    Check if the dealer has permission to access the analysis.

    Permission currently means "a Dealer row with this id exists".

    :param dealer_id: ID of the dealer
    :return: True if the dealer exists, False otherwise (including when
        the lookup itself fails — failures are logged, not raised)
    """
    try:
        return inventory_models.Dealer.objects.filter(id=dealer_id).exists()
    except Exception:
        # Best-effort: deny access on any lookup error rather than
        # propagating a 500 to the client.
        logger.exception("Error checking permissions")
        return False
def _process_prompt(self, prompt, dealer, language):
    """
    Process the prompt and generate insights.

    Pipeline: build a localized response skeleton, let the LLM chain
    classify the prompt into an analysis type / target models, run the
    matching analysis service over the (dealer-filtered) model list, then
    attach insights, recommendations, and a plain-text summary.

    :param prompt: User's prompt text
    :param dealer: Dealer object (may be None)
    :param language: Language code (e.g. 'en', 'ar')
    :return: Dictionary with analysis results
    """
    # Response skeleton; key names are localized by format_response.
    response = format_response(
        prompt=prompt,
        language=language,
        request_id=str(uuid.uuid4()),
        timestamp=timezone.now().isoformat(),
    )

    # Ask the LLM to classify the prompt. Any failure degrades to an
    # empty result so the view still returns a generic analysis.
    chain = get_llm_chain(language=language)
    result = {}
    if chain:
        try:
            raw = chain.invoke({"prompt": prompt})
            # Extract the first {...} JSON object from the LLM output;
            # newlines are flattened so the greedy match can span lines.
            json_match = re.search(r'({.*})', raw.replace('\n', ' '), re.DOTALL)
            if json_match:
                result = json.loads(json_match.group(1))
        except Exception as e:
            logger.error(f"LLM error fallback: {e}")
            result = {}

    # Extract analysis parameters (with safe defaults).
    analysis_type = result.get('analysis_type', 'general')
    target_models = result.get('target_models', [])
    query_params = result.get('query_params', {})

    # Narrow the model list to the requested targets and, when a dealer
    # is known, to models carrying a dealer field.
    models_to_analyze = self._filter_models(list(apps.get_models()), target_models)
    if dealer:
        models_to_analyze = self._filter_by_dealer(models_to_analyze, dealer.id)

    # Dispatch to the analysis service for the detected type; unknown
    # types fall back to a per-model general insight.
    analysis_method = {
        'count': generate_count_insight,
        'relationship': generate_relationship_insight,
        'performance': generate_performance_insight,
        'statistics': generate_statistics_insight,
    }.get(analysis_type, self._generate_model_insight_all)

    insights = analysis_method(
        models_to_analyze, query_params, dealer.id if dealer else None, language
    )

    # Attach insights under the localized key.
    insights_key = "التحليلات" if language == 'ar' else "insights"
    if isinstance(insights, list):
        response[insights_key].extend(insights)
    else:
        response[insights_key].append(insights)

    # Attach recommendations only when the service produced any.
    recommendations = generate_recommendations(models_to_analyze, analysis_type, language)
    if recommendations:
        recs_key = "التوصيات" if language == 'ar' else "recommendations"
        response[recs_key] = recommendations

    # Plain-text summary for the chat UI: one line per insight, using
    # its 'type' when available.
    summary_lines = []
    for insight in response[insights_key]:
        if isinstance(insight, dict):
            summary_lines.append(insight.get('type', 'Insight'))
        else:
            summary_lines.append(str(insight))
    response['response'] = "\n".join(summary_lines)

    return response
def _filter_models(self, all_models, target_models):
"""
Filter models based on target model names.
:param all_models: List of all available models
:param target_models: List of target model names
:return: Filtered list of models
"""
if not target_models:
return all_models
return [m for m in all_models if m.__name__ in target_models or
m.__name__.lower() in [t.lower() for t in target_models]]
def _filter_by_dealer(self, models, dealer_id):
"""
Filter models that are relevant to the dealer.
:param models: List of models
:param dealer_id: ID of the dealer
:return: Filtered list of models
"""
dealer_models = [m for m in models if any(f.name in ('dealer', 'dealer_id')
for f in m._meta.fields)]
return dealer_models if dealer_models else models
def _generate_model_insight_all(self, models, query_params, dealer_id, language):
    """
    Generate a general insight for every model.

    Fallback analysis method used when the LLM did not classify the
    prompt into a specific analysis type. query_params is accepted for
    signature compatibility with the typed analysis services but is not
    forwarded to generate_model_insight.

    :param models: List of model classes
    :param query_params: Query parameters (unused here)
    :param dealer_id: ID of the dealer, or None
    :param language: Language code
    :return: List of insights, one per model
    """
    return [generate_model_insight(m, dealer_id, language) for m in models]

8
install_marwan.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# Bootstrap the project: install Python dependencies, apply migrations,
# and load initial fixture data. Run from the repository root, ideally
# inside an activated virtualenv.
#
# Fail fast: without this, a failed `pip install` would still run the
# migration and data-loading steps against a broken environment.
set -euo pipefail

# System packages needed for some wheels (uncomment on a fresh machine):
#sudo brew update && sudo brew install libgl1 libglib2.0-dev libzbar0 cmake build-essential xmlsec1 libxmlsec1-dev pkg-config libxml2-dev libxmlsec1-dev libxmlsec1-openssl libssl-dev -y
pip install --upgrade pip
pip install -r requirements.txt
./apply_initial_migrations.sh
./load_initial_data_marwan.sh

View File

@ -1,4 +1,4 @@
# Generated by Django 5.1.7 on 2025-05-19 11:40 # Generated by Django 5.2.1 on 2025-05-25 23:01
import datetime import datetime
import django.core.validators import django.core.validators
@ -21,32 +21,11 @@ class Migration(migrations.Migration):
('appointment', '0001_initial'), ('appointment', '0001_initial'),
('auth', '0012_alter_user_first_name_max_length'), ('auth', '0012_alter_user_first_name_max_length'),
('contenttypes', '0002_remove_content_type_name'), ('contenttypes', '0002_remove_content_type_name'),
('django_ledger', '0021_alter_bankaccountmodel_account_model_and_more'), ('django_ledger', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL), migrations.swappable_dependency(settings.AUTH_USER_MODEL),
] ]
operations = [ operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True, verbose_name='Primary Key')),
('slug', models.SlugField(blank=True, help_text='Slug for the object. If not provided, it will be generated automatically.', null=True, unique=True, verbose_name='Slug')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated At')),
('vin', models.CharField(max_length=17, unique=True, verbose_name='VIN')),
('year', models.IntegerField(verbose_name='Year')),
('status', models.CharField(choices=[('available', 'Available'), ('sold', 'Sold'), ('hold', 'Hold'), ('damaged', 'Damaged'), ('reserved', 'Reserved'), ('transfer', 'Transfer')], default='available', max_length=10, verbose_name='Status')),
('stock_type', models.CharField(choices=[('new', 'New'), ('used', 'Used')], default='new', max_length=10, verbose_name='Stock Type')),
('remarks', models.TextField(blank=True, null=True, verbose_name='Remarks')),
('mileage', models.IntegerField(blank=True, null=True, verbose_name='Mileage')),
('receiving_date', models.DateTimeField(verbose_name='Receiving Date')),
('hash', models.CharField(blank=True, max_length=64, null=True, verbose_name='Hash')),
],
options={
'verbose_name': 'Car',
'verbose_name_plural': 'Cars',
},
),
migrations.CreateModel( migrations.CreateModel(
name='CarEquipment', name='CarEquipment',
fields=[ fields=[
@ -146,6 +125,29 @@ class Migration(migrations.Migration):
}, },
bases=(models.Model, inventory.mixins.LocalizedNameMixin), bases=(models.Model, inventory.mixins.LocalizedNameMixin),
), ),
migrations.CreateModel(
name='Car',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True, verbose_name='Primary Key')),
('slug', models.SlugField(blank=True, help_text='Slug for the object. If not provided, it will be generated automatically.', null=True, unique=True, verbose_name='Slug')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated At')),
('vin', models.CharField(max_length=17, unique=True, verbose_name='VIN')),
('year', models.IntegerField(verbose_name='Year')),
('status', models.CharField(choices=[('available', 'Available'), ('sold', 'Sold'), ('hold', 'Hold'), ('damaged', 'Damaged'), ('reserved', 'Reserved'), ('transfer', 'Transfer')], default='available', max_length=10, verbose_name='Status')),
('stock_type', models.CharField(choices=[('new', 'New'), ('used', 'Used')], default='new', max_length=10, verbose_name='Stock Type')),
('remarks', models.TextField(blank=True, null=True, verbose_name='Remarks')),
('mileage', models.IntegerField(blank=True, null=True, verbose_name='Mileage')),
('receiving_date', models.DateTimeField(verbose_name='Receiving Date')),
('hash', models.CharField(blank=True, max_length=64, null=True, verbose_name='Hash')),
('item_model', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='django_ledger.itemmodel', verbose_name='Item Model')),
('id_car_make', models.ForeignKey(blank=True, db_column='id_car_make', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='inventory.carmake', verbose_name='Make')),
],
options={
'verbose_name': 'Car',
'verbose_name_plural': 'Cars',
},
),
migrations.CreateModel( migrations.CreateModel(
name='CarFinance', name='CarFinance',
fields=[ fields=[
@ -161,11 +163,6 @@ class Migration(migrations.Migration):
'verbose_name_plural': 'Car Financial Details', 'verbose_name_plural': 'Car Financial Details',
}, },
), ),
migrations.AddField(
model_name='car',
name='id_car_make',
field=models.ForeignKey(blank=True, db_column='id_car_make', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='inventory.carmake', verbose_name='Make'),
),
migrations.CreateModel( migrations.CreateModel(
name='CarModel', name='CarModel',
fields=[ fields=[
@ -488,6 +485,7 @@ class Migration(migrations.Migration):
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')), ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')), ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='notes_created', to=settings.AUTH_USER_MODEL)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='notes_created', to=settings.AUTH_USER_MODEL)),
('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='inventory.dealer')),
], ],
options={ options={
'verbose_name': 'Note', 'verbose_name': 'Note',
@ -524,6 +522,7 @@ class Migration(migrations.Migration):
('active', models.BooleanField(default=True, verbose_name='Active')), ('active', models.BooleanField(default=True, verbose_name='Active')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')), ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('slug', models.SlugField(blank=True, editable=False, max_length=255, null=True, unique=True)),
('customer_model', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_ledger.customermodel')), ('customer_model', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_ledger.customermodel')),
('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='organizations', to='inventory.dealer')), ('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='organizations', to='inventory.dealer')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='organization_profile', to=settings.AUTH_USER_MODEL)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='organization_profile', to=settings.AUTH_USER_MODEL)),
@ -555,6 +554,7 @@ class Migration(migrations.Migration):
('next_action_date', models.DateTimeField(blank=True, null=True, verbose_name='Next Action Date')), ('next_action_date', models.DateTimeField(blank=True, null=True, verbose_name='Next Action Date')),
('is_converted', models.BooleanField(default=False)), ('is_converted', models.BooleanField(default=False)),
('converted_at', models.DateTimeField(blank=True, null=True)), ('converted_at', models.DateTimeField(blank=True, null=True)),
('salary', models.PositiveIntegerField(blank=True, null=True, verbose_name='Salary')),
('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Created')), ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')), ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('slug', models.SlugField(blank=True, null=True, unique=True)), ('slug', models.SlugField(blank=True, null=True, unique=True)),
@ -645,8 +645,10 @@ class Migration(migrations.Migration):
('arabic_name', models.CharField(max_length=255, verbose_name='Arabic Name')), ('arabic_name', models.CharField(max_length=255, verbose_name='Arabic Name')),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region='SA', verbose_name='Phone Number')), ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region='SA', verbose_name='Phone Number')),
('staff_type', models.CharField(choices=[('inventory', 'Inventory'), ('accountant', 'Accountant'), ('sales', 'Sales')], max_length=255, verbose_name='Staff Type')), ('staff_type', models.CharField(choices=[('inventory', 'Inventory'), ('accountant', 'Accountant'), ('sales', 'Sales')], max_length=255, verbose_name='Staff Type')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')), ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('slug', models.SlugField(blank=True, editable=False, max_length=255, null=True, unique=True)),
('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='staff', to='inventory.dealer')), ('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='staff', to='inventory.dealer')),
('staff_member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='staff', to='appointment.staffmember')), ('staff_member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='staff', to='appointment.staffmember')),
], ],
@ -670,8 +672,9 @@ class Migration(migrations.Migration):
('closing_date', models.DateField(blank=True, null=True, verbose_name='Closing Date')), ('closing_date', models.DateField(blank=True, null=True, verbose_name='Closing Date')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')), ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('slug', models.SlugField(blank=True, help_text='Unique slug for the opportunity.', null=True, unique=True, verbose_name='Slug')),
('car', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='inventory.car', verbose_name='Car')), ('car', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='inventory.car', verbose_name='Car')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='django_ledger.customermodel')), ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='inventory.customer')),
('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='inventory.dealer')), ('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='inventory.dealer')),
('estimate', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='opportunity', to='django_ledger.estimatemodel')), ('estimate', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='opportunity', to='django_ledger.estimatemodel')),
('lead', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunity', to='inventory.lead')), ('lead', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunity', to='inventory.lead')),
@ -750,6 +753,7 @@ class Migration(migrations.Migration):
('email', models.EmailField(max_length=255, verbose_name='Email Address')), ('email', models.EmailField(max_length=255, verbose_name='Email Address')),
('address', models.CharField(max_length=200, verbose_name='Address')), ('address', models.CharField(max_length=200, verbose_name='Address')),
('logo', models.ImageField(blank=True, null=True, upload_to='logos/vendors', verbose_name='Logo')), ('logo', models.ImageField(blank=True, null=True, upload_to='logos/vendors', verbose_name='Logo')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('slug', models.SlugField(blank=True, max_length=255, null=True, unique=True, verbose_name='Slug')), ('slug', models.SlugField(blank=True, max_length=255, null=True, unique=True, verbose_name='Slug')),
('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vendors', to='inventory.dealer')), ('dealer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vendors', to='inventory.dealer')),

View File

@ -6,7 +6,7 @@ from django.db import migrations, models
class Migration(migrations.Migration): class Migration(migrations.Migration):
dependencies = [ dependencies = [
('inventory', '0011_alter_car_item_model'), ('inventory', '0001_initial'),
] ]
operations = [ operations = [

View File

@ -1,18 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-20 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vendor',
name='active',
field=models.BooleanField(default=True, verbose_name='Active'),
),
]

View File

@ -1,19 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-20 12:45
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0002_vendor_active'),
]
operations = [
migrations.AlterField(
model_name='opportunity',
name='customer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunities', to='inventory.customer'),
),
]

View File

@ -1,18 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-20 12:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0003_alter_opportunity_customer'),
]
operations = [
migrations.AddField(
model_name='opportunity',
name='slug',
field=models.SlugField(blank=True, help_text='Unique slug for the opportunity.', null=True, unique=True, verbose_name='Slug'),
),
]

View File

@ -1,20 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-20 13:45
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0004_opportunity_slug'),
]
operations = [
migrations.AddField(
model_name='notes',
name='dealer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='inventory.dealer'),
preserve_default=False,
),
]

View File

@ -1,18 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-21 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0005_notes_dealer'),
]
operations = [
migrations.AddField(
model_name='organization',
name='slug',
field=models.SlugField(blank=True, editable=False, max_length=255, null=True, unique=True),
),
]

View File

@ -1,23 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-21 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0006_organization_slug'),
]
operations = [
migrations.AddField(
model_name='staff',
name='active',
field=models.BooleanField(default=True, verbose_name='Active'),
),
migrations.AddField(
model_name='staff',
name='slug',
field=models.SlugField(blank=True, editable=False, max_length=255, null=True, unique=True),
),
]

View File

@ -1,18 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-21 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0007_staff_active_staff_slug'),
]
operations = [
migrations.AddField(
model_name='lead',
name='salary',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Salary'),
),
]

View File

@ -1,20 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-25 11:41
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_ledger', '0021_alter_bankaccountmodel_account_model_and_more'),
('inventory', '0008_lead_salary'),
]
operations = [
migrations.AddField(
model_name='car',
name='item_model',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cars', to='django_ledger.itemmodel', verbose_name='Item Model'),
),
]

View File

@ -1,20 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-25 11:43
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_ledger', '0021_alter_bankaccountmodel_account_model_and_more'),
('inventory', '0009_car_item_model'),
]
operations = [
migrations.AlterField(
model_name='car',
name='item_model',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cars', to='django_ledger.itemmodel', verbose_name='Item Model'),
),
]

View File

@ -1,20 +0,0 @@
# Generated by Django 5.1.7 on 2025-05-25 11:44
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_ledger', '0021_alter_bankaccountmodel_account_model_and_more'),
('inventory', '0010_alter_car_item_model'),
]
operations = [
migrations.AlterField(
model_name='car',
name='item_model',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='django_ledger.itemmodel', verbose_name='Item Model'),
),
]

View File

@ -45,6 +45,7 @@ urlpatterns = [
# Tasks # Tasks
path('tasks/', views.task_list, name='task_list'), path('tasks/', views.task_list, name='task_list'),
path('legal/', views.terms_and_privacy, name='terms_and_privacy'),
# path('tasks/<int:task_id>/detail/', views.task_detail, name='task_detail'), # path('tasks/<int:task_id>/detail/', views.task_detail, name='task_detail'),
# Dashboards # Dashboards
# path("user/<int:pk>/settings/", views.UserSettingsView.as_view(), name="user_settings"), # path("user/<int:pk>/settings/", views.UserSettingsView.as_view(), name="user_settings"),

View File

@ -510,6 +510,10 @@ class SalesDashboard(LoginRequiredMixin, TemplateView):
return context return context
def terms_and_privacy(request):
return render(request, 'terms_and_privacy.html')
class WelcomeView(TemplateView): class WelcomeView(TemplateView):
""" """
Handles the rendering and context data for the Welcome view. Handles the rendering and context data for the Welcome view.

46
load_initial_data_marwan.sh Executable file
View File

@ -0,0 +1,46 @@
#!/bin/bash
# Load the initial car-catalog fixtures, colors, and subscription plans.
# Assumes migrations have already been applied (see install_marwan.sh).
#
# Fail fast: without this, a failed `loaddata` would silently continue
# and leave the database partially populated.
set -euo pipefail

echo "Loading initial data"
echo "Loading carmake"
python manage.py loaddata carmake_with_slugs.json
echo "Loading carmodel"
python manage.py loaddata carmodel_with_slugs.json
echo "Loading carserie"
python manage.py loaddata carserie_with_slugs.json
echo "Loading cartrim"
python manage.py loaddata cartrim_with_slugs.json
echo "Loading carequipment"
python manage.py loaddata carequipment_with_slugs.json
echo "Loading carspecification"
python manage.py loaddata carspecification.json
echo "Loading carspecificationvalue"
python manage.py loaddata carspecificationvalue.json
echo "Loading caroption"
python manage.py loaddata caroption_with_slugs.json
echo "Loading caroptionvalue"
python manage.py loaddata caroptionvalue.json
echo "Populating colors"
python manage.py populate_colors
echo "Populating Plans"
# Plans must load before pricing/quota rows that reference them.
python manage.py loaddata plan.json
python manage.py loaddata pricing.json
python manage.py loaddata planpricing.json
python manage.py loaddata quota.json
python manage.py loaddata planquota.json
python manage.py set_vat
python manage.py initial_services_offered
echo "Done"

Binary file not shown.

File diff suppressed because it is too large Load Diff

50
plan.json Normal file
View File

@ -0,0 +1,50 @@
[
{
"model": "plans.plan",
"pk": 1,
"fields": {
"order": 0,
"created": "2025-05-03T16:27:13.240Z",
"updated_at": "2025-05-03T16:27:13.240Z",
"name": "الأساسية",
"description": "مثالية لوكالات السيارات الصغيرة أو الأفراد الذين يديرون عددًا محدودًا من السيارات\r\nتتيح الخطة إدارة ما يصل إلى 3 مستخدمين\r\nتتيح الخطة إدارة ما يصل إلى 100 مركبة\r\nتوفر ميزات أساسية لمتابعة المبيعات والمخزون بسهولة دون تكلفة مرتفعة",
"default": null,
"available": true,
"visible": true,
"customized": null,
"url": ""
}
},
{
"model": "plans.plan",
"pk": 2,
"fields": {
"order": 1,
"created": "2025-05-03T16:29:17.050Z",
"updated_at": "2025-05-03T16:29:17.050Z",
"name": "المتقدمة",
"description": "مصممة للوكالات المتوسطة الحجم التي تحتاج إلى إدارة عدد أكبر من المستخدمين والمخزون\r\nتدعم 6 مستخدمين\r\nوما يصل إلى 300 مركبة في المخزون\r\nتشمل أدوات إضافية لتحليل الأداء وتتبع المبيعات وإدارة المعاملات بسهولة.",
"default": null,
"available": true,
"visible": true,
"customized": null,
"url": ""
}
},
{
"model": "plans.plan",
"pk": 3,
"fields": {
"order": 2,
"created": "2025-05-03T16:30:47.026Z",
"updated_at": "2025-05-03T16:30:47.026Z",
"name": "البلاتينيوم",
"description": "الخيار الأمثل للوكالات الكبيرة التي تتعامل مع حجم عملاء كبير ومخزون واسع\r\nتدعم 12 مستخدمًا\r\nوما يصل إلى 1000 مركبة في المخزون\r\nتوفر ميزات متقدمة لتحسين الكفاءة مثل التحليلات المخصصة، ودعم العملاء المتميز، وإدارة العقود والمستندات",
"default": null,
"available": true,
"visible": true,
"customized": null,
"url": ""
}
}
]

86
planpricing.json Normal file
View File

@ -0,0 +1,86 @@
[
{
"model": "plans.planpricing",
"pk": 1,
"fields": {
"created": "2025-05-03T16:27:13.240Z",
"updated_at": "2025-05-03T16:27:13.240Z",
"plan": 1,
"pricing": 1,
"price": "600.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
},
{
"model": "plans.planpricing",
"pk": 2,
"fields": {
"created": "2025-05-03T16:27:13.241Z",
"updated_at": "2025-05-03T16:27:13.241Z",
"plan": 1,
"pricing": 2,
"price": "6480.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
},
{
"model": "plans.planpricing",
"pk": 3,
"fields": {
"created": "2025-05-03T16:29:17.051Z",
"updated_at": "2025-05-03T16:29:17.051Z",
"plan": 2,
"pricing": 1,
"price": "900.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
},
{
"model": "plans.planpricing",
"pk": 4,
"fields": {
"created": "2025-05-03T16:29:17.051Z",
"updated_at": "2025-05-03T16:29:17.051Z",
"plan": 2,
"pricing": 2,
"price": "9720.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
},
{
"model": "plans.planpricing",
"pk": 5,
"fields": {
"created": "2025-05-03T16:30:47.026Z",
"updated_at": "2025-05-03T16:30:47.026Z",
"plan": 3,
"pricing": 1,
"price": "1300.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
},
{
"model": "plans.planpricing",
"pk": 6,
"fields": {
"created": "2025-05-03T16:30:47.027Z",
"updated_at": "2025-05-03T16:30:47.027Z",
"plan": 3,
"pricing": 2,
"price": "14040.00",
"order": 0,
"has_automatic_renewal": false,
"visible": true
}
}
]

68
planquota.json Normal file
View File

@ -0,0 +1,68 @@
[
{
"model": "plans.planquota",
"pk": 1,
"fields": {
"created": "2025-05-03T16:27:13.241Z",
"updated_at": "2025-05-03T16:27:13.241Z",
"plan": 1,
"quota": 1,
"value": 3
}
},
{
"model": "plans.planquota",
"pk": 2,
"fields": {
"created": "2025-05-03T16:27:13.241Z",
"updated_at": "2025-05-03T16:27:13.241Z",
"plan": 1,
"quota": 2,
"value": 100
}
},
{
"model": "plans.planquota",
"pk": 3,
"fields": {
"created": "2025-05-03T16:29:17.051Z",
"updated_at": "2025-05-03T16:29:17.051Z",
"plan": 2,
"quota": 1,
"value": 6
}
},
{
"model": "plans.planquota",
"pk": 4,
"fields": {
"created": "2025-05-03T16:29:17.052Z",
"updated_at": "2025-05-03T16:29:17.052Z",
"plan": 2,
"quota": 2,
"value": 300
}
},
{
"model": "plans.planquota",
"pk": 5,
"fields": {
"created": "2025-05-03T16:30:47.027Z",
"updated_at": "2025-05-03T16:30:47.027Z",
"plan": 3,
"quota": 1,
"value": 12
}
},
{
"model": "plans.planquota",
"pk": 6,
"fields": {
"created": "2025-05-03T16:30:47.027Z",
"updated_at": "2025-05-03T16:30:47.027Z",
"plan": 3,
"quota": 2,
"value": 1000
}
}
]

24
pricing.json Normal file
View File

@ -0,0 +1,24 @@
[
{
"model": "plans.pricing",
"pk": 1,
"fields": {
"created": "2025-05-03T16:25:31.501Z",
"updated_at": "2025-05-03T16:25:31.501Z",
"name": "شهري",
"period": 30,
"url": ""
}
},
{
"model": "plans.pricing",
"pk": 2,
"fields": {
"created": "2025-05-03T16:25:45.320Z",
"updated_at": "2025-05-03T16:25:45.320Z",
"name": "سنوي",
"period": 365,
"url": ""
}
}
]

32
quota.json Normal file
View File

@ -0,0 +1,32 @@
[
{
"model": "plans.quota",
"pk": 1,
"fields": {
"order": 0,
"created": "2025-05-03T16:26:49.537Z",
"updated_at": "2025-05-03T16:26:49.537Z",
"codename": "Users",
"name": "مستخدم",
"unit": "",
"description": "",
"is_boolean": false,
"url": ""
}
},
{
"model": "plans.quota",
"pk": 2,
"fields": {
"order": 1,
"created": "2025-05-03T16:27:03.802Z",
"updated_at": "2025-05-03T16:27:03.802Z",
"codename": "Cars",
"name": "سيارات",
"unit": "",
"description": "",
"is_boolean": false,
"url": ""
}
}
]

View File

@ -1,13 +1,14 @@
aiohappyeyeballs==2.6.1 aiohappyeyeballs==2.6.1
aiohttp==3.11.18 aiohttp==3.12.0
aiohttp-retry==2.9.1 aiohttp-retry==2.9.1
aiosignal==1.3.2 aiosignal==1.3.2
alabaster==1.0.0 alabaster==1.0.0
albucore==0.0.24 albucore==0.0.24
albumentations==2.0.6 albumentations==2.0.7
annotated-types==0.7.0 annotated-types==0.7.0
anyio==4.9.0 anyio==4.9.0
arabic-reshaper==3.0.0 arabic-reshaper==3.0.0
arrow==1.3.0
asgiref==3.8.1 asgiref==3.8.1
astor==0.8.1 astor==0.8.1
astroid==3.3.10 astroid==3.3.10
@ -16,6 +17,7 @@ autopep8==2.3.2
Babel==2.15.0 Babel==2.15.0
beautifulsoup4==4.13.4 beautifulsoup4==4.13.4
bleach==6.2.0 bleach==6.2.0
blessed==1.21.0
blinker==1.9.0 blinker==1.9.0
Brotli==1.1.0 Brotli==1.1.0
cattrs==24.1.3 cattrs==24.1.3
@ -23,16 +25,17 @@ certifi==2025.4.26
cffi==1.17.1 cffi==1.17.1
chardet==5.2.0 chardet==5.2.0
charset-normalizer==3.4.2 charset-normalizer==3.4.2
click==8.2.0 click==8.2.1
colorama==0.4.6 colorama==0.4.6
commonmark==0.9.1 commonmark==0.9.1
contourpy==1.3.2 contourpy==1.3.2
crispy-bootstrap5==2025.4 crispy-bootstrap5==2025.4
cryptography==44.0.3 cryptography==45.0.3
cssselect2==0.8.0 cssselect2==0.8.0
ctranslate2==4.6.0 ctranslate2==4.6.0
cycler==0.12.1 cycler==0.12.1
Cython==3.1.0 Cython==3.1.1
dataclasses-json==0.6.7
decorator==5.2.1 decorator==5.2.1
defusedxml==0.7.1 defusedxml==0.7.1
desert==2020.11.18 desert==2020.11.18
@ -40,13 +43,13 @@ diff-match-patch==20241021
dill==0.4.0 dill==0.4.0
distro==1.9.0 distro==1.9.0
dj-rest-auth==7.0.1 dj-rest-auth==7.0.1
dj-shop-cart==8.0.0a2
Django==5.2.1 Django==5.2.1
django-allauth==65.8.0 django-allauth==65.8.1
django-appointment==3.8.0 django-appointment==3.8.0
django-autoslug==1.9.9 django-autoslug==1.9.9
django-background-tasks==1.2.8 django-background-tasks==1.2.8
django-bootstrap5==25.1 django-bootstrap5==25.1
django-ckeditor==6.7.2
django-classy-tags==4.1.0 django-classy-tags==4.1.0
django-cors-headers==4.7.0 django-cors-headers==4.7.0
django-countries==7.6.1 django-countries==7.6.1
@ -56,6 +59,7 @@ django-extensions==4.1
django-filter==25.1 django-filter==25.1
django-formtools==2.5.1 django-formtools==2.5.1
django-import-export==4.3.7 django-import-export==4.3.7
django-js-asset==3.1.2
django-ledger==0.7.7 django-ledger==0.7.7
django-model-utils==5.0.0 django-model-utils==5.0.0
django-money==3.5.4 django-money==3.5.4
@ -64,7 +68,7 @@ django-nine==0.2.7
django-nonefield==0.4 django-nonefield==0.4
django-ordered-model==3.7.4 django-ordered-model==3.7.4
django-pdf-actions==0.1.49 django-pdf-actions==0.1.49
django-phonenumber-field==8.1.0 django-phonenumber-field==8.0.0
django-picklefield==3.3 django-picklefield==3.3
django-plans==2.0.0 django-plans==2.0.0
django-prometheus==2.3.1 django-prometheus==2.3.1
@ -79,23 +83,25 @@ django-sslserver==0.22
django-tables2==2.7.5 django-tables2==2.7.5
django-treebeard==4.7.1 django-treebeard==4.7.1
django-view-breadcrumbs==2.5.1 django-view-breadcrumbs==2.5.1
django-widget-tweaks==1.5.0
djangocms-admin-style==3.3.1 djangocms-admin-style==3.3.1
djangorestframework==3.16.0 djangorestframework==3.16.0
djangorestframework_simplejwt==5.5.0 djangorestframework_simplejwt==5.5.0
djangoviz==0.1.1 djangoviz==0.1.1
djhtml==3.0.8
docopt==0.6.2 docopt==0.6.2
docutils==0.21.2 docutils==0.21.2
easy-thumbnails==2.10 easy-thumbnails==2.10
emoji==2.14.1 emoji==2.14.1
et_xmlfile==2.0.0 et_xmlfile==2.0.0
Faker==37.1.0 Faker==37.3.0
filelock==3.18.0 filelock==3.18.0
fire==0.7.0 fire==0.7.0
Flask==3.1.0
fonttools==4.58.0 fonttools==4.58.0
fpdf==1.7.2
fpdf2==2.8.3 fpdf2==2.8.3
frozenlist==1.6.0 frozenlist==1.6.0
fsspec==2025.3.2 fsspec==2025.5.1
gprof2dot==2025.4.14 gprof2dot==2025.4.14
graphqlclient==0.2.4 graphqlclient==0.2.4
greenlet==3.2.2 greenlet==3.2.2
@ -105,8 +111,9 @@ hpack==4.1.0
hstspreload==2025.1.1 hstspreload==2025.1.1
httpcore==1.0.9 httpcore==1.0.9
httpx==0.28.1 httpx==0.28.1
httpx-sse==0.4.0
hyperframe==6.1.0 hyperframe==6.1.0
icalendar==6.2.0 icalendar==6.3.1
idna==3.10 idna==3.10
imageio==2.37.0 imageio==2.37.0
imagesize==1.4.1 imagesize==1.4.1
@ -116,9 +123,17 @@ isodate==0.7.2
isort==6.0.1 isort==6.0.1
itsdangerous==2.2.0 itsdangerous==2.2.0
Jinja2==3.1.6 Jinja2==3.1.6
jiter==0.9.0 jiter==0.10.0
joblib==1.5.0 joblib==1.5.1
jsonpatch==1.33
jsonpointer==3.0.0
jwt==1.3.1
kiwisolver==1.4.8 kiwisolver==1.4.8
langchain==0.3.25
langchain-community==0.3.24
langchain-core==0.3.61
langchain-text-splitters==0.3.8
langsmith==0.3.42
lazy_loader==0.4 lazy_loader==0.4
ledger==1.0.1 ledger==1.0.1
libretranslatepy==2.1.4 libretranslatepy==2.1.4
@ -128,54 +143,56 @@ lxml==5.4.0
Markdown==3.8 Markdown==3.8
markdown-it-py==3.0.0 markdown-it-py==3.0.0
MarkupSafe==3.0.2 MarkupSafe==3.0.2
marshmallow==4.0.0 marshmallow==3.26.1
matplotlib==3.10.3 matplotlib==3.10.3
mccabe==0.7.0 mccabe==0.7.0
mdurl==0.1.2 mdurl==0.1.2
MouseInfo==0.1.3 MouseInfo==0.1.3
mpmath==1.3.0 mpmath==1.3.0
multidict==6.4.3 multidict==6.4.4
mypy_extensions==1.1.0 mypy_extensions==1.1.0
networkx==3.4.2 networkx==3.4.2
newrelic==10.11.0 newrelic==10.12.0
nltk==3.9.1 nltk==3.9.1
num2words==0.5.14 num2words==0.5.14
numpy==2.2.5 numpy==2.2.6
oauthlib==3.2.2 oauthlib==3.2.2
ofxtools==0.9.5 ofxtools==0.9.5
openai==1.78.1 openai==1.82.0
opencv-contrib-python==4.11.0.86 opencv-contrib-python==4.11.0.86
opencv-python==4.11.0.86 opencv-python==4.11.0.86
opencv-python-headless==4.11.0.86 opencv-python-headless==4.11.0.86
openpyxl==3.1.5 openpyxl==3.1.5
opt_einsum==3.4.0 opt_einsum==3.4.0
orjson==3.10.18
outcome==1.3.0.post0 outcome==1.3.0.post0
packaging==25.0 packaging==24.2
pandas==2.2.3 pandas==2.2.3
pango==0.0.1 pango==0.0.1
pdfkit==1.0.0 pdfkit==1.0.0
phonenumbers==8.13.42 phonenumbers==8.13.42
pillow==10.4.0 pillow==10.4.0
platformdirs==4.3.8 platformdirs==4.3.8
prometheus_client==0.21.1 prometheus_client==0.22.0
propcache==0.3.1 propcache==0.3.1
protobuf==6.30.2 protobuf==6.31.0
psycopg==3.2.8 psycopg==3.2.9
psycopg-binary==3.2.8 psycopg-binary==3.2.9
psycopg-c==3.2.8 psycopg-c==3.2.9
psycopg2-binary==2.9.10 psycopg2-binary==2.9.10
py-moneyed==3.0 py-moneyed==3.0
PyAutoGUI==0.9.54 PyAutoGUI==0.9.54
pyclipper==1.3.0.post6 pyclipper==1.3.0.post6
pycodestyle==2.13.0 pycodestyle==2.13.0
pycparser==2.22 pycparser==2.22
pydantic==2.11.4 pydantic==2.11.5
pydantic-settings==2.9.1
pydantic_core==2.33.2 pydantic_core==2.33.2
pydotplus==2.0.2 pydotplus==2.0.2
pydyf==0.11.0 pydyf==0.11.0
PyGetWindow==0.0.9 PyGetWindow==0.0.9
Pygments==2.19.1 Pygments==2.19.1
PyJWT==2.9.0 PyJWT==2.10.1
pylint==3.3.7 pylint==3.3.7
PyMsgBox==1.0.9 PyMsgBox==1.0.9
PyMySQL==1.1.1 PyMySQL==1.1.1
@ -184,7 +201,6 @@ pyobjc-framework-Cocoa==11.0
pyobjc-framework-Quartz==11.0 pyobjc-framework-Quartz==11.0
pyparsing==3.2.3 pyparsing==3.2.3
pypdf==5.5.0 pypdf==5.5.0
PyPDF2==3.0.1
pyperclip==1.9.0 pyperclip==1.9.0
pyphen==0.17.2 pyphen==0.17.2
pypng==0.20220715.0 pypng==0.20220715.0
@ -197,22 +213,25 @@ python-dateutil==2.9.0.post0
python-docx==1.1.2 python-docx==1.1.2
python-dotenv==1.1.0 python-dotenv==1.1.0
python-openid==2.2.5 python-openid==2.2.5
python-stdnum==2.0 python-slugify==8.0.4
python-stdnum==2.1
python3-saml==1.16.0 python3-saml==1.16.0
pytweening==1.2.0 pytweening==1.2.0
pytz==2025.2 pytz==2025.2
pyvin==0.0.2 pyvin==0.0.2
pywa==2.9.0 pywa==2.10.0
pywhat==5.1.0 pywhat==5.1.0
pywhatkit==5.4 pywhatkit==5.4
PyYAML==6.0.2 PyYAML==6.0.2
pyzbar==0.1.9 pyzbar==0.1.9
qrcode==8.2 qrcode==8.2
RapidFuzz==3.13.0 RapidFuzz==3.13.0
redis==6.1.0
regex==2024.11.6 regex==2024.11.6
reportlab==4.4.0 reportlab==4.4.1
requests==2.32.3 requests==2.32.3
requests-oauthlib==2.0.0 requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
rfc3986==2.0.0 rfc3986==2.0.0
rich==14.0.0 rich==14.0.0
rubicon-objc==0.5.0 rubicon-objc==0.5.0
@ -220,16 +239,17 @@ sacremoses==0.1.1
scikit-image==0.25.2 scikit-image==0.25.2
scikit-learn==1.6.1 scikit-learn==1.6.1
scipy==1.15.3 scipy==1.15.3
selenium==4.32.0 selenium==4.33.0
sentencepiece==0.2.0 sentencepiece==0.2.0
shapely==2.1.0 shapely==2.1.1
simsimd==6.2.1 simsimd==6.2.1
six==1.17.0 six==1.17.0
slugify==0.0.1
sniffio==1.3.1 sniffio==1.3.1
snowballstemmer==3.0.1 snowballstemmer==3.0.1
sortedcontainers==2.4.0 sortedcontainers==2.4.0
soupsieve==2.7 soupsieve==2.7
SQLAlchemy==2.0.40 SQLAlchemy==2.0.41
sqlparse==0.5.3 sqlparse==0.5.3
stanza==1.10.1 stanza==1.10.1
stringzilla==3.12.5 stringzilla==3.12.5
@ -237,9 +257,11 @@ suds==1.2.0
swapper==1.3.0 swapper==1.3.0
sympy==1.14.0 sympy==1.14.0
tablib==3.8.0 tablib==3.8.0
tenacity==9.1.2
termcolor==3.1.0 termcolor==3.1.0
text-unidecode==1.3
threadpoolctl==3.6.0 threadpoolctl==3.6.0
tifffile==2025.5.10 tifffile==2025.5.24
tinycss2==1.4.0 tinycss2==1.4.0
tinyhtml5==2.0.0 tinyhtml5==2.0.0
tomli==2.2.1 tomli==2.2.1
@ -248,9 +270,10 @@ torch==2.7.0
tqdm==4.67.1 tqdm==4.67.1
trio==0.30.0 trio==0.30.0
trio-websocket==0.12.2 trio-websocket==0.12.2
twilio==9.6.0 twilio==9.6.1
types-python-dateutil==2.9.0.20250516
typing-inspect==0.9.0 typing-inspect==0.9.0
typing-inspection==0.4.0 typing-inspection==0.4.1
typing_extensions==4.13.2 typing_extensions==4.13.2
tzdata==2025.2 tzdata==2025.2
Unidecode==1.4.0 Unidecode==1.4.0
@ -260,6 +283,7 @@ vin==0.6.2
vininfo==1.8.0 vininfo==1.8.0
vishap==0.1.5 vishap==0.1.5
vpic-api==0.7.4 vpic-api==0.7.4
wcwidth==0.2.13
weasyprint==65.1 weasyprint==65.1
webencodings==0.5.1 webencodings==0.5.1
websocket-client==1.8.0 websocket-client==1.8.0
@ -269,3 +293,4 @@ wsproto==1.2.0
xmlsec==1.3.15 xmlsec==1.3.15
yarl==1.20.0 yarl==1.20.0
zopfli==0.2.3.post1 zopfli==0.2.3.post1
zstandard==0.23.0

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 65 KiB

View File

@ -41,6 +41,7 @@
<div class="tab-pane active" role="tabpanel" aria-labelledby="bootstrap-wizard-validation-tab1" id="bootstrap-wizard-validation-tab1"> <div class="tab-pane active" role="tabpanel" aria-labelledby="bootstrap-wizard-validation-tab1" id="bootstrap-wizard-validation-tab1">
<form class="needs-validation" id="wizardValidationForm1" novalidate="novalidate" data-wizard-form="1"> <form class="needs-validation" id="wizardValidationForm1" novalidate="novalidate" data-wizard-form="1">
{{form1|crispy}} {{form1|crispy}}
<a class="fs-10 text-decoration-none" href="{% url 'terms_and_privacy' %}" target="_blank">{{ _("Read Terms of Service and Privacy Policy")}}</a>
</form> </form>
</div> </div>
<div class="tab-pane" role="tabpanel" aria-labelledby="bootstrap-wizard-validation-tab2" id="bootstrap-wizard-validation-tab2"> <div class="tab-pane" role="tabpanel" aria-labelledby="bootstrap-wizard-validation-tab2" id="bootstrap-wizard-validation-tab2">

View File

@ -5,14 +5,12 @@
{% block customCSS %} {% block customCSS %}
<style> <style>
.kanban-column { .kanban-column {
background-color: #f8f9fa;
border-radius: 8px; border-radius: 8px;
padding: 1rem; padding: 1rem;
min-height: 500px; min-height: 500px;
} }
.kanban-header { .kanban-header {
position: relative; position: relative;
background-color:rgb(237, 241, 245);
font-weight: 600; font-weight: 600;
padding: 0.5rem 1rem; padding: 0.5rem 1rem;
margin-bottom: 1rem; margin-bottom: 1rem;
@ -33,7 +31,6 @@
border-left: 20px solid #dee2e6; border-left: 20px solid #dee2e6;
} }
.lead-card { .lead-card {
background-color: white;
border: 1px solid #dee2e6; border: 1px solid #dee2e6;
border-radius: 8px; border-radius: 8px;
padding: 0.75rem; padding: 0.75rem;
@ -62,15 +59,17 @@
{% endblock customCSS %} {% endblock customCSS %}
{% block content %} {% block content %}
<div class="container-fluid my-4"> <div class="container-fluid my-4">
<div class="row justify-content-center">
<div class="col">
<div class="d-flex justify-content-between mb-3"> <div class="d-flex justify-content-between mb-3">
<h3>Lead Tracking</h3> <h3>{{ _("Lead Tracking")}}</h3>
</div> </div>
<div class="row g-3"> <div class="row g-3">
<!-- New Lead --> <!-- New Lead -->
<div class="col-md"> <div class="col-md">
<div class="kanban-column"> <div class="kanban-column bg-body">
<div class="kanban-header">New Leads ({{new|length}})</div> <div class="kanban-header bg-secondary-light">{{ _("New Leads")}} ({{new|length}})</div>
{% for lead in new %} {% for lead in new %}
<a href="{% url 'lead_detail' lead.slug %}"> <a href="{% url 'lead_detail' lead.slug %}">
<div class="lead-card"> <div class="lead-card">
@ -85,8 +84,8 @@
<!-- Follow Ups --> <!-- Follow Ups -->
<div class="col-md"> <div class="col-md">
<div class="kanban-column"> <div class="kanban-column bg-body">
<div class="kanban-header">Follow Ups ({{follow_up|length}})</div> <div class="kanban-header bg-info-light">{{ _("Follow Ups")}} ({{follow_up|length}})</div>
{% for lead in follow_up %} {% for lead in follow_up %}
<a href="{% url 'lead_detail' lead.slug %}"> <a href="{% url 'lead_detail' lead.slug %}">
<div class="lead-card"> <div class="lead-card">
@ -101,8 +100,8 @@
<!-- Negotiation --> <!-- Negotiation -->
<div class="col-md"> <div class="col-md">
<div class="kanban-column"> <div class="kanban-column bg-body">
<div class="kanban-header">Negotiation ({{negotiation|length}})</div> <div class="kanban-header bg-negotiation-soft">{{ _("Negotiation") }} ({{negotiation|length}})</div>
{% for lead in negotiation %} {% for lead in negotiation %}
<a href="{% url 'lead_detail' lead.slug %}"> <a href="{% url 'lead_detail' lead.slug %}">
<div class="lead-card"> <div class="lead-card">
@ -117,8 +116,8 @@
<!-- Won --> <!-- Won -->
<div class="col-md"> <div class="col-md">
<div class="kanban-column"> <div class="kanban-column bg-body">
<div class="kanban-header">Won ({{won|length}})</div> <div class="kanban-header bg-success-soft">{{ _("Won") }} ({{won|length}})</div>
{% for lead in won %} {% for lead in won %}
<a href="{% url 'lead_detail' lead.slug %}"> <a href="{% url 'lead_detail' lead.slug %}">
<div class="lead-card"> <div class="lead-card">
@ -133,8 +132,8 @@
<!-- Lose --> <!-- Lose -->
<div class="col-md"> <div class="col-md">
<div class="kanban-column"> <div class="kanban-column bg-body">
<div class="kanban-header">Lose ({{lose|length}})</div> <div class="kanban-header bg-danger-soft">{{ _("Lost") }} ({{lose|length}})</div>
{% for lead in lose %} {% for lead in lose %}
<a href="{% url 'lead_detail' lead.slug %}"> <a href="{% url 'lead_detail' lead.slug %}">
<div class="lead-card"> <div class="lead-card">
@ -149,4 +148,6 @@
</div> </div>
</div> </div>
</div>
</div>
{% endblock %} {% endblock %}

View File

@ -1,5 +1,5 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% load static i18n %} {% load static i18n humanize%}
{% block customCSS %} {% block customCSS %}
<style> <style>

View File

@ -1,89 +1,457 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% load i18n static %} {% load i18n static %}
{% block title %}{{ _("HaikalBot") }}{% endblock title %} {% block title %}
{{ _("Haikalbot") }}
{% endblock %}
{% block description %}
AI assistant
{% endblock %}
{% block customCSS %}
<!-- No custom CSS as requested -->
{% endblock %}
{% block content %} {% block content %}
<style> <style>
.chat-container {
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
.chat-textarea {
border-radius: 20px;
padding: 15px;
resize: none;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
border: 1px solid #dee2e6;
transition: all 0.3s ease;
}
.chat-textarea:focus {
border-color: #86b7fe;
box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
outline: none;
}
.send-button {
border-radius: 20px;
padding: 10px 25px;
font-weight: 500;
transition: all 0.3s ease;
}
.textarea-container {
position: relative;
}
.textarea-footer {
display: flex;
justify-content: space-between;
align-items: center;
margin-top: 8px;
font-size: 0.8rem;
color: #6c757d;
}
</style>
<div class="card shadow-none mb-3">
<div class="card-header d-flex justify-content-between align-items-center">
#chatbox { <div class="d-flex align-items-center fw-bolder fs-3 d-inline-block">
border: 1px solid #ccc; <img class="d-dark-none" src="{% static 'images/favicons/haikalbot_v1.png' %}" alt="{% trans 'home' %}" width="32" />
border-radius: 5px; <img class="d-light-none" src="{% static 'images/favicons/haikalbot_v2.png' %}" alt="{% trans 'home' %}" width="32" />
padding: 10px; </div>
height: 200px;
overflow-y: scroll;
<div class="d-flex gap-3">
<span id="clearChatBtn" class="translate-middle-y cursor-pointer" title="{% if LANGUAGE_CODE == 'ar' %}مسح المحادثة{% else %}Clear Chat{% endif %}">
<i class="fas fa-trash-alt text-danger"></i>
</span>
<span id="exportChatBtn" class="translate-middle-y cursor-pointer" title="{% if LANGUAGE_CODE == 'ar' %}تصدير المحادثة{% else %}Export Chat{% endif %}">
<i class="fas fa-download text-success"></i>
</span>
</div>
</div>
<div class="card-body p-0">
<div id="chatMessages" class="overflow-auto p-3" style="height: 60vh;">
<!-- Chat messages will be appended here -->
</div>
<div class="bg-100 border-top p-3">
<div class="d-flex gap-2 flex-wrap mb-3" id="suggestionChips">
<button class="btn btn-sm btn-outline-primary suggestion-chip">
{{ _("How many cars are in inventory")}}?
</button>
<button class="btn btn-sm btn-outline-primary suggestion-chip">
{{ _("Show me sales analysis")}}
</button>
<button class="btn btn-sm btn-outline-primary suggestion-chip">
{{ _("What are the best-selling cars")}}?
</button>
</div>
<div class="chat-container">
<div class="textarea-container mb-3">
<label for="messageInput"></label>
<textarea class="form-control chat-textarea" id="messageInput" rows="3"
placeholder="{{ _("Type your message here")}}..."></textarea>
<div class="textarea-footer">
<div class="character-count">
<span id="charCount">0</span>/400
</div>
<span class="send-button position-absolute top-50 end-0 translate-middle-y cursor-pointer"
id="sendMessageBtn" disabled>
<i class="fas fa-paper-plane text-body"></i>
</span>
</div>
</div>
</div>
</div>
</div>
</div>
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
<script>
$(document).ready(function() {
const messageInput = $('#messageInput');
const charCount = $('#charCount');
const sendMessageBtn = $('#sendMessageBtn');
const chatMessages = $('#chatMessages');
const clearChatBtn = $('#clearChatBtn');
const exportChatBtn = $('#exportChatBtn');
const suggestionChips = $('.suggestion-chip');
// Enable/disable send button based on input
messageInput.on('input', function() {
sendMessageBtn.prop('disabled', !messageInput.val().trim());
// Auto-resize textarea
this.style.height = 'auto';
this.style.height = (this.scrollHeight) + 'px';
});
// Send message on Enter key (but allow Shift+Enter for new line)
messageInput.on('keydown', function(e) {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
if (!sendMessageBtn.prop('disabled')) {
sendMessage();
}
}
});
// Send message on button click
sendMessageBtn.on('click', sendMessage);
// Use suggestion chips
suggestionChips.on('click', function() {
messageInput.val($(this).text().trim());
sendMessageBtn.prop('disabled', false);
sendMessage();
});
// Clear chat
clearChatBtn.on('click', function() {
if (confirm('{% if LANGUAGE_CODE == "ar" %}هل أنت متأكد من أنك تريد مسح المحادثة؟{% else %}Are you sure you want to clear the chat?{% endif %}')) {
// Keep only the first welcome message
const welcomeMessage = chatMessages.children().first();
chatMessages.empty().append(welcomeMessage);
}
});
// Export chat
exportChatBtn.on('click', function() {
let chatContent = '';
$('.message').each(function() {
const isUser = $(this).hasClass('user-message');
const sender = isUser ? '{% if LANGUAGE_CODE == "ar" %}أنت{% else %}You{% endif %}' : '{% if LANGUAGE_CODE == "ar" %}المساعد الذكي{% else %}AI Assistant{% endif %}';
const text = $(this).find('.chat-message').text().trim();
const time = $(this).find('.text-400').text().trim();
chatContent += `${sender} (${time}):\n${text}\n\n`;
});
// Create and trigger download
const blob = new Blob([chatContent], { type: 'text/plain' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = 'chat-export-' + new Date().toISOString().slice(0, 10) + '.txt';
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(url);
});
// Copy message text
$(document).on('click', '.copy-btn', function() {
const text = $(this).closest('.d-flex').find('.chat-message').text().trim();
navigator.clipboard.writeText(text).then(() => {
// Show temporary success indicator
const originalIcon = $(this).html();
$(this).html('<i class="fas fa-check"></i>');
setTimeout(() => {
$(this).html(originalIcon);
}, 1500);
});
});
// Function to send message
function sendMessage() {
const message = messageInput.val().trim();
if (!message) return;
// Add user message to chat
addMessage(message, true);
// Clear input and reset height
messageInput.val('').css('height', 'auto');
sendMessageBtn.prop('disabled', true);
// Show typing indicator
showTypingIndicator();
// Send to backend
$.ajax({
url: '{% url "haikalbot:haikalbot" %}',
type: 'POST',
contentType: 'application/json',
data: JSON.stringify({
prompt: message,
language: '{{ LANGUAGE_CODE }}'
}),
headers: {
'X-CSRFToken': '{{ csrf_token }}'
},
success: function(response) {
// Hide typing indicator
hideTypingIndicator();
// Process response
let botResponse = '';
if (response.response) {
botResponse = response.response;
} else if (response.insights) {
// Format insights as a readable response
botResponse = formatInsightsResponse(response);
} else {
botResponse = '{% if LANGUAGE_CODE == "ar" %}عذرًا، لم أتمكن من معالجة طلبك.{% else %}Sorry, I couldn\'t process your request.{% endif %}';
}
// Add bot response to chat
addMessage(botResponse, false);
// Scroll to bottom
scrollToBottom();
},
error: function(xhr, status, error) {
// Hide typing indicator
hideTypingIndicator();
// Add error message
const errorMsg = '{% if LANGUAGE_CODE == "ar" %}عذرًا، حدث خطأ أثناء معالجة طلبك. يرجى المحاولة مرة أخرى.{% else %}Sorry, an error occurred while processing your request. Please try again.{% endif %}';
addMessage(errorMsg, false);
console.error('Error:', error);
}
});
} }
</style> // Function to add message to chat
function addMessage(text, isUser) {
const time = new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
const messageClass = isUser ? 'user-message justify-content-end' : '';
<div class="row p-2"> let avatarHtml = '';
<div class="card shadow-sm rounded shadow"> let messageHtml = '';
<div class="card-header bg-primary text-white">
<h4 class="mb-0">{% trans 'HaikalBot' %}</h4>
</div>
<div class="card-body">
<div id="chatbox"> if (isUser) {
<p><b>{% trans 'HaikalBot' %}:</b> {% trans 'Hello! How can I assist you today?' %}</p> // User message
avatarHtml = `
<div class="avatar avatar-l ms-3 order-1">
<div class="avatar-name rounded-circle bg-primary text-white"><span><i class="fas fa-user"></i></span></div>
</div>
`;
// Process text (no markdown for user messages)
messageHtml = `
<div class="flex-1 order-0">
<div class="w-xxl-75 ms-auto">
<div class="d-flex hover-actions-trigger align-items-center">
<div class="hover-actions start-0 top-50 translate-middle-y">
<button class="btn btn-phoenix-secondary btn-icon fs--2 round-btn copy-btn" type="button" title="{% if LANGUAGE_CODE == 'ar' %}نسخ{% else %}Copy{% endif %}">
<i class="fas fa-copy"></i>
</button>
</div>
<div class="chat-message p-3 rounded-2">
${text}
</div>
</div>
<div class="text-400 fs--2">
${time}
</div>
</div>
</div>
`;
} else {
// Bot message
avatarHtml = `
<div class="me-3">
<div class="d-flex align-items-center fw-bolder fs-3 d-inline-block">
<img class="d-dark-none" src="{% static 'images/favicons/haikalbot_v1.png' %}" width="32" />
<img class="d-light-none" src="{% static 'images/favicons/haikalbot_v2.png' %}" width="32" />
</div>
</div>
`;
// Process markdown for bot messages
const processedText = marked.parse(text);
messageHtml = `
<div class="flex-1">
<div class="w-xxl-75">
<div class="d-flex hover-actions-trigger align-items-center">
<div class="chat-message bg-200 p-3 rounded-2">
${processedText}
</div>
<div class="hover-actions end-0 top-50 translate-middle-y">
<button class="btn btn-phoenix-secondary btn-icon fs--2 round-btn copy-btn" type="button" title="{{_("Copy")}}">
<i class="fas fa-copy"></i>
</button>
</div>
</div>
<div class="text-400 fs--2 text-end">
${time}
</div>
</div>
</div>
`;
}
const fullMessageHtml = `
<div class="message d-flex mb-3 ${messageClass}">
${avatarHtml}
${messageHtml}
</div> </div>
<label for="userMessage"></label> `;
<input class="form-control form-control-sm"
type="text" id="userMessage"
placeholder="{% trans 'Type your message here...' %}" />
<button class="btn btn-sm btn-success m-2" onclick="sendMessage()">{% trans 'Send' %}</button>
</div>
</div>
<!-- Script to send to api --> chatMessages.append(fullMessageHtml);
<script> scrollToBottom();
function getCookie(name) { }
let cookieValue = null;
if (document.cookie && document.cookie !== '') { // Function to show typing indicator
const cookies = document.cookie.split(';'); function showTypingIndicator() {
for (let cookie of cookies) { const typingHtml = `
cookie = cookie.trim(); <div class="message d-flex mb-3" id="typingIndicator">
if (cookie.substring(0, name.length + 1) === (name + '=')) { <div class="avatar avatar-l me-3">
cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); <div class="avatar-name rounded-circle"><span><i class="fas fa-robot"></i></span></div>
break; </div>
} <div class="flex-1 d-flex align-items-center">
<div class="spinner-border text-phoenix-secondary me-2" role="status"></div>
<span class="fs-9">{% if LANGUAGE_CODE == 'ar' %}جاري الكتابة...{% else %}Typing...{% endif %}</span>
</div>
</div>
`;
chatMessages.append(typingHtml);
scrollToBottom();
}
// Function to hide typing indicator
function hideTypingIndicator() {
$('#typingIndicator').remove();
}
// Function to scroll chat to bottom
function scrollToBottom() {
chatMessages.scrollTop(chatMessages[0].scrollHeight);
}
// Function to format insights response
function formatInsightsResponse(response) {
let formattedResponse = '';
const insightsKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'التحليلات' : 'insights';
const recsKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'التوصيات' : 'recommendations';
if (response[insightsKey] && response[insightsKey].length > 0) {
formattedResponse += '{{ LANGUAGE_CODE }}' === 'ar' ? '## نتائج التحليل\n\n' : '## Analysis Results\n\n';
response[insightsKey].forEach(insight => {
if (insight.type) {
formattedResponse += `### ${insight.type}\n\n`;
} }
}
return cookieValue;
}
const csrfToken = getCookie('csrftoken'); if (insight.results) {
insight.results.forEach(result => {
if (result.error) {
formattedResponse += `- **${result.model || ''}**: ${result.error}\n`;
} else if (result.count !== undefined) {
formattedResponse += `- **${result.model || ''}**: ${result.count}\n`;
} else if (result.value !== undefined) {
const fieldKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'الحقل' : 'field';
const statTypeKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'نوع_الإحصاء' : 'statistic_type';
formattedResponse += `- **${result.model || ''}**: ${result[statTypeKey]} of ${result[fieldKey]} = ${result.value}\n`;
}
});
formattedResponse += '\n';
}
if (insight.relationships) {
async function sendMessage() { formattedResponse += '{{ LANGUAGE_CODE }}' === 'ar' ? ' العلاقات:\n\n' : ' Relationships:\n\n';
const userMessage = document.getElementById("userMessage").value; insight.relationships.forEach(rel => {
const fromKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'من' : 'from';
if (!userMessage.trim()) { const toKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'إلى' : 'to';
alert("Please enter a message."); const typeKey = '{{ LANGUAGE_CODE }}' === 'ar' ? 'نوع' : 'type';
return; formattedResponse += `- ${rel[fromKey]} → ${rel[toKey]} (${rel[typeKey]})\n`;
} });
formattedResponse += '\n';
const response = await fetch("", { }
method: "POST",
headers: {
"Content-Type": "application/json",
"X-CSRFToken": csrfToken,
},
body: JSON.stringify({ message: userMessage }),
}); });
if (response) {
const data = await response.json();
const chatbox = document.getElementById("chatbox");
chatbox.innerHTML += `<p><b>{% trans 'You' %}:</b> ${userMessage}</p>`;
chatbox.innerHTML += `<p><b>{% trans 'HaikalBot' %}:</b> ${data.response}</p>`;
document.getElementById("userMessage").value = "";
chatbox.scrollTop = chatbox.scrollHeight;
} else {
alert("An error occurred.");
}
} }
</script>
if (response[recsKey] && response[recsKey].length > 0) {
formattedResponse += '{{ LANGUAGE_CODE }}' === 'ar' ? ' التوصيات\n\n' : ' Recommendations\n\n';
response[recsKey].forEach(rec => {
formattedResponse += `- ${rec}\n`;
});
}
return formattedResponse || '{{ LANGUAGE_CODE }}' === 'ar' ? 'تم تحليل البيانات بنجاح.' : 'Data analyzed successfully.';
}
// Initialize
scrollToBottom();
});
// Live character counter for the chat input: mirror the current length into
// the counter element and color it red once the user nears the limit.
// NOTE(review): the hard length limit itself is presumably enforced elsewhere
// (e.g. a maxlength attribute on the input) — confirm.
messageInput.addEventListener('input', function() {
const currentLength = this.value.length;
charCount.textContent = currentLength;
// Optional: Add warning when approaching limit
if (currentLength > 350) {
charCount.style.color = 'red';
} else {
charCount.style.color = 'inherit';
}
});
</script>
{% endblock %}
{% block customJS %}
<!-- JS will be loaded from static file or added separately -->
{% endblock %}
</div>
{% endblock content %}

View File

@ -268,6 +268,7 @@
<span class="nav-link-icon"><i class="fa-solid fa-book-open"></i></span><span class="nav-link-text">{% trans 'Reports' %}</span> <span class="nav-link-icon"><i class="fa-solid fa-book-open"></i></span><span class="nav-link-text">{% trans 'Reports' %}</span>
</div> </div>
</a> </a>
{% if request.user.dealer.entity %}
{% if perms.django_ledger.view_accountmodel %} {% if perms.django_ledger.view_accountmodel %}
<div class="parent-wrapper label-1"> <div class="parent-wrapper label-1">
<ul class="nav collapse parent" data-bs-parent="#navbarVerticalCollapse" id="nv-reports"> <ul class="nav collapse parent" data-bs-parent="#navbarVerticalCollapse" id="nv-reports">
@ -320,6 +321,7 @@
</ul> </ul>
</div> </div>
{% endif %} {% endif %}
{% endif %}
</div> </div>
{% endif %} {% endif %}
</li> </li>

View File

@ -0,0 +1,271 @@
{% extends 'base.html' %}
{% load static i18n %}
{% block title %}{{ _("Terms of use and privacy policy")}}{% endblock title %}
{% block content %}
<style>
h2 {
font-size: 1.7em;
}
h3 {
font-size: 1.5em;
}
</style>
<div class="content fs-9">
<div class="row">
<div class="col-6">
<!-- English Section -->
<h2>Date: 1/1/2025</h2>
<section id="terms">
<h2>Terms of Service</h2>
<p>Welcome to <strong>Haikal</strong>, an advanced car inventory management platform owned and operated by <strong>Tenhal Information Technology Company</strong> ("we", "our", "us"). By accessing or using the Haikal system ("the Service"), you agree to be legally bound by the terms outlined below.</p>
<h3>1. Acceptance of Terms</h3>
<p>By using the Service, you confirm that you are authorized to act on behalf of a business entity, agree to these Terms of Service, and comply with all applicable laws and regulations.</p>
<h3>2. Description of Service</h3>
<p>Haikal provides car dealers and authorized users with tools for managing car inventory, sales, branches, financial transactions, and analytics. Additional services may include integration with government systems, API access, and reporting modules.</p>
<h3>3. Account Registration & Security</h3>
<ul>
<li>You must register and maintain a secure account with accurate information.</li>
<li>You are solely responsible for any activity under your account.</li>
<li>You must notify us immediately if you suspect unauthorized access or breach of your account.</li>
</ul>
<h3>4. License and Restrictions</h3>
<ul>
<li>We grant you a non-exclusive, non-transferable, revocable license to use the Service in accordance with these terms.</li>
<li>You may not copy, modify, distribute, resell, reverse-engineer, or decompile any part of the Service.</li>
</ul>
<h3>5. User Obligations</h3>
<ul>
<li>You agree not to upload illegal, harmful, or offensive data to the system.</li>
<li>You are responsible for maintaining compliance with data privacy regulations when inputting customer data.</li>
<li>You must not attempt to access systems or data not explicitly made available to you.</li>
</ul>
<h3>6. Intellectual Property</h3>
<p>All content, software, user interface designs, databases, and trademarks within Haikal are the intellectual property of Tenhal Information Technology Company and are protected under local and international IP laws.</p>
<h3>7. Service Availability & Modifications</h3>
<ul>
<li>We aim to provide 99.9% uptime but do not guarantee uninterrupted access.</li>
<li>We may modify or discontinue parts of the service at any time with or without notice.</li>
</ul>
<h3>8. Third-Party Integrations</h3>
<p>We may integrate with external services such as VIN databases, payment processors, or government systems. Use of those services is subject to their own terms and privacy policies.</p>
<h3>9. Limitation of Liability</h3>
<p>To the fullest extent permitted by law, Tenhal is not liable for indirect, incidental, punitive, or consequential damages resulting from your use of the Service. Our total liability is limited to the amount you paid us in the last 12 months.</p>
<h3>10. Termination</h3>
<p>We may suspend or terminate your access if you violate these terms. Upon termination, your access to the Service and associated data may be revoked or deleted.</p>
<h3>11. Governing Law</h3>
<p>These terms are governed by the laws of the Kingdom of Saudi Arabia. Any disputes will be resolved exclusively in courts located in Riyadh.</p>
</section>
<hr>
<section id="privacy">
<h2>Privacy Policy</h2>
<p>We value your privacy and are committed to protecting your personal and business data. This Privacy Policy explains how we collect, use, and protect your information when you use Haikal.</p>
<h3>1. Information We Collect</h3>
<ul>
<li><strong>Account Information:</strong> Name, email, phone number, user role, and login credentials.</li>
<li><strong>Business Data:</strong> Inventory details, financial transactions, customer and supplier records.</li>
<li><strong>Technical Data:</strong> IP addresses, browser types, login timestamps, session logs, device identifiers.</li>
</ul>
<h3>2. How We Use Your Information</h3>
<ul>
<li>To operate and improve the Service.</li>
<li>To secure accounts and prevent misuse or fraud.</li>
<li>To provide customer support and respond to inquiries.</li>
<li>To comply with legal obligations and cooperate with regulators when required.</li>
</ul>
<h3>3. Data Sharing</h3>
<ul>
<li>We do not sell your information to third parties.</li>
<li>We may share data with trusted processors (e.g., hosting, support tools) under strict confidentiality terms.</li>
<li>We may disclose data to authorities when legally required.</li>
</ul>
<h3>4. Data Storage and Security</h3>
<ul>
<li>Your data is stored securely on encrypted servers with access control policies in place.</li>
<li>We apply firewalls, intrusion detection, and regular audits to safeguard information.</li>
</ul>
<h3>5. Your Rights</h3>
<ul>
<li>You have the right to access, correct, or request deletion of your data.</li>
<li>You may contact us to object to processing or request data portability.</li>
</ul>
<h3>6. Data Retention</h3>
<p>We retain data as long as necessary to provide the service, comply with legal obligations, or enforce agreements. Upon request, we may anonymize or delete your data.</p>
<h3>7. Cookies and Tracking</h3>
<p>We may use cookies to enhance your experience. These may include session cookies, authentication tokens, and analytics tools.</p>
<h3>8. International Data Transfers</h3>
<p>If data is processed outside of Saudi Arabia, we ensure adequate protection via agreements and security standards aligned with applicable laws.</p>
<h3>9. Changes to this Policy</h3>
<p>We may revise this Privacy Policy from time to time. Updates will be posted here with a revised effective date.</p>
</section>
</div>
<div class="col-6" dir="rtl">
<h2>التاريخ: ١/١/٢٠٢٥</h2>
<!-- Arabic Section -->
<section class="arabic">
<h2>شروط الخدمة</h2>
<p>مرحبًا بك في <strong>هيكل</strong>، منصة متقدمة لإدارة مخزون السيارات، مملوكة وتديرها <strong>شركة تنحل لتقنية المعلومات</strong> ("نحن"، "خاصتنا"). باستخدامك لنظام هيكل، فإنك توافق على الالتزام القانوني بالشروط التالية:</p>
<h3>١. قبول الشروط</h3>
<p>باستخدامك للخدمة، فإنك تؤكد أنك مفوض بالتصرف نيابة عن كيان تجاري، وتوافق على شروط الخدمة هذه، وتلتزم بجميع القوانين والأنظمة المعمول بها.</p>
<h3>٢. وصف الخدمة</h3>
<p>يوفر هيكل أدوات لتجار السيارات والمستخدمين المخولين لإدارة المخزون، المبيعات، الفروع، المعاملات المالية، والتحليلات. تشمل الخدمات الإضافية تكاملات مع أنظمة حكومية، وصول API، وتقارير.</p>
<h3>٣. التسجيل والحماية</h3>
<ul>
<li>يجب تسجيل حساب دقيق وآمن.</li>
<li>أنت مسؤول عن كل نشاط يتم عبر حسابك.</li>
<li>يجب إبلاغنا فورًا عند الاشتباه في اختراق الحساب.</li>
</ul>
<h3>٤. الترخيص والقيود</h3>
<ul>
<li>نمنحك ترخيصًا غير حصري وقابل للإلغاء لاستخدام الخدمة.</li>
<li>لا يحق لك نسخ، تعديل، توزيع، أو عكس هندسة أي جزء من الخدمة.</li>
</ul>
<h3>٥. التزامات المستخدم</h3>
<ul>
<li>عدم تحميل بيانات غير قانونية أو ضارة.</li>
<li>أنت مسؤول عن الامتثال لقوانين خصوصية البيانات.</li>
<li>لا تحاول الوصول لبيانات أو أنظمة غير مصرّح بها.</li>
</ul>
<h3>٦. الملكية الفكرية</h3>
<p>جميع المحتويات، البرمجيات، قواعد البيانات، والتصاميم تخص تنحل وتخضع للقوانين المحلية والدولية.</p>
<h3>٧. توفر الخدمة والتعديلات</h3>
<ul>
<li>نهدف لتوفير الخدمة بنسبة تشغيل 99.9٪ ولكن لا نضمن عدم الانقطاع.</li>
<li>قد نقوم بتحديث أو تعديل أو إيقاف الخدمة في أي وقت.</li>
</ul>
<h3>٨. تكامل الأطراف الخارجية</h3>
<p>قد نتكامل مع خدمات خارجية مثل قواعد بيانات VIN، ومعالجات الدفع، والأنظمة الحكومية. يخضع استخدام هذه الخدمات لشروطها الخاصة.</p>
<h3>٩. حدود المسؤولية</h3>
<p>أقصى مسؤولية لنا عن أي ضرر غير مباشر أو عرضي تقتصر على ما دفعته خلال الـ 12 شهرًا الماضية.</p>
<h3>١٠. الإنهاء</h3>
<p>يجوز لنا إنهاء أو تعليق حسابك إذا انتهكت هذه الشروط. وقد يتم حذف بياناتك بعد الإنهاء.</p>
<h3>١١. القانون الحاكم</h3>
<p>تخضع هذه الشروط لقوانين المملكة العربية السعودية، ويكون الاختصاص القضائي لمحاكم الرياض فقط.</p>
</section>
<hr>
<section class="arabic">
<h2>سياسة الخصوصية</h2>
<p>نحن نهتم بخصوصيتك وملتزمون بحماية بياناتك الشخصية والتجارية. توضح هذه السياسة كيفية جمع واستخدام وحماية بياناتك عند استخدام نظام هيكل.</p>
<h3>١. المعلومات التي نجمعها</h3>
<ul>
<li><strong>بيانات الحساب:</strong> الاسم، البريد الإلكتروني، الهاتف، الدور، بيانات تسجيل الدخول.</li>
<li><strong>بيانات الأعمال:</strong> تفاصيل السيارات، المعاملات المالية، سجلات العملاء والموردين.</li>
<li><strong>بيانات تقنية:</strong> عناوين IP، أنواع المتصفحات، أوقات الدخول، سجلات الجلسات، معرفات الأجهزة.</li>
</ul>
<h3>٢. استخدام البيانات</h3>
<ul>
<li>لتشغيل الخدمة وتحسينها.</li>
<li>لحماية الحسابات ومنع الاحتيال.</li>
<li>لدعم العملاء والاستجابة للاستفسارات.</li>
<li>للالتزام بالقوانين والتعاون مع الجهات التنظيمية.</li>
</ul>
<h3>٣. مشاركة البيانات</h3>
<ul>
<li>لا نبيع بياناتك لأي طرف ثالث.</li>
<li>قد نشارك البيانات مع مزودين موثوقين بموجب اتفاقيات سرية.</li>
<li>قد نكشف عن البيانات للجهات المختصة عند الطلب القانوني.</li>
</ul>
<h3>٤. التخزين والحماية</h3>
<ul>
<li>تُخزن البيانات على خوادم مشفرة مع سياسات وصول صارمة.</li>
<li>نطبق جدران حماية، واكتشاف التسلل، ومراجعات دورية.</li>
</ul>
<h3>٥. حقوقك</h3>
<ul>
<li>لك الحق في الوصول إلى بياناتك أو تعديلها أو طلب حذفها.</li>
<li>يمكنك الاعتراض على المعالجة أو طلب نقل البيانات.</li>
</ul>
<h3>٦. الاحتفاظ بالبيانات</h3>
<p>نحتفظ بالبيانات طالما كانت ضرورية لتقديم الخدمة أو للامتثال للأنظمة. يمكننا إزالتها أو إخفاؤها حسب الطلب.</p>
<h3>٧. الكوكيز والتتبع</h3>
<p>قد نستخدم الكوكيز لتحسين تجربتك، بما في ذلك جلسات التوثيق والتحليلات.</p>
<h3>٨. نقل البيانات خارجياً</h3>
<p>إذا تم نقل البيانات خارج السعودية، نضمن حمايتها وفق اتفاقيات ومعايير قانونية مناسبة.</p>
<h3>٩. التحديثات</h3>
<p>قد نُجري تغييرات على هذه السياسة، وسيتم نشر التعديلات مع تاريخ سريان جديد.</p>
</section>
</div>
</div>
<div class="row border-top">
<div class="col-6">
<section id="contact">
<h2>Contact Information</h2>
<p>If you have any questions or concerns about these Terms or Privacy practices, please contact us:</p>
<p>
<strong>Tenhal Information Technology Company</strong><br>
Riyadh, Saudi Arabia<br>
📧 <a href="mailto:info@tenhal.sa">info@tenhal.sa</a><br>
🌐 <a href="https://www.tenhal.sa" target="_blank">www.tenhal.sa</a>
</p>
</section>
</div>
<div class="col-6" dir="rtl">
<section class="arabic">
<h2>معلومات التواصل</h2>
<p>لأي استفسار حول هذه الشروط أو سياسة الخصوصية، يرجى التواصل معنا:</p>
<p>
<strong>شركة تنحل لتقنية المعلومات</strong><br>
الرياض، المملكة العربية السعودية<br>
📧 <a href="mailto:info@tenhal.sa">info@tenhal.sa</a><br>
🌐 <a href="https://www.tenhal.sa" target="_blank">tenhal.sa</a>
</p>
</section>
</div>
</div>
</div>
{% endblock content %}

View File

@ -250,3 +250,36 @@ send_mail(
"OPC minivan 5-doors": "ميني فان OPC 5 أبواب", "OPC minivan 5-doors": "ميني فان OPC 5 أبواب",
"Hardtop 2-doors": "Hardtop 2 أبواب", "Hardtop 2-doors": "Hardtop 2 أبواب",
"JP-spec Sedan 4-doors": "جي بي مواصفات سيدان 4 أبواب", "JP-spec Sedan 4-doors": "جي بي مواصفات سيدان 4 أبواب",
# --- Fixture export: dump car-catalogue reference data (inventory.*) and
# --- subscription-plan data (plans.*) to one JSON file per model.
python manage.py dumpdata inventory.CarMake --indent 4 > carmake.json
python manage.py dumpdata inventory.CarModel --indent 4 > carmodel.json
python manage.py dumpdata inventory.CarSerie --indent 4 > carserie.json
python manage.py dumpdata inventory.CarTrim --indent 4 > cartrim.json
python manage.py dumpdata inventory.CarEquipment --indent 4 > carequipment.json
python manage.py dumpdata inventory.CarSpecification --indent 4 > carspecification.json
python manage.py dumpdata inventory.CarSpecificationValue --indent 4 > carspecificationvalue.json
python manage.py dumpdata inventory.CarOption --indent 4 > caroption.json
python manage.py dumpdata inventory.CarOptionValue --indent 4 > caroptionvalue.json
python manage.py dumpdata plans.Plan --indent 4 > plan.json
python manage.py dumpdata plans.Pricing --indent 4 > pricing.json
python manage.py dumpdata plans.PlanPricing --indent 4 > planpricing.json
python manage.py dumpdata plans.Quota --indent 4 > quota.json
python manage.py dumpdata plans.PlanQuota --indent 4 > planquota.json
# --- Fixture import: load the same files back (e.g. on a fresh environment).
# --- NOTE(review): load order presumably matters where models reference each
# --- other (e.g. CarModel -> CarMake), so keep parents before children.
python manage.py loaddata carmake.json
python manage.py loaddata carmodel.json
python manage.py loaddata carserie.json
python manage.py loaddata cartrim.json
python manage.py loaddata carequipment.json
python manage.py loaddata carspecification.json
python manage.py loaddata carspecificationvalue.json
python manage.py loaddata caroption.json
python manage.py loaddata caroptionvalue.json
python manage.py loaddata plan.json
python manage.py loaddata pricing.json
python manage.py loaddata planpricing.json
python manage.py loaddata quota.json
python manage.py loaddata planquota.json

194
test_ollama.py Normal file
View File

@ -0,0 +1,194 @@
# Standalone test bootstrap: the Django settings module must be configured and
# django.setup() called BEFORE the django.* / haikalbot.* imports below run,
# otherwise importing models/views fails with an app-registry error.
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "car_inventory.settings")
django.setup()
# Test helpers and the code under test.
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from django.http import JsonResponse  # NOTE(review): unused in the visible tests — confirm
import json
from unittest.mock import patch, MagicMock  # NOTE(review): MagicMock appears unused — confirm
from haikalbot.views import ModelAnalystView
from haikalbot.models import AnalysisCache  # NOTE(review): unused in the visible tests — confirm
class ModelAnalystViewTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(
username='testuser', email='test@example.com', password='testpass'
)
self.superuser = User.objects.create_superuser(
username='admin', email='admin@example.com', password='adminpass'
)
self.view = ModelAnalystView()
def test_post_without_prompt(self):
"""Test that the view returns an error when no prompt is provided."""
request = self.factory.post(
'/analyze/',
data=json.dumps({}),
content_type='application/json'
)
request.user = self.user
response = self.view.post(request)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertEqual(content['status'], 'error')
self.assertEqual(content['message'], 'Prompt is required')
def test_post_with_invalid_json(self):
"""Test that the view handles invalid JSON properly."""
request = self.factory.post(
'/analyze/',
data='invalid json',
content_type='application/json'
)
request.user = self.user
response = self.view.post(request)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertEqual(content['status'], 'error')
self.assertEqual(content['message'], 'Invalid JSON in request body')
@patch('ai_analyst.views.ModelAnalystView._process_prompt')
@patch('ai_analyst.views.ModelAnalystView._check_permissions')
@patch('ai_analyst.views.ModelAnalystView._generate_hash')
@patch('ai_analyst.views.ModelAnalystView._get_cached_result')
@patch('ai_analyst.views.ModelAnalystView._cache_result')
def test_post_with_valid_prompt(self, mock_cache_result, mock_get_cached,
mock_generate_hash, mock_check_permissions,
mock_process_prompt):
"""Test that the view processes a valid prompt correctly."""
# Setup mocks
mock_check_permissions.return_value = True
mock_generate_hash.return_value = 'test_hash'
mock_get_cached.return_value = None
mock_process_prompt.return_value = {
'status': 'success',
'insights': [{'type': 'test_insight'}]
}
# Create request
request = self.factory.post(
'/analyze/',
data=json.dumps({'prompt': 'How many cars do we have?', 'dealer_id': 1}),
content_type='application/json'
)
request.user = self.user
# Call view
response = self.view.post(request)
# Assertions
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content['status'], 'success')
self.assertEqual(len(content['insights']), 1)
# Verify function calls
mock_check_permissions.assert_called_once_with(self.user, 1)
mock_generate_hash.assert_called_once_with('How many cars do we have?', 1)
mock_get_cached.assert_called_once_with('test_hash', self.user, 1)
mock_process_prompt.assert_called_once_with('How many cars do we have?', self.user, 1)
mock_cache_result.assert_called_once()
@patch('ai_analyst.views.ModelAnalystView._get_cached_result')
@patch('ai_analyst.views.ModelAnalystView._check_permissions')
@patch('ai_analyst.views.ModelAnalystView._generate_hash')
def test_post_with_cached_result(self, mock_generate_hash, mock_check_permissions, mock_get_cached):
"""Test that the view returns cached results when available."""
# Setup mocks
mock_check_permissions.return_value = True
mock_generate_hash.return_value = 'test_hash'
mock_get_cached.return_value = {
'status': 'success',
'insights': [{'type': 'cached_insight'}],
'cached': True
}
# Create request
request = self.factory.post(
'/analyze/',
data=json.dumps({'prompt': 'How many cars do we have?', 'dealer_id': 1}),
content_type='application/json'
)
request.user = self.user
# Call view
response = self.view.post(request)
# Assertions
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content['status'], 'success')
self.assertEqual(content['cached'], True)
# Verify function calls
mock_check_permissions.assert_called_once_with(self.user, 1)
mock_generate_hash.assert_called_once_with('How many cars do we have?', 1)
mock_get_cached.assert_called_once_with('test_hash', self.user, 1)
def test_check_permissions_superuser(self):
"""Test that superusers have permission to access any dealer data."""
result = self.view._check_permissions(self.superuser, 1)
self.assertTrue(result)
result = self.view._check_permissions(self.superuser, None)
self.assertTrue(result)
def test_analyze_prompt_count(self):
"""Test that the prompt analyzer correctly identifies count queries."""
analysis_type, target_models, query_params = self.view._analyze_prompt("How many cars do we have?")
self.assertEqual(analysis_type, 'count')
self.assertEqual(target_models, ['Car'])
self.assertEqual(query_params, {})
analysis_type, target_models, query_params = self.view._analyze_prompt(
"Count the number of users with active status")
self.assertEqual(analysis_type, 'count')
self.assertEqual(target_models, ['User'])
self.assertTrue('active' in query_params or 'status' in query_params)
def test_analyze_prompt_relationship(self):
"""Test that the prompt analyzer correctly identifies relationship queries."""
analysis_type, target_models, query_params = self.view._analyze_prompt(
"Show relationship between User and Profile")
self.assertEqual(analysis_type, 'relationship')
self.assertTrue('User' in target_models and 'Profile' in target_models)
analysis_type, target_models, query_params = self.view._analyze_prompt(
"What is the User to Order relationship?")
self.assertEqual(analysis_type, 'relationship')
self.assertTrue('User' in target_models and 'Order' in target_models)
def test_analyze_prompt_statistics(self):
"""Test that the prompt analyzer correctly identifies statistics queries."""
analysis_type, target_models, query_params = self.view._analyze_prompt("What is the average price of cars?")
self.assertEqual(analysis_type, 'statistics')
self.assertEqual(target_models, ['Car'])
self.assertEqual(query_params['field'], 'price')
self.assertEqual(query_params['operation'], 'average')
analysis_type, target_models, query_params = self.view._analyze_prompt("Show maximum age of users")
self.assertEqual(analysis_type, 'statistics')
self.assertEqual(target_models, ['User'])
self.assertEqual(query_params['field'], 'age')
self.assertEqual(query_params['operation'], 'maximum')
def test_normalize_model_name(self):
"""Test that model names are correctly normalized."""
self.assertEqual(self.view._normalize_model_name('users'), 'User')
self.assertEqual(self.view._normalize_model_name('car'), 'Car')
self.assertEqual(self.view._normalize_model_name('orderItems'),
'OrderItem') # This would actually need more logic to handle camelCase