haikal/haikalbot/services/llm_service.py

import json
import logging

from django.apps import apps
from django.conf import settings
from django.db.models import Avg, Count, Max, Min
from django.http import JsonResponse
from langchain.prompts import PromptTemplate
from langchain_ollama import OllamaLLM

logger = logging.getLogger(__name__)


def get_llm_instance():
    """Build an OllamaLLM instance from Django settings, falling back to defaults."""
    try:
        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
        temperature = getattr(settings, 'OLLAMA_TEMPERATURE', 0.2)
        top_p = getattr(settings, 'OLLAMA_TOP_P', 0.8)
        top_k = getattr(settings, 'OLLAMA_TOP_K', 40)
        num_ctx = getattr(settings, 'OLLAMA_NUM_CTX', 4096)
        num_predict = getattr(settings, 'OLLAMA_NUM_PREDICT', 2048)
        return OllamaLLM(
            base_url=base_url,
            model=model,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_ctx=num_ctx,
            num_predict=num_predict,
            stop=["```", "</s>"],  # cut generation before fenced code or end-of-sequence tokens
            repeat_penalty=1.1,
        )
    except Exception as e:
        logger.error(f"Error initializing Ollama LLM: {str(e)}")
        return None
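
# The getattr() defaults above can be overridden per deployment in settings.py,
# e.g. to point at a different Ollama host or model (a sketch; the values shown
# are just the module defaults repeated):
#
#     OLLAMA_BASE_URL = 'http://10.10.1.132:11434'
#     OLLAMA_MODEL = 'qwen3:8b'
#     OLLAMA_TEMPERATURE = 0.2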


def get_llm_chain(language='en'):
    """Return a prompt | llm runnable that asks the model for a JSON analysis plan."""
    llm = get_llm_instance()
    if not llm:
        return None
    if language == 'ar':
        # Arabic-language variant of the prompt; it requests the same JSON
        # structure as the English template below.
        template = """
قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON كما يلي:
{{
    "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
    "target_models": ["ModelName1", "ModelName2"],
    "query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
    else:
        # Braces in the JSON example must be doubled so PromptTemplate does not
        # treat them as input variables; the original single braces raised an
        # error at format time.
        template = """
Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
Prompt: {prompt}
Provide your answer in JSON format as follows:
{{
    "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
    "target_models": ["ModelName1", "ModelName2"],
    "query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template,
    )
    return prompt_template | llm
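
# Example usage (a minimal sketch; assumes the Ollama server configured above is
# reachable and the model has been pulled — the sample prompt is hypothetical):
#
#     chain = get_llm_chain(language='en')
#     if chain is not None:
#         raw = chain.invoke({'prompt': 'How many orders were created this week?'})
#         plan = json.loads(raw)  # {'analysis_type': ..., 'target_models': [...], ...}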


def analyze_models_with_orm(analysis_type, target_models, query_params):
    """Run the requested analysis over each target model via the Django ORM."""
    results = {}
    for model_name in target_models:
        try:
            # 'your_app_name' is still a placeholder and must be replaced with
            # the app label that actually owns these models.
            model = apps.get_model('your_app_name', model_name)
        except LookupError:
            results[model_name] = {"error": f"Model '{model_name}' not found"}
            continue
        try:
            queryset = model.objects.filter(**query_params)
            if analysis_type == 'count':
                results[model_name] = {'count': queryset.count()}
            elif analysis_type == 'statistics':
                numeric_fields = [
                    f.name for f in model._meta.fields
                    if f.get_internal_type() in ('IntegerField', 'FloatField', 'DecimalField')
                ]
                stats = {}
                for field in numeric_fields:
                    # One aggregate() call per field instead of three separate queries.
                    stats[field] = queryset.aggregate(
                        avg=Avg(field), max=Max(field), min=Min(field)
                    )
                results[model_name] = stats
            elif analysis_type == 'relationship':
                related = {}
                for field in model._meta.get_fields():
                    if field.is_relation and field.many_to_one:
                        # Number of distinct related values reachable through this FK.
                        related[field.name] = (
                            queryset.values(field.name)
                            .annotate(count=Count(field.name))
                            .count()
                        )
                results[model_name] = related
            elif analysis_type == 'performance':
                results[model_name] = {'note': 'Performance analysis logic not implemented.'}
            else:
                results[model_name] = list(queryset.values())
        except Exception as e:
            results[model_name] = {'error': str(e)}
    return results
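
# Example call (a sketch; 'Order' and the 'status' filter are hypothetical names,
# not models from this project):
#
#     analyze_models_with_orm(
#         analysis_type='count',
#         target_models=['Order'],
#         query_params={'status': 'open'},
#     )
#     # -> {'Order': {'count': 42}}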


def analyze_prompt_and_return_json(request):
    """View: ask the LLM to classify the prompt, then run the matching ORM analysis."""
    try:
        prompt = request.POST.get('prompt')
        language = request.POST.get('language', 'en')
        if not prompt:
            return JsonResponse({'success': False, 'error': 'Missing prompt'})
        chain = get_llm_chain(language)
        if not chain:
            return JsonResponse({'success': False, 'error': 'LLM not initialized'})
        result = chain.invoke({'prompt': prompt})
        try:
            parsed = json.loads(result)
        except json.JSONDecodeError:
            return JsonResponse({'success': False, 'error': 'LLM did not return valid JSON'})
        analysis_type = parsed.get('analysis_type')
        target_models = parsed.get('target_models', [])
        query_params = parsed.get('query_params', {})
        if not analysis_type or not target_models:
            return JsonResponse({'success': False, 'error': 'Incomplete analysis instruction returned by LLM'})
        orm_results = analyze_models_with_orm(analysis_type, target_models, query_params)
        return JsonResponse({'success': True, 'data': orm_results})
    except Exception as e:
        return JsonResponse({'success': False, 'error': str(e)})
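
# Wiring sketch: exposing the view through a URLconf. The route and the import
# path (inferred from this file's location) are assumptions, not taken from the
# project's actual urls.py:
#
#     # urls.py
#     from django.urls import path
#     from haikalbot.services.llm_service import analyze_prompt_and_return_json
#
#     urlpatterns = [
#         path('llm/analyze/', analyze_prompt_and_return_json, name='llm-analyze'),
#     ]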