haikal/haikalbot/services/llm_service.py
Marwan Alwali 250e0aa7bb update
2025-05-26 15:17:10 +03:00

100 lines
3.5 KiB
Python

from langchain_ollama import OllamaLLM
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def get_llm_instance():
    """
    Initialize and return an Ollama LLM instance configured for Arabic support.

    Creates a new LLM instance with sampling parameters tuned for both Arabic
    and English text. Each parameter is read from Django settings when present,
    falling back to the defaults shown below.

    :return: Configured OllamaLLM instance, or None if initialization fails
    :rtype: OllamaLLM or None
    """
    try:
        # Read connection/model configuration from Django settings or use defaults.
        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
        temperature = getattr(settings, 'OLLAMA_TEMPERATURE', 0.2)
        top_p = getattr(settings, 'OLLAMA_TOP_P', 0.8)
        top_k = getattr(settings, 'OLLAMA_TOP_K', 40)
        num_ctx = getattr(settings, 'OLLAMA_NUM_CTX', 4096)
        num_predict = getattr(settings, 'OLLAMA_NUM_PREDICT', 2048)
        return OllamaLLM(
            base_url=base_url,
            model=model,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_ctx=num_ctx,
            num_predict=num_predict,
            # Stop sequences: cut generation at code fences or end-of-sequence token.
            stop=["```", "</s>"],
            repeat_penalty=1.1,
        )
    except Exception:
        # logger.exception records the full traceback, unlike the previous
        # logger.error(f"...") which lost it and eagerly formatted the message.
        logger.exception("Error initializing Ollama LLM")
        return None
def get_llm_chain(language='en'):
    """
    Create a LangChain pipeline for analyzing prompts in Arabic or English.

    Builds a ``PromptTemplate | llm`` pipeline that asks the model to extract
    structured information (analysis type, target models, query parameters)
    from a user prompt, in the requested language.

    :param language: Language code ('en' or 'ar'); anything other than 'ar'
        falls back to the English template
    :type language: str
    :return: Runnable pipeline (prompt template piped into the LLM) for prompt
        analysis, or None if the LLM could not be initialized
    :rtype: Runnable or None
    """
    llm = get_llm_instance()
    if not llm:
        return None
    # Select the prompt template based on language. Literal braces in the JSON
    # examples must be escaped as {{ }} so PromptTemplate does not treat them
    # as input variables; the English template previously used single braces,
    # which made PromptTemplate see "analysis_type"/"field1" etc. as undeclared
    # variables and fail at render time.
    if language == 'ar':
        template = """
قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
الاستعلام: {prompt}
قم بتقديم إجابتك بتنسيق JSON كما يلي:
{{
"analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
    else:
        template = """
Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
Prompt: {prompt}
Provide your answer in JSON format as follows:
{{
"analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
"target_models": ["ModelName1", "ModelName2"],
"query_params": {{"field1": "value1", "field2": "value2"}}
}}
"""
    # Create the prompt template with the single real input variable.
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template
    )
    # Compose template and LLM into a runnable pipeline (LCEL pipe syntax).
    return prompt_template | llm