from langchain_ollama import OllamaLLM
from langchain.prompts import PromptTemplate
from django.conf import settings
import logging

logger = logging.getLogger(__name__)

def get_ollama_llm():
    """
    Initialize and return an Ollama LLM instance configured for Arabic support.
    """
    try:
        # Get settings from Django settings or use defaults
        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
        # timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)

        return OllamaLLM(
            base_url=base_url,
            model=model,
            temperature=0.2,        # low temperature for consistent, structured output
            top_p=0.8,
            top_k=40,
            num_ctx=4096,           # context window size in tokens
            num_predict=2048,       # cap on generated tokens
            stop=["```", "</s>"],   # halt if the model starts wrapping output in a code fence
            repeat_penalty=1.1,
        )
    except Exception as e:
        logger.error(f"Error initializing Ollama LLM: {e}")
        return None
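
# Quick connectivity check (a minimal sketch, assuming a local Ollama server is
# running at OLLAMA_BASE_URL and the configured model has been pulled, e.g. with
# `ollama pull qwen3:8b`):
#
#     llm = get_ollama_llm()
#     if llm is not None:
#         print(llm.invoke("مرحبا"))  # direct model call, no chain involved
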
def create_prompt_analyzer_chain(language='ar'):
    """
    Create a LangChain chain for analyzing prompts in Arabic or English.
    """
    llm = get_ollama_llm()
    if not llm:
        return None

    # Define the prompt template based on language. Literal braces in the JSON
    # examples are doubled ({{ }}) so PromptTemplate does not mistake them for
    # input variables.
    if language == 'ar':
        # Arabic version; same instructions as the English template below.
        template = """
        قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.

        الاستعلام: {prompt}

        قم بتقديم إجابتك بتنسيق JSON كما يلي:
        {{
            "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """
    else:
        template = """
        Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.

        Prompt: {prompt}

        Provide your answer in JSON format as follows:
        {{
            "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
            "target_models": ["ModelName1", "ModelName2"],
            "query_params": {{"field1": "value1", "field2": "value2"}}
        }}
        """

    # Create the prompt template
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template
    )

    # Compose prompt and model into a runnable chain (LCEL pipe syntax)
    return prompt_template | llm
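
# Usage sketch: invoking the chain and parsing its JSON output. Assumes Django
# settings are configured, the Ollama server is reachable, and the model honours
# the JSON-only instruction; the raw output may still need stripping/validation.
if __name__ == "__main__":
    import json

    chain = create_prompt_analyzer_chain(language='en')
    if chain is not None:
        # .invoke() on a prompt | llm pipeline returns the raw completion string
        raw = chain.invoke({"prompt": "How many orders were placed last month?"})
        try:
            analysis = json.loads(raw)
            print(analysis["analysis_type"], analysis["target_models"])
        except json.JSONDecodeError:
            logger.error("Model did not return valid JSON: %r", raw)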