ismail 2025-05-26 17:10:51 +03:00
parent 97ebf23732
commit feb9204c16
2 changed files with 25 additions and 25 deletions

File 1 of 2

@@ -22,8 +22,8 @@ Add the following to your Django settings.py file:
 
 ```python
 # Ollama and LangChain settings
-OLLAMA_BASE_URL = "http://localhost:11434"  # Default Ollama API URL
-OLLAMA_MODEL = "jais:13b"  # Or your preferred model
+OLLAMA_BASE_URL = "http://10.10.1.132:11434"  # Default Ollama API URL
+OLLAMA_MODEL = "qwen3:6b"  # Or your preferred model
 OLLAMA_TIMEOUT = 120  # Seconds
 ```
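
For reference, a hedged sketch of keeping these values environment-driven instead of hard-coding a LAN address like 10.10.1.132 (the `OLLAMA_*` names come from the diff; the `os.environ` indirection is an assumption, not part of this commit):

```python
# settings.py -- sketch only; the env-var fallbacks are an assumption, not this commit.
import os

OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "qwen3:8b")
OLLAMA_TIMEOUT = int(os.environ.get("OLLAMA_TIMEOUT", "120"))
```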
@@ -46,10 +46,10 @@ def get_ollama_llm():
     """
     try:
         # Get settings from Django settings or use defaults
-        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
-        model = getattr(settings, 'OLLAMA_MODEL', 'jais:13b')
+        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
+        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
         timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
 
         # Configure Ollama with appropriate parameters for Arabic
         return Ollama(
             base_url=base_url,
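
The hunk cuts off inside the `Ollama(...)` call. A minimal self-contained sketch of such a factory, assuming the `langchain_community` package provides the `Ollama` class; the `temperature`/`top_p` values are illustrative and not taken from this hunk:

```python
# Sketch of the LLM factory; assumes langchain_community is installed.
import logging

from django.conf import settings
from langchain_community.llms import Ollama

logger = logging.getLogger(__name__)


def get_ollama_llm():
    """Build an Ollama LLM from Django settings, or return None on failure."""
    try:
        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
        timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)
        return Ollama(
            base_url=base_url,
            model=model,
            timeout=timeout,   # seconds
            temperature=0.2,   # illustrative values, not shown in this hunk
            top_p=0.8,
        )
    except Exception as exc:
        logger.error("Failed to initialize Ollama LLM: %s", exc)
        return None
```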
@@ -74,14 +74,14 @@ def create_prompt_analyzer_chain(language='ar'):
     llm = get_ollama_llm()
     if not llm:
         return None
 
     # Define the prompt template based on language
     if language == 'ar':
         template = """
         قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.
 
         الاستعلام: {prompt}
 
         قم بتقديم إجابتك بتنسيق JSON كما يلي:
         {{
             "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
@@ -92,9 +92,9 @@ def create_prompt_analyzer_chain(language='ar'):
     else:
         template = """
         Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.
 
         Prompt: {prompt}
 
         Provide your answer in JSON format as follows:
         {{
             "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
@@ -102,13 +102,13 @@ def create_prompt_analyzer_chain(language='ar'):
             "query_params": {{"field1": "value1", "field2": "value2"}}
         }}
         """
 
     # Create the prompt template
     prompt_template = PromptTemplate(
         input_variables=["prompt"],
         template=template
     )
 
     # Create and return the LLM chain
     return LLMChain(llm=llm, prompt=prompt_template)
 ```
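
A quick usage sketch for the chain built above (the example prompt is illustrative; `LLMChain.run` is the legacy LangChain API this code targets, and the JSON post-processing mirrors the view code shown further down):

```python
# Illustrative usage of the analyzer chain; not part of the commit.
import json
import re

chain = create_prompt_analyzer_chain(language='en')
if chain:
    raw = chain.run(prompt="How many cars do we have?")
    # Pull the first {...} block out of the model's reply, as the view does
    match = re.search(r'({.*})', raw.replace('\n', ' '), re.DOTALL)
    if match:
        analysis = json.loads(match.group(1))
        print(analysis.get('analysis_type'))  # e.g. "count"
```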
@@ -124,12 +124,12 @@ import re
 
 class ModelAnalystView(View):
     # ... existing code ...
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         # We'll initialize chains on demand to avoid startup issues
         self.prompt_analyzer_chains = {}
 
     def _get_prompt_analyzer_chain(self, language='ar'):
         """
         Get or create a prompt analyzer chain for the specified language.
@@ -137,7 +137,7 @@ class ModelAnalystView(View):
         if language not in self.prompt_analyzer_chains:
             self.prompt_analyzer_chains[language] = create_prompt_analyzer_chain(language)
         return self.prompt_analyzer_chains[language]
 
     def _analyze_prompt_with_llm(self, prompt, language='ar'):
         """
         Use LangChain and Ollama to analyze the prompt.
@@ -148,10 +148,10 @@ class ModelAnalystView(View):
             if not chain:
                 # Fallback to rule-based analysis if chain creation failed
                 return self._analyze_prompt_rule_based(prompt, language)
 
             # Run the chain
             result = chain.run(prompt=prompt)
 
             # Parse the JSON result
             # Find JSON content within the response (in case the LLM adds extra text)
             json_match = re.search(r'({.*})', result.replace('\n', ' '), re.DOTALL)
@@ -161,12 +161,12 @@ class ModelAnalystView(View):
             else:
                 # Fallback to rule-based analysis
                 return self._analyze_prompt_rule_based(prompt, language)
 
         except Exception as e:
             logger.error(f"Error in LLM prompt analysis: {str(e)}")
             # Fallback to rule-based analysis
             return self._analyze_prompt_rule_based(prompt, language)
 
     def _analyze_prompt_rule_based(self, prompt, language='ar'):
         """
         Rule-based fallback for prompt analysis.
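
The lines between the two hunks above are not shown, but the surrounding code implies an `if json_match:` parse-or-fall-back step. A standalone sketch of that pattern; the helper name `_extract_json` is hypothetical:

```python
# Hypothetical helper showing the extract-JSON-or-fall-back pattern; not the commit's code.
import json
import re


def _extract_json(text):
    """Return the first {...} block in text as a dict, or None if absent/invalid."""
    match = re.search(r'({.*})', text.replace('\n', ' '), re.DOTALL)
    if not match:
        return None
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError:
        return None
```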
@@ -177,19 +177,19 @@ class ModelAnalystView(View):
             "target_models": target_models,
             "query_params": query_params
         }
 
     def _process_prompt(self, prompt, user, dealer_id, language='ar'):
         """
         Process the natural language prompt and generate insights.
         """
 
         # ... existing code ...
 
         # Use LLM for prompt analysis
         analysis_result = self._analyze_prompt_with_llm(prompt, language)
         analysis_type = analysis_result.get('analysis_type', 'general')
         target_models = analysis_result.get('target_models', [])
         query_params = analysis_result.get('query_params', {})
 
         # ... rest of the method ...
 ```
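
The commit does not show how the view receives prompts. A hypothetical `post` handler wiring these pieces together; the request field names and response shape are assumptions:

```python
# Hypothetical POST handler; the request/response shape is an assumption.
import json

from django.http import JsonResponse
from django.views import View


class ModelAnalystView(View):  # continuation sketch, not the commit's code
    def post(self, request, *args, **kwargs):
        body = json.loads(request.body or "{}")
        prompt = body.get("prompt", "")
        language = body.get("language", "ar")
        dealer_id = body.get("dealer_id")
        result = self._process_prompt(prompt, request.user, dealer_id, language)
        # ensure_ascii=False keeps Arabic text readable in the JSON response
        return JsonResponse(result, json_dumps_params={"ensure_ascii": False})
```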
@@ -215,7 +215,7 @@ def test_ollama_connection():
     if not llm:
         print("Failed to initialize Ollama LLM")
         return
 
     # Test with Arabic prompt
     arabic_prompt = "مرحبا، كيف حالك؟"
     print(f"Testing Arabic prompt: {arabic_prompt}")
@@ -232,7 +232,7 @@ def test_prompt_analysis():
     if not chain:
         print("Failed to create prompt analyzer chain")
         return
 
     # Test with an Arabic analysis prompt
     analysis_prompt = "كم عدد السيارات التي لدينا؟"
     print(f"Testing analysis prompt: {analysis_prompt}")

File 2 of 2

@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 
 def get_llm_instance():
     try:
-        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
+        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
         model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
         temperature = getattr(settings, 'OLLAMA_TEMPERATURE', 0.2)
         top_p = getattr(settings, 'OLLAMA_TOP_P', 0.8)
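
Since both files now default to the LAN host 10.10.1.132, a quick reachability probe can catch a wrong `OLLAMA_BASE_URL` before any chain runs. Ollama exposes a version endpoint at `/api/version`; the probe itself is not part of the commit:

```python
# Sanity check that the configured Ollama host answers; not part of the commit.
import urllib.request

from django.conf import settings


def check_ollama_reachable(timeout=5):
    """Return True if the configured Ollama server responds to /api/version."""
    base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
    try:
        with urllib.request.urlopen(f"{base_url}/api/version", timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False
```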