update

parent 97ebf23732
commit feb9204c16
@@ -22,8 +22,8 @@ Add the following to your Django settings.py file:
 ```python
 # Ollama and LangChain settings
-OLLAMA_BASE_URL = "http://localhost:11434"  # Default Ollama API URL
-OLLAMA_MODEL = "jais:13b"  # Or your preferred model
+OLLAMA_BASE_URL = "http://10.10.1.132:11434"  # Ollama API URL
+OLLAMA_MODEL = "qwen3:8b"  # Or your preferred model
 OLLAMA_TIMEOUT = 120  # Seconds
 ```
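Since the commit points every default at a fixed LAN address, it is worth checking that the server actually answers before Django depends on it. A minimal sketch, assuming the `requests` package and Ollama's standard `/api/tags` endpoint (which lists locally pulled models); the address mirrors the new setting above:

```python
import requests

def ollama_models(base_url="http://10.10.1.132:11434", timeout=5):
    """Return the model tags the Ollama server reports, or [] if unreachable."""
    try:
        resp = requests.get(f"{base_url}/api/tags", timeout=timeout)
        resp.raise_for_status()
        return [m["name"] for m in resp.json().get("models", [])]
    except requests.RequestException:
        return []

# Fail fast at startup if the configured model has not been pulled yet:
# assert "qwen3:8b" in ollama_models(), "run `ollama pull qwen3:8b` on the server"
```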
@@ -46,10 +46,10 @@ def get_ollama_llm():
     """
     try:
         # Get settings from Django settings or use defaults
-        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
-        model = getattr(settings, 'OLLAMA_MODEL', 'jais:13b')
+        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
+        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
         timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)

         # Configure Ollama with appropriate parameters for Arabic
         return Ollama(
             base_url=base_url,
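The hunk cuts off inside the `Ollama(...)` constructor. A plausible completion, assuming the `langchain_community` wrapper; `temperature` and `top_p` are guesses borrowed from the `get_llm_instance()` hunk at the end of this diff, not values confirmed by the truncated code:

```python
from langchain_community.llms import Ollama

def build_ollama(base_url, model, timeout):
    # Mirrors the settings resolved above; the keyword names are fields of
    # the langchain_community Ollama wrapper.
    return Ollama(
        base_url=base_url,   # e.g. "http://10.10.1.132:11434"
        model=model,         # e.g. "qwen3:8b"
        timeout=timeout,     # request timeout in seconds
        temperature=0.2,     # assumption: same value as OLLAMA_TEMPERATURE below
        top_p=0.8,           # assumption: same value as OLLAMA_TOP_P below
    )
```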
@@ -74,14 +74,14 @@ def create_prompt_analyzer_chain(language='ar'):
     llm = get_ollama_llm()
     if not llm:
         return None

     # Define the prompt template based on language
     if language == 'ar':
         template = """
         قم بتحليل الاستعلام التالي وتحديد نوع التحليل المطلوب ونماذج البيانات المستهدفة وأي معلمات استعلام.

         الاستعلام: {prompt}

         قم بتقديم إجابتك بتنسيق JSON كما يلي:
         {{
             "analysis_type": "count" أو "relationship" أو "performance" أو "statistics" أو "general",
@@ -92,9 +92,9 @@ def create_prompt_analyzer_chain(language='ar'):
     else:
         template = """
         Analyze the following prompt and determine the type of analysis required, target data models, and any query parameters.

         Prompt: {prompt}

         Provide your answer in JSON format as follows:
         {{
             "analysis_type": "count" or "relationship" or "performance" or "statistics" or "general",
@@ -102,13 +102,13 @@ def create_prompt_analyzer_chain(language='ar'):
             "query_params": {{"field1": "value1", "field2": "value2"}}
         }}
         """

     # Create the prompt template
     prompt_template = PromptTemplate(
         input_variables=["prompt"],
         template=template
     )

     # Create and return the LLM chain
     return LLMChain(llm=llm, prompt=prompt_template)
 ```
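To see what the analyzer chain produces end to end, it can be exercised from `python manage.py shell`. A sketch; the module path `analytics.llm_chains` is hypothetical, only `create_prompt_analyzer_chain` itself comes from the diff:

```python
from analytics.llm_chains import create_prompt_analyzer_chain  # hypothetical path

chain = create_prompt_analyzer_chain(language='en')
if chain:
    raw = chain.run(prompt="How many cars do we have?")
    # Expected shape, per the template above:
    # {"analysis_type": "count", "target_models": [...], "query_params": {...}}
    print(raw)
else:
    print("Ollama unreachable; chain creation returned None")
```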
@@ -124,12 +124,12 @@ import re

 class ModelAnalystView(View):
     # ... existing code ...

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         # We'll initialize chains on demand to avoid startup issues
         self.prompt_analyzer_chains = {}

     def _get_prompt_analyzer_chain(self, language='ar'):
         """
         Get or create a prompt analyzer chain for the specified language.
@@ -137,7 +137,7 @@ class ModelAnalystView(View):
         if language not in self.prompt_analyzer_chains:
             self.prompt_analyzer_chains[language] = create_prompt_analyzer_chain(language)
         return self.prompt_analyzer_chains[language]

     def _analyze_prompt_with_llm(self, prompt, language='ar'):
         """
         Use LangChain and Ollama to analyze the prompt.
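One caveat worth noting about the cache above: Django instantiates a fresh `View` object for every request, so `self.prompt_analyzer_chains` only avoids rebuilding a chain within a single request. If cross-request reuse is wanted, a module-level cache would do it; a sketch (the helper name is mine, not from the commit):

```python
_ANALYZER_CHAINS = {}

def get_cached_analyzer_chain(language='ar'):
    """Module-level cache: unlike a View attribute, it survives across requests."""
    chain = _ANALYZER_CHAINS.get(language)
    if chain is None:
        chain = create_prompt_analyzer_chain(language)
        if chain is not None:  # do not cache failures, so a later call can retry
            _ANALYZER_CHAINS[language] = chain
    return chain
```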
@@ -148,10 +148,10 @@ class ModelAnalystView(View):
             if not chain:
                 # Fallback to rule-based analysis if chain creation failed
                 return self._analyze_prompt_rule_based(prompt, language)

             # Run the chain
             result = chain.run(prompt=prompt)

             # Parse the JSON result
             # Find JSON content within the response (in case the LLM adds extra text)
             json_match = re.search(r'({.*})', result.replace('\n', ' '), re.DOTALL)
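The regex-and-parse step above is easy to unit-test in isolation. A self-contained version using the same `({.*})` strategy; note that once newlines are replaced with spaces, the `re.DOTALL` flag in the original no longer changes anything:

```python
import json
import re

def extract_json(llm_output):
    """Pull the first {...} block out of an LLM reply and parse it, else None."""
    match = re.search(r'({.*})', llm_output.replace('\n', ' '))
    if not match:
        return None
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError:
        return None

# extract_json('Sure! {"analysis_type": "count", "target_models": ["Car"]}')
# -> {'analysis_type': 'count', 'target_models': ['Car']}
```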
@@ -161,12 +161,12 @@ class ModelAnalystView(View):
             else:
                 # Fallback to rule-based analysis
                 return self._analyze_prompt_rule_based(prompt, language)

         except Exception as e:
             logger.error(f"Error in LLM prompt analysis: {str(e)}")
             # Fallback to rule-based analysis
             return self._analyze_prompt_rule_based(prompt, language)

     def _analyze_prompt_rule_based(self, prompt, language='ar'):
         """
         Rule-based fallback for prompt analysis.
@@ -177,19 +177,19 @@ class ModelAnalystView(View):
             "target_models": target_models,
             "query_params": query_params
         }

     def _process_prompt(self, prompt, user, dealer_id, language='ar'):
         """
         Process the natural language prompt and generate insights.
         """
         # ... existing code ...

         # Use LLM for prompt analysis
         analysis_result = self._analyze_prompt_with_llm(prompt, language)
         analysis_type = analysis_result.get('analysis_type', 'general')
         target_models = analysis_result.get('target_models', [])
         query_params = analysis_result.get('query_params', {})

         # ... rest of the method ...
 ```
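The elided "rest of the method" presumably branches on `analysis_type`. One way that dispatch could look; the handler mapping is hypothetical, only the three `analysis_result` keys come from the diff:

```python
def dispatch_analysis(analysis_result, handlers):
    # handlers: dict mapping analysis_type -> callable(target_models, query_params);
    # a 'general' entry serves as the catch-all default.
    analysis_type = analysis_result.get('analysis_type', 'general')
    handler = handlers.get(analysis_type, handlers['general'])
    return handler(analysis_result.get('target_models', []),
                   analysis_result.get('query_params', {}))
```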
@@ -215,7 +215,7 @@ def test_ollama_connection():
     if not llm:
         print("Failed to initialize Ollama LLM")
         return

     # Test with Arabic prompt
     arabic_prompt = "مرحبا، كيف حالك؟"  # "Hello, how are you?"
     print(f"Testing Arabic prompt: {arabic_prompt}")
@@ -232,7 +232,7 @@ def test_prompt_analysis():
     if not chain:
         print("Failed to create prompt analyzer chain")
         return

     # Test with an Arabic analysis prompt
     analysis_prompt = "كم عدد السيارات التي لدينا؟"  # "How many cars do we have?"
     print(f"Testing analysis prompt: {analysis_prompt}")
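Both helpers touch Django settings, so running them as a standalone script needs `django.setup()` first. A sketch, assuming a project module named `myproject` and that the helpers are importable as shown (both names are placeholders):

```python
import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # placeholder
django.setup()

from scripts.ollama_tests import test_ollama_connection, test_prompt_analysis  # placeholder

if __name__ == "__main__":
    test_ollama_connection()
    test_prompt_analysis()
```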
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)

 def get_llm_instance():
     try:
-        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
+        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
         model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
         temperature = getattr(settings, 'OLLAMA_TEMPERATURE', 0.2)
         top_p = getattr(settings, 'OLLAMA_TOP_P', 0.8)
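For reference, the full set of settings keys this diff reads via `getattr()`, expressed as `settings.py` entries; the values mirror the defaults in the hunks above:

```python
# settings.py
OLLAMA_BASE_URL = "http://10.10.1.132:11434"
OLLAMA_MODEL = "qwen3:8b"
OLLAMA_TIMEOUT = 120       # seconds
OLLAMA_TEMPERATURE = 0.2   # lower = more deterministic output
OLLAMA_TOP_P = 0.8         # nucleus-sampling cutoff
```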