update requirements_dev

parent 250e0aa7bb
commit ddb9784588
@@ -18,7 +18,7 @@ Update your Django settings to use Qwen3-8B:
```python
# In settings.py
-OLLAMA_BASE_URL = "http://localhost:11434"
+OLLAMA_BASE_URL = "http://10.10.1.132:11434"
OLLAMA_MODEL = "qwen3:8b"
OLLAMA_TIMEOUT = 120  # Seconds
```
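
Before wiring this into the application, it can help to confirm that the configured endpoint is reachable and that the qwen3:8b model has been pulled. The snippet below is only a minimal sketch: it queries Ollama's standard `/api/tags` endpoint with the Python standard library, and the hard-coded URL simply mirrors the settings value above.

```python
# Minimal connectivity check (sketch): list the models the Ollama server
# reports via its /api/tags endpoint and confirm qwen3:8b is among them.
import json
from urllib.request import urlopen

OLLAMA_BASE_URL = "http://10.10.1.132:11434"  # same value as in settings.py

with urlopen(f"{OLLAMA_BASE_URL}/api/tags", timeout=10) as resp:
    models = [m["name"] for m in json.load(resp).get("models", [])]

print("qwen3:8b available:", any(name.startswith("qwen3:8b") for name in models))
```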
@@ -34,10 +34,10 @@ def get_ollama_llm():
    """
    try:
        # Get settings from Django settings or use defaults
-        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://localhost:11434')
+        base_url = getattr(settings, 'OLLAMA_BASE_URL', 'http://10.10.1.132:11434')
        model = getattr(settings, 'OLLAMA_MODEL', 'qwen3:8b')
        timeout = getattr(settings, 'OLLAMA_TIMEOUT', 120)

        # Configure Ollama with parameters optimized for Qwen3-8B with Arabic
        return Ollama(
            base_url=base_url,
@@ -71,7 +71,7 @@ def create_prompt_analyzer_chain(language='ar'):
    llm = get_ollama_llm()
    if not llm:
        return None

    # Define the prompt template optimized for Qwen3-8B
    if language == 'ar':
        template = """
@@ -79,9 +79,9 @@ def create_prompt_analyzer_chain(language='ar'):
1. نوع التحليل المطلوب
2. نماذج البيانات المستهدفة
3. أي معلمات استعلام

الاستعلام: {prompt}

قم بتقديم إجابتك بتنسيق JSON فقط، بدون أي نص إضافي، كما يلي:
```json
{{
@@ -97,9 +97,9 @@ def create_prompt_analyzer_chain(language='ar'):
1. The type of analysis required
2. Target data models
3. Any query parameters

Prompt: {prompt}

Provide your answer in JSON format only, without any additional text, as follows:
```json
{{
@@ -109,13 +109,13 @@ def create_prompt_analyzer_chain(language='ar'):
}}
```
"""

    # Create the prompt template
    prompt_template = PromptTemplate(
        input_variables=["prompt"],
        template=template
    )

    # Create and return the LLM chain
    return LLMChain(llm=llm, prompt=prompt_template)
```
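
For orientation, the chain can be exercised roughly as follows. This is a sketch rather than project code: the `analytics.llm` import path is hypothetical, and `LLMChain.run` is the classic LangChain invocation style matching the code above.

```python
# Hypothetical usage sketch of the analyzer chain defined above.
from analytics.llm import create_prompt_analyzer_chain, _parse_llm_json_response  # hypothetical module path

chain = create_prompt_analyzer_chain(language='ar')
if chain:
    # "What is the average monthly sales for this year?"
    raw = chain.run(prompt="ما هو متوسط المبيعات الشهرية لهذا العام؟")
    print(_parse_llm_json_response(raw))
```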
@@ -135,13 +135,13 @@ def _parse_llm_json_response(result):
        if json_match:
            json_str = json_match.group(1).strip()
            return json.loads(json_str)

        # If no markdown blocks, try to find JSON object directly
        json_match = re.search(r'({[\s\S]*})', result)
        if json_match:
            json_str = json_match.group(1).strip()
            return json.loads(json_str)

        # If still no match, try to parse the entire response as JSON
        return json.loads(result.strip())
    except Exception as e:
@@ -159,7 +159,7 @@ def _parse_llm_json_response(result):
## Handling Arabic-Specific Challenges with Qwen3-8B

1. **Diacritics**: Qwen3-8B handles Arabic diacritics well, but for consistency, consider normalizing input by removing diacritics (see the sketch after this list)

2. **Text Direction**: When displaying results in the frontend, ensure proper RTL (right-to-left) support

3. **Dialectal Variations**: Qwen3-8B performs best with Modern Standard Arabic (MSA), but has reasonable support for major dialects
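
The first point can be handled with a few lines of standard-library code. The following is a minimal sketch of a diacritics stripper, not code from this repository; the Unicode ranges cover the common tashkeel marks plus the tatweel character.

```python
# Sketch: strip Arabic diacritics (tashkeel) and tatweel before sending
# user input to the model, so prompts are normalized consistently.
import re

ARABIC_DIACRITICS = re.compile(r'[\u0610-\u061A\u064B-\u065F\u0670\u06D6-\u06ED\u0640]')

def strip_diacritics(text: str) -> str:
    """Return text with Arabic diacritic marks and tatweel removed."""
    return ARABIC_DIACRITICS.sub('', text)

print(strip_diacritics("السَّلامُ عَلَيْكُمْ"))  # -> السلام عليكم
```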
@@ -92,3 +92,5 @@ typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
wcwidth==0.2.13
+langchain
+langchain_ollama
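
With `langchain_ollama` in the requirements, the model client can also be constructed from that dedicated package instead of importing `Ollama` from the older LangChain location. A small sketch, reusing the base URL and model name from the settings example above:

```python
# Sketch: building the model client with the langchain_ollama package,
# using the same base URL and model name as in settings.py above.
from langchain_ollama import OllamaLLM

llm = OllamaLLM(
    base_url="http://10.10.1.132:11434",
    model="qwen3:8b",
)
print(llm.invoke("Reply with 'ok' if you can read this."))
```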