"""
Social Media UI views - Server-rendered templates for social media monitoring.
"""
|
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.db.models import Q, Count, Avg, Sum
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.http import require_http_methods

from .models import SocialMediaComment, SocialPlatform
|
@login_required
def social_comment_list(request):
    """
    Social media comments list view with advanced filters and pagination.

    Features:
    - Server-side pagination
    - Advanced filters (platform, sentiment, date range, etc.)
    - Search by comment text, author
    - Export capability

    Query parameters (all optional): platform, sentiment, analyzed,
    date_from, date_to, min_likes, search, order_by, page, page_size.
    """
    # Base queryset
    queryset = SocialMediaComment.objects.all()

    # Apply filters from request
    platform_filter = request.GET.get('platform')
    if platform_filter:
        queryset = queryset.filter(platform=platform_filter)

    sentiment_filter = request.GET.get('sentiment')
    if sentiment_filter:
        # Filter by sentiment in ai_analysis JSONField
        queryset = queryset.filter(
            ai_analysis__sentiment__classification__en=sentiment_filter
        )

    analyzed_filter = request.GET.get('analyzed')
    if analyzed_filter == 'true':
        queryset = queryset.exclude(ai_analysis__isnull=True).exclude(ai_analysis={})
    elif analyzed_filter == 'false':
        # Single filter with Q objects instead of combining two querysets
        # with `|` — same rows, one predicate.
        queryset = queryset.filter(Q(ai_analysis__isnull=True) | Q(ai_analysis={}))

    # Date range filters (ISO date strings straight from the query string)
    date_from = request.GET.get('date_from')
    if date_from:
        queryset = queryset.filter(published_at__gte=date_from)

    date_to = request.GET.get('date_to')
    if date_to:
        queryset = queryset.filter(published_at__lte=date_to)

    # Minimum likes — ignore non-numeric input instead of raising a 500
    min_likes = request.GET.get('min_likes')
    if min_likes:
        try:
            queryset = queryset.filter(like_count__gte=int(min_likes))
        except (TypeError, ValueError):
            pass

    # Search by comment text, author, or platform comment id
    search_query = request.GET.get('search')
    if search_query:
        queryset = queryset.filter(
            Q(comments__icontains=search_query) |
            Q(author__icontains=search_query) |
            Q(comment_id__icontains=search_query)
        )

    # Ordering — whitelist fields so an arbitrary ?order_by= cannot trigger
    # a FieldError (HTTP 500) or probe related models
    allowed_order_fields = {
        'published_at', '-published_at', 'scraped_at', '-scraped_at',
        'like_count', '-like_count', 'reply_count', '-reply_count',
        'author', '-author',
    }
    order_by = request.GET.get('order_by', '-published_at')
    if order_by not in allowed_order_fields:
        order_by = '-published_at'
    queryset = queryset.order_by(order_by)

    # Pagination — tolerate non-numeric page_size and clamp to a sane range
    try:
        page_size = int(request.GET.get('page_size', 25))
    except (TypeError, ValueError):
        page_size = 25
    page_size = max(1, min(page_size, 100))
    paginator = Paginator(queryset, page_size)
    page_obj = paginator.get_page(request.GET.get('page', 1))

    # Get platform choices
    platforms = SocialPlatform.choices

    # Calculate statistics from the filtered queryset. ai_analysis is a
    # JSONField, so the sentiment breakdown is done in Python.
    total_comments = queryset.count()
    analyzed_comments = 0
    positive_count = 0
    negative_count = 0
    neutral_count = 0

    for comment in queryset:
        if comment.ai_analysis:
            analyzed_comments += 1
            sentiment = comment.ai_analysis.get('sentiment', {}).get('classification', {}).get('en', 'neutral')
            if sentiment == 'positive':
                positive_count += 1
            elif sentiment == 'negative':
                negative_count += 1
            else:
                neutral_count += 1

    stats = {
        'total': total_comments,
        'analyzed': analyzed_comments,
        'unanalyzed': total_comments - analyzed_comments,
        'positive': positive_count,
        'negative': negative_count,
        'neutral': neutral_count,
    }

    # Add platform-specific counts (over ALL comments, not the filtered set)
    for platform_code, _platform_name in platforms:
        stats[platform_code] = SocialMediaComment.objects.filter(platform=platform_code).count()

    context = {
        'page_obj': page_obj,
        'comments': page_obj.object_list,
        'stats': stats,
        'platforms': platforms,
        'filters': request.GET,
    }

    return render(request, 'social/social_comment_list.html', context)
|
|
|
|
|
|
@login_required
def social_comment_detail(request, pk):
    """
    Render the detail page for a single social media comment.

    Shows the full comment record: raw scraped data, AI analysis results,
    extracted keywords/topics, and entities. Returns 404 when no comment
    with the given primary key exists.
    """
    from django.shortcuts import get_object_or_404

    context = {
        'comment': get_object_or_404(SocialMediaComment, pk=pk),
    }
    return render(request, 'social/social_comment_detail.html', context)
|
|
|
|
|
|
@login_required
def social_platform(request, platform):
    """
    Platform-specific social media comments view.

    Features:
    - Filtered comments for specific platform
    - Platform-specific branding and metrics
    - Time-based filtering
    - Platform-specific trends

    Redirects back to the comment list with an error message when
    ``platform`` is not one of the model's declared choices.
    """
    from datetime import timedelta
    from django.utils import timezone

    # Validate platform against the model's declared choices
    valid_platforms = [choice[0] for choice in SocialPlatform.choices]
    if platform not in valid_platforms:
        messages.error(request, f"Invalid platform: {platform}")
        return redirect('social:social_comment_list')

    # Base queryset filtered by platform
    queryset = SocialMediaComment.objects.filter(platform=platform)

    # Apply additional filters
    sentiment_filter = request.GET.get('sentiment')
    if sentiment_filter:
        queryset = queryset.filter(
            ai_analysis__sentiment__classification__en=sentiment_filter
        )

    date_from = request.GET.get('date_from')
    if date_from:
        queryset = queryset.filter(published_at__gte=date_from)

    date_to = request.GET.get('date_to')
    if date_to:
        queryset = queryset.filter(published_at__lte=date_to)

    search_query = request.GET.get('search')
    if search_query:
        queryset = queryset.filter(
            Q(comments__icontains=search_query) |
            Q(author__icontains=search_query)
        )

    # Time-based view filter. timezone.now() (rather than naive
    # datetime.now()) keeps comparisons consistent with timezone-aware
    # published_at values when USE_TZ is enabled.
    time_filter = request.GET.get('time_filter', 'all')
    now = timezone.now()
    if time_filter == 'today':
        queryset = queryset.filter(published_at__date=now.date())
    elif time_filter == 'week':
        queryset = queryset.filter(published_at__gte=now - timedelta(days=7))
    elif time_filter == 'month':
        queryset = queryset.filter(published_at__gte=now - timedelta(days=30))

    # Ordering — whitelist fields so an arbitrary ?order_by= cannot
    # trigger a FieldError (HTTP 500)
    allowed_order_fields = {
        'published_at', '-published_at', 'scraped_at', '-scraped_at',
        'like_count', '-like_count', 'reply_count', '-reply_count',
        'author', '-author',
    }
    order_by = request.GET.get('order_by', '-published_at')
    if order_by not in allowed_order_fields:
        order_by = '-published_at'
    queryset = queryset.order_by(order_by)

    # Pagination — tolerate non-numeric page_size and clamp to a sane range
    try:
        page_size = int(request.GET.get('page_size', 25))
    except (TypeError, ValueError):
        page_size = 25
    page_size = max(1, min(page_size, 100))
    paginator = Paginator(queryset, page_size)
    page_obj = paginator.get_page(request.GET.get('page', 1))

    # Platform-specific statistics. ai_analysis is a JSONField, so the
    # sentiment breakdown happens in Python.
    total_comments = queryset.count()
    analyzed_comments = 0
    positive_count = 0
    negative_count = 0
    neutral_count = 0
    sentiment_scores = []

    for comment in queryset:
        if comment.ai_analysis:
            analyzed_comments += 1
            sentiment = comment.ai_analysis.get('sentiment', {}).get('classification', {}).get('en', 'neutral')
            score = comment.ai_analysis.get('sentiment', {}).get('score', 0)
            if sentiment == 'positive':
                positive_count += 1
            elif sentiment == 'negative':
                negative_count += 1
            else:
                neutral_count += 1
            if score:
                sentiment_scores.append(score)

    avg_sentiment = sum(sentiment_scores) / len(sentiment_scores) if sentiment_scores else 0

    # One aggregate query for both engagement totals (was two queries)
    engagement = queryset.aggregate(
        total_likes=Sum('like_count'),
        total_replies=Sum('reply_count'),
    )

    stats = {
        'total': total_comments,
        'analyzed': analyzed_comments,
        'positive': positive_count,
        'negative': negative_count,
        'neutral': neutral_count,
        'avg_sentiment': float(avg_sentiment),
        'total_likes': int(engagement['total_likes'] or 0),
        'total_replies': int(engagement['total_replies'] or 0),
    }

    # Platform name for display
    platform_display = dict(SocialPlatform.choices).get(platform, platform)

    # Platform brand color for styling the template header
    platform_colors = {
        'facebook': '#1877F2',
        'instagram': '#C13584',
        'youtube': '#FF0000',
        'twitter': '#1DA1F2',
        'linkedin': '#0077B5',
        'tiktok': '#000000',
        'google': '#4285F4',
    }
    platform_color = platform_colors.get(platform, '#6c757d')

    context = {
        'page_obj': page_obj,
        'comments': page_obj.object_list,
        'stats': stats,
        'platform': platform,
        'platform_display': platform_display,
        'platform_color': platform_color,
        'time_filter': time_filter,
        'filters': request.GET,
    }

    return render(request, 'social/social_platform.html', context)
|
|
|
|
|
|
@login_required
def social_analytics(request):
    """
    Social media analytics dashboard.

    Features:
    - Sentiment distribution
    - Platform distribution
    - Daily trends
    - Top keywords
    - Top topics
    - Engagement metrics

    Query parameters: platform, start_date/end_date (custom range) or
    date_range (preset rolling window in days, default 30).
    """
    from collections import Counter, defaultdict
    from datetime import timedelta
    from django.utils import timezone

    queryset = SocialMediaComment.objects.all()

    # Platform filter
    platform_filter = request.GET.get('platform')
    if platform_filter:
        queryset = queryset.filter(platform=platform_filter)

    # Date range: a custom start/end pair wins; otherwise fall back to the
    # preset window. Parse date_range defensively so ?date_range=abc cannot
    # raise a ValueError (HTTP 500).
    start_date = request.GET.get('start_date')
    end_date = request.GET.get('end_date')
    try:
        date_range = int(request.GET.get('date_range', 30))
    except (TypeError, ValueError):
        date_range = 30

    if start_date and end_date:
        queryset = queryset.filter(published_at__gte=start_date, published_at__lte=end_date)
    else:
        # timezone.now() keeps the comparison consistent with
        # timezone-aware published_at values when USE_TZ is enabled
        queryset = queryset.filter(published_at__gte=timezone.now() - timedelta(days=date_range))

    # ------------------------------------------------------------------
    # Single pass over the filtered comments. The previous implementation
    # iterated the queryset six times (sentiment, per-platform stats,
    # daily trends, keywords, topics, entities); one pass produces the
    # same results with a single DB round-trip.
    # ------------------------------------------------------------------
    sentiment_counts = {'positive': 0, 'negative': 0, 'neutral': 0}
    sentiment_score_lists = {'positive': [], 'negative': [], 'neutral': []}
    daily_data = defaultdict(lambda: {'count': 0, 'positive': 0, 'negative': 0, 'neutral': 0, 'total_likes': 0})
    platform_acc = defaultdict(lambda: {'count': 0, 'scores': [], 'likes': 0, 'replies': 0})
    keyword_counter = Counter()
    topic_counter = Counter()
    entity_counter = Counter()
    total_comments = 0
    analyzed_comments = 0

    for comment in queryset:
        total_comments += 1

        # Per-platform accumulation (count, engagement, sentiment scores)
        acc = platform_acc[comment.platform]
        acc['count'] += 1
        acc['likes'] += comment.like_count
        acc['replies'] += comment.reply_count

        day = comment.published_at.date() if comment.published_at else None
        if day is not None:
            daily_data[day]['count'] += 1
            daily_data[day]['total_likes'] += comment.like_count

        analysis = comment.ai_analysis
        if not analysis:
            # Unanalyzed comment: counted in totals/trends only
            continue
        analyzed_comments += 1

        sentiment = analysis.get('sentiment', {}).get('classification', {}).get('en', 'neutral')
        score = analysis.get('sentiment', {}).get('score', 0)
        if sentiment in sentiment_counts:
            sentiment_counts[sentiment] += 1
            if score:
                sentiment_score_lists[sentiment].append(score)
        if score:
            acc['scores'].append(score)
        if day is not None and sentiment in ('positive', 'negative', 'neutral'):
            daily_data[day][sentiment] += 1

        keyword_counter.update(analysis.get('keywords', {}).get('en', []))
        topic_counter.update(analysis.get('topics', {}).get('en', []))
        for entity in analysis.get('entities', []):
            if isinstance(entity, dict):
                # Prefer the English text; fall back to the raw text value
                text_en = entity.get('text', {}).get('en', entity.get('text'))
                if text_en:
                    entity_counter[text_en] += 1

    # Sentiment distribution with per-class average score
    sentiment_dist = []
    for sentiment, count in sentiment_counts.items():
        scores = sentiment_score_lists[sentiment]
        sentiment_dist.append({
            'sentiment': sentiment,
            'count': count,
            'avg_sentiment_score': sum(scores) / len(scores) if scores else 0,
        })

    # Platform distribution in declared-choice order, only for platforms
    # that actually have comments in range
    platform_dist = []
    for platform_code, platform_name in SocialPlatform.choices:
        if platform_code not in platform_acc:
            continue
        acc = platform_acc[platform_code]
        scores = acc['scores']
        avg_sentiment = sum(scores) / len(scores) if scores else 0
        platform_dist.append({
            'platform': platform_code,
            'platform_display': platform_name,
            'count': acc['count'],
            'avg_sentiment': float(avg_sentiment),
            'total_likes': int(acc['likes']),
            'total_replies': int(acc['replies']),
        })

    # Daily trend rows, oldest first
    daily_trends = [
        {'day': day, **day_stats}
        for day, day_stats in sorted(daily_data.items())
    ]

    top_keywords = [{'keyword': k, 'count': v} for k, v in keyword_counter.most_common(20)]
    top_topics = [{'topic': k, 'count': v} for k, v in topic_counter.most_common(10)]
    top_entities = [{'entity': k, 'count': v} for k, v in entity_counter.most_common(15)]

    # Engagement metrics in a single aggregate query (was four queries)
    agg = queryset.aggregate(
        avg_likes=Avg('like_count'),
        avg_replies=Avg('reply_count'),
        total_likes=Sum('like_count'),
        total_replies=Sum('reply_count'),
    )
    engagement_metrics = {
        'avg_likes': float(agg['avg_likes'] or 0),
        'avg_replies': float(agg['avg_replies'] or 0),
        'total_likes': int(agg['total_likes'] or 0),
        'total_replies': int(agg['total_replies'] or 0),
    }

    context = {
        'sentiment_distribution': sentiment_dist,
        'platform_distribution': platform_dist,
        'daily_trends': daily_trends,
        'top_keywords': top_keywords,
        'top_topics': top_topics,
        'top_entities': top_entities,
        'total_comments': total_comments,
        'analyzed_comments': analyzed_comments,
        'unanalyzed_comments': total_comments - analyzed_comments,
        'engagement_metrics': engagement_metrics,
        'date_range': date_range,
        'start_date': start_date,
        'end_date': end_date,
    }

    return render(request, 'social/social_analytics.html', context)
|
|
|
|
|
|
@login_required
@require_http_methods(["POST"])
def social_scrape_now(request):
    """
    Trigger manual scraping for a platform.

    Kicks off the Celery scraping task for the platform submitted in the
    POST body, reports success or failure via the messages framework, and
    always redirects back to the analytics dashboard.
    """
    platform = request.POST.get('platform')
    if not platform:
        messages.error(request, "Please select a platform.")
        return redirect('social:social_analytics')

    try:
        # Queue the scraping job asynchronously via Celery
        from .tasks import scrape_platform_comments

        task = scrape_platform_comments.delay(platform)
        messages.success(
            request,
            f"Scraping task initiated for {platform}. Task ID: {task.id}"
        )
    except Exception as exc:
        # Best effort: surface the failure to the user rather than 500
        messages.error(request, f"Error initiating scraping: {str(exc)}")

    return redirect('social:social_analytics')
|
|
|
|
|
|
@login_required
def social_export_csv(request):
    """Export social media comments to CSV"""
    import csv
    from datetime import datetime
    from django.http import HttpResponse

    # Same filter set as the list view, applied from the query string
    export_qs = SocialMediaComment.objects.all()

    platform_filter = request.GET.get('platform')
    if platform_filter:
        export_qs = export_qs.filter(platform=platform_filter)

    sentiment_filter = request.GET.get('sentiment')
    if sentiment_filter:
        export_qs = export_qs.filter(
            ai_analysis__sentiment__classification__en=sentiment_filter
        )

    date_from = request.GET.get('date_from')
    if date_from:
        export_qs = export_qs.filter(published_at__gte=date_from)

    date_to = request.GET.get('date_to')
    if date_to:
        export_qs = export_qs.filter(published_at__lte=date_to)

    # Stream the CSV straight into the response with a timestamped filename
    response = HttpResponse(content_type='text/csv')
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    response['Content-Disposition'] = f'attachment; filename="social_comments_{stamp}.csv"'

    writer = csv.writer(response)
    writer.writerow([
        'ID', 'Platform', 'Comment ID', 'Author', 'Comment',
        'Published At', 'Scraped At', 'Sentiment', 'Sentiment Score',
        'Confidence', 'Likes', 'Replies', 'Keywords', 'Topics'
    ])

    for comment in export_qs:
        # Pull analysis fields out of the ai_analysis JSON blob; missing
        # analysis yields empty cells
        analysis = comment.ai_analysis or {}
        sentiment_info = analysis.get('sentiment', {})
        writer.writerow([
            comment.id,
            comment.get_platform_display(),
            comment.comment_id,
            comment.author,
            comment.comments,
            comment.published_at,
            comment.scraped_at,
            sentiment_info.get('classification', {}).get('en'),
            sentiment_info.get('score'),
            sentiment_info.get('confidence'),
            comment.like_count,
            comment.reply_count,
            ', '.join(analysis.get('keywords', {}).get('en', [])),
            ', '.join(analysis.get('topics', {}).get('en', [])),
        ])

    return response
|
|
|
|
|
|
@login_required
def social_export_excel(request):
    """Export social media comments to Excel"""
    import openpyxl
    from datetime import datetime
    from django.http import HttpResponse

    # Apply the same optional query-string filters as the CSV export
    export_qs = SocialMediaComment.objects.all()
    for param, lookup in (
        ('platform', 'platform'),
        ('sentiment', 'ai_analysis__sentiment__classification__en'),
        ('date_from', 'published_at__gte'),
        ('date_to', 'published_at__lte'),
    ):
        value = request.GET.get(param)
        if value:
            export_qs = export_qs.filter(**{lookup: value})

    # Build the workbook
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Social Media Comments"

    sheet.append([
        'ID', 'Platform', 'Comment ID', 'Author', 'Comment',
        'Published At', 'Scraped At', 'Sentiment', 'Sentiment Score',
        'Confidence', 'Likes', 'Replies', 'Keywords', 'Topics', 'Entities'
    ])

    def _fmt(dt):
        # Render datetimes as text; empty string when missing
        return dt.strftime('%Y-%m-%d %H:%M:%S') if dt else ''

    for comment in export_qs:
        # Pull analysis fields out of the ai_analysis JSON blob; missing
        # analysis yields empty cells
        analysis = comment.ai_analysis or {}
        sentiment_info = analysis.get('sentiment', {})

        entity_names = []
        for entity in analysis.get('entities', []):
            if isinstance(entity, dict):
                # Prefer English entity text; fall back to the raw value
                name = entity.get('text', {}).get('en', entity.get('text'))
                if name:
                    entity_names.append(name)

        sheet.append([
            comment.id,
            comment.get_platform_display(),
            comment.comment_id,
            comment.author,
            comment.comments,
            _fmt(comment.published_at),
            _fmt(comment.scraped_at),
            sentiment_info.get('classification', {}).get('en'),
            sentiment_info.get('score'),
            sentiment_info.get('confidence'),
            comment.like_count,
            comment.reply_count,
            ', '.join(analysis.get('keywords', {}).get('en', [])),
            ', '.join(analysis.get('topics', {}).get('en', [])),
            ', '.join(entity_names),
        ])

    # Serialize the workbook into a timestamped attachment response
    response = HttpResponse(
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    response['Content-Disposition'] = f'attachment; filename="social_comments_{stamp}.xlsx"'

    workbook.save(response)
    return response
|