# HH/apps/social/views.py
# 2026-01-15 14:31:58 +03:00
#
# 218 lines
# 8.8 KiB
# Python
"""
API ViewSets for Social Media Comments app
"""
from collections import Counter, defaultdict
from datetime import datetime, timedelta

from django.db.models import Q, Count, Avg, Sum
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters, status
from rest_framework.decorators import action
from rest_framework.response import Response

from .models import SocialMediaComment, SocialPlatform
from .serializers import SocialMediaCommentSerializer, SocialMediaCommentListSerializer
class SocialMediaCommentViewSet(viewsets.ModelViewSet):
    """
    CRUD API for SocialMediaComment with filtering and AI-analysis actions.

    Extra endpoints:
      * ``GET  /analytics/``        -- aggregate sentiment, platform, trend,
        keyword and topic statistics over the (filtered) comment set.
      * ``POST /trigger_analysis/`` -- queue AI analysis for up to 100
        unanalyzed comments via Celery.
      * ``POST /{pk}/reanalyze/``   -- re-queue AI analysis for one comment.
    """
    queryset = SocialMediaComment.objects.all()
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['platform', 'published_at']
    search_fields = ['comments', 'author', 'comment_id']
    ordering_fields = ['published_at', 'scraped_at', 'like_count', 'reply_count']
    ordering = ['-published_at']

    def get_serializer_class(self):
        """Use the lightweight serializer for list views, the full one otherwise."""
        if self.action == 'list':
            return SocialMediaCommentListSerializer
        return SocialMediaCommentSerializer

    def get_queryset(self):
        """
        Apply optional query-parameter filters on top of the base queryset.

        Supported params:
          * ``start_date`` / ``end_date`` -- inclusive ``published_at`` range.
          * ``min_sentiment`` -- minimum ``ai_analysis.sentiment.score``.
          * ``min_likes`` -- minimum ``like_count``.
          * ``analyzed`` -- 'true'/'false', whether ``ai_analysis`` is populated.
        """
        queryset = super().get_queryset()
        params = self.request.query_params

        start_date = params.get('start_date')
        if start_date:
            queryset = queryset.filter(published_at__gte=start_date)
        end_date = params.get('end_date')
        if end_date:
            queryset = queryset.filter(published_at__lte=end_date)

        # Filter on the nested JSON key ai_analysis["sentiment"]["score"].
        # This filter used to be a silent no-op; non-numeric values are still
        # ignored rather than producing a 500.
        min_sentiment = params.get('min_sentiment')
        if min_sentiment:
            try:
                queryset = queryset.filter(
                    ai_analysis__sentiment__score__gte=float(min_sentiment)
                )
            except ValueError:
                pass  # malformed value: keep the historical "ignore" behavior

        min_likes = params.get('min_likes')
        if min_likes:
            queryset = queryset.filter(like_count__gte=min_likes)

        # "Analyzed" means ai_analysis is neither NULL nor an empty JSON object.
        analyzed = params.get('analyzed')
        if analyzed == 'true':
            queryset = queryset.exclude(ai_analysis__isnull=True).exclude(ai_analysis={})
        elif analyzed == 'false':
            queryset = queryset.filter(Q(ai_analysis__isnull=True) | Q(ai_analysis={}))
        return queryset

    @action(detail=False, methods=['get'])
    def analytics(self, request):
        """
        Return aggregate analytics for the (filtered) comment set.

        Response keys: sentiment_distribution, platform_distribution,
        daily_trends (last 30 days), top_keywords, top_topics,
        total_comments, analyzed_comments.
        """
        queryset = self.filter_queryset(self.get_queryset())
        return Response({
            'sentiment_distribution': self._sentiment_distribution(queryset),
            'platform_distribution': self._platform_distribution(queryset),
            'daily_trends': self._daily_trends(queryset),
            'top_keywords': self._top_terms(queryset, 'keywords', 'keyword', 20),
            'top_topics': self._top_terms(queryset, 'topics', 'topic', 10),
            'total_comments': queryset.count(),
            'analyzed_comments': sum(1 for c in queryset if c.ai_analysis),
        })

    @staticmethod
    def _sentiment_distribution(queryset):
        """Count comments per sentiment class and average their scores."""
        counts = {'positive': 0, 'negative': 0, 'neutral': 0}
        scores = {'positive': [], 'negative': [], 'neutral': []}
        for comment in queryset:
            if not comment.ai_analysis:
                continue
            sentiment = comment.ai_analysis.get('sentiment', {})
            label = sentiment.get('classification', {}).get('en', 'neutral')
            score = sentiment.get('score', 0)
            if label in counts:
                counts[label] += 1
                if score:  # zero/missing scores are excluded from the average
                    scores[label].append(score)
        return [
            {
                'sentiment': label,
                'count': count,
                'avg_sentiment_score': (
                    sum(scores[label]) / len(scores[label]) if scores[label] else 0
                ),
            }
            for label, count in counts.items()
        ]

    @staticmethod
    def _platform_distribution(queryset):
        """Per-platform counts, engagement totals and average sentiment score."""
        distribution = []
        for platform_code, platform_name in SocialPlatform.choices:
            platform_qs = queryset.filter(platform=platform_code)
            # One combined aggregate query instead of count()+aggregate()*2.
            totals = platform_qs.aggregate(
                count=Count('id'),
                total_likes=Sum('like_count'),
                total_replies=Sum('reply_count'),
            )
            if not totals['count']:
                continue
            scores = [
                c.ai_analysis.get('sentiment', {}).get('score', 0)
                for c in platform_qs
                if c.ai_analysis and c.ai_analysis.get('sentiment', {}).get('score', 0)
            ]
            distribution.append({
                'platform': platform_code,
                'platform_display': platform_name,
                'count': totals['count'],
                'avg_sentiment': sum(scores) / len(scores) if scores else 0,
                'total_likes': int(totals['total_likes'] or 0),
                'total_replies': int(totals['total_replies'] or 0),
            })
        return distribution

    @staticmethod
    def _daily_trends(queryset):
        """Per-day comment counts and sentiment breakdown for the last 30 days."""
        # timezone.now() keeps the comparison aware-vs-aware under USE_TZ
        # (naive datetime.now() previously risked a mismatch with published_at).
        cutoff = timezone.now() - timedelta(days=30)
        daily = defaultdict(lambda: {'count': 0, 'positive': 0, 'negative': 0, 'neutral': 0})
        for comment in queryset.filter(published_at__gte=cutoff):
            if not comment.published_at:
                continue
            day = comment.published_at.date()
            daily[day]['count'] += 1
            if comment.ai_analysis:
                label = (
                    comment.ai_analysis
                    .get('sentiment', {})
                    .get('classification', {})
                    .get('en', 'neutral')
                )
                if label in ('positive', 'negative', 'neutral'):
                    daily[day][label] += 1
        return [{'day': day, **stats} for day, stats in sorted(daily.items())]

    @staticmethod
    def _top_terms(queryset, field, label, limit):
        """Most common entries of ai_analysis[field]['en'] across analyzed comments."""
        terms = []
        for comment in queryset.exclude(ai_analysis__isnull=True).exclude(ai_analysis={}):
            terms.extend(comment.ai_analysis.get(field, {}).get('en', []))
        return [
            {label: term, 'count': count}
            for term, count in Counter(terms).most_common(limit)
        ]

    @action(detail=False, methods=['post'])
    def trigger_analysis(self, request):
        """
        Queue AI analysis for up to 100 unanalyzed comments.

        Returns the Celery task id, the number queued, and how many
        unanalyzed comments remain beyond this batch.
        """
        unanalyzed = SocialMediaComment.objects.filter(
            Q(ai_analysis__isnull=True) | Q(ai_analysis={})
        )
        count = unanalyzed.count()
        if count == 0:
            return Response({
                'message': 'No unanalyzed comments found',
                'count': 0
            })
        # Local import: keeps the Celery tasks module lazy at view-import time.
        from .tasks import analyze_comments_batch
        # Fetch only the ids (no full model instances) for the batch of 100.
        batch_ids = list(unanalyzed.values_list('id', flat=True)[:100])
        task = analyze_comments_batch.delay(batch_ids)
        batch_size = len(batch_ids)
        return Response({
            'message': f'Analysis triggered for {batch_size} comments',
            'task_id': task.id,
            'count': batch_size,
            'remaining': max(0, count - 100)
        })

    @action(detail=True, methods=['post'])
    def reanalyze(self, request, pk=None):
        """Queue AI re-analysis of a single comment via Celery."""
        comment = self.get_object()
        # Local import: keeps the Celery tasks module lazy at view-import time.
        from .tasks import analyze_comments_batch
        task = analyze_comments_batch.delay([comment.id])
        return Response({
            'message': f'Reanalysis triggered for comment {comment.id}',
            'task_id': task.id
        })
# NOTE(review): module-level import placed AFTER the class body. It still runs
# at import time, so `Sum` is bound in module globals before `analytics()` can
# execute — but it belongs with the other `django.db.models` imports at the
# top of the file.
from django.db.models import Sum