# social/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.contrib import messages
from django.core.paginator import Paginator
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
import json
import logging
import requests
import csv
from apps.social.models import SocialAccount, SocialContent, SocialComment, SocialReply
from apps.social.services.linkedin import LinkedInService, LinkedInAPIError
from apps.social.services.google import GoogleBusinessService, GoogleAPIError
from apps.social.services.meta import MetaService, MetaAPIError
from apps.social.services.tiktok import TikTokService, TikTokAPIError
from apps.social.services.x import XService, XAPIError
from apps.social.services.youtube import YouTubeService, YouTubeAPIError
from apps.social.tasks.linkedin import sync_new_comments_task as li_sync
from apps.social.tasks.google import sync_single_account as go_sync
from apps.social.tasks.meta import meta_historical_backfill_task as meta_sync, meta_poll_new_comments_task as meta_poll
from apps.social.tasks.tiktok import extract_all_comments_task as tt_full_sync, poll_new_comments_task as tt_poll
from apps.social.tasks.x import extract_all_replies_task as x_full_sync, poll_new_replies_task as x_poll
from apps.social.tasks.youtube import deep_historical_backfill_task as yt_full_sync, poll_new_comments_task as yt_poll
from apps.social.tasks.ai import analyze_pending_comments_task, analyze_comment_task, reanalyze_comment_task
from apps.social.services.ai_service import OpenRouterService
# CRITICAL FIX: Import Google/YouTube clients to fetch necessary IDs during Auth
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import Flow
logger = logging.getLogger(__name__)
PLATFORM_NAMES = {
'LI': 'LinkedIn',
'GO': 'Google Reviews',
'META': 'Meta (Facebook/Instagram)',
'TT': 'TikTok',
'X': 'X (Twitter)',
'YT': 'YouTube'
}
# For source_platform display only
SOURCE_PLATFORM_NAMES = {
'FB': 'Facebook',
'IG': 'Instagram'
}
@login_required
def dashboard(request, platform_type=None):
"""
Unified dashboard showing accounts by platform
"""
# FIX: Using 'owner' to match Model
accounts_qs = SocialAccount.objects.filter(owner=request.user)
if platform_type:
accounts_qs = accounts_qs.filter(platform_type=platform_type)
accounts = {}
for acc in accounts_qs:
        is_expired = bool(acc.expires_at and timezone.now() >= acc.expires_at)
accounts[acc.platform_type] = {
'account': acc,
'is_expired': is_expired,
'status_msg': 'Active' if acc.is_active else 'Inactive',
'platform_name': PLATFORM_NAMES.get(acc.platform_type, acc.platform_type)
}
return render(request, 'social/dashboard.html', {
'accounts': accounts,
'selected_platform': platform_type,
'platform_names': PLATFORM_NAMES
})
@login_required
def export_comments_csv(request, platform_type):
"""
Export all comments for a specific platform to CSV with detailed fields
Supports the same filters as comments_list view
"""
account = get_object_or_404(
SocialAccount,
owner=request.user,
platform_type=platform_type
)
comments_qs = SocialComment.objects.filter(
account=account,
platform_type=platform_type
).order_by('-created_at')
# Apply the same filters as comments_list
search_query = request.GET.get('search')
if search_query:
comments_qs = comments_qs.filter(text__icontains=search_query)
sentiment_filter = request.GET.get('sentiment')
if sentiment_filter in ['positive', 'neutral', 'negative']:
comments_qs = comments_qs.filter(
ai_analysis__sentiment__classification__en=sentiment_filter
)
sync_filter = request.GET.get('sync_via_webhook')
if sync_filter == 'true':
comments_qs = comments_qs.filter(synced_via_webhook=True)
elif sync_filter == 'false':
comments_qs = comments_qs.filter(synced_via_webhook=False)
source_filter = request.GET.get('source_platform')
if source_filter in ['FB', 'IG']:
comments_qs = comments_qs.filter(source_platform=source_filter)
# Create CSV response
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = f'attachment; filename="{platform_type}_comments_{timezone.now().strftime("%Y%m%d_%H%M%S")}.csv"'
writer = csv.writer(response)
# Write header row with all fields
header = [
'Comment ID',
'Author Name',
'Author ID',
'Comment Text',
'Created At',
'Added At',
'Platform Type',
'Source Platform',
'Like Count',
'Reply Count',
'Rating',
'Media URL',
'Sentiment (EN)',
'Sentiment (AR)',
'Sentiment Score',
'Sentiment Confidence',
'Sentiment Urgency (EN)',
'Sentiment Urgency (AR)',
'Summary (EN)',
'Summary (AR)',
'Keywords (EN)',
'Keywords (AR)',
'Topics (EN)',
'Topics (AR)',
'Emotions',
'Primary Concern (EN)',
'Primary Concern (AR)',
'Affected Department (EN)',
'Affected Department (AR)',
'SQI: Clinical Care',
'SQI: Staff Behavior',
'SQI: Facility Condition',
'SQI: Wait Time',
'SQI: Communication',
'SQI: Overall Experience',
'Complaint Type (EN)',
'Complaint Type (AR)',
'Requires Follow-up',
'Follow-up Priority (EN)',
'Follow-up Priority (AR)',
'Recommended Actions (EN)',
'Recommended Actions (AR)',
'Patient Satisfaction Score',
'NPS Likelihood',
'Retention Risk Level',
'Retention Risk Score',
'Reputation Impact Level',
'Reputation Impact Score',
'Compliance Concerns Present',
'Compliance Concern Types',
'Patient Journey Touchpoints',
'Mentions Competitors',
'Unique Selling Points (EN)',
'Unique Selling Points (AR)',
'Improvement Opportunities (EN)',
'Improvement Opportunities (AR)'
]
writer.writerow(header)
# Write data rows
for comment in comments_qs:
# Extract AI analysis data safely
ai_data = comment.ai_analysis or {}
# Sentiment data
sentiment_data = ai_data.get('sentiment', {})
sentiment_en = sentiment_data.get('classification', {}).get('en', '')
sentiment_ar = sentiment_data.get('classification', {}).get('ar', '')
sentiment_score = sentiment_data.get('score', '')
sentiment_confidence = sentiment_data.get('confidence', '')
sentiment_urgency_en = sentiment_data.get('urgency_level', {}).get('en', '')
sentiment_urgency_ar = sentiment_data.get('urgency_level', {}).get('ar', '')
# Actionable Insights data
actionable_data = ai_data.get('actionable_insights', {})
primary_concern_en = actionable_data.get('primary_concern', {}).get('en', '')
primary_concern_ar = actionable_data.get('primary_concern', {}).get('ar', '')
affected_dept_en = actionable_data.get('affected_department', {}).get('en', '')
affected_dept_ar = actionable_data.get('affected_department', {}).get('ar', '')
# Service Quality Indicators
sqi = actionable_data.get('service_quality_indicators', {})
sqi_clinical = sqi.get('clinical_care', '')
sqi_staff = sqi.get('staff_behavior', '')
sqi_facility = sqi.get('facility_condition', '')
sqi_wait = sqi.get('wait_time', '')
sqi_communication = sqi.get('communication', '')
sqi_overall = sqi.get('overall_experience', '')
# Complaint and follow-up
complaint_type_en = actionable_data.get('complaint_type', {}).get('en', '')
complaint_type_ar = actionable_data.get('complaint_type', {}).get('ar', '')
requires_followup = actionable_data.get('requires_followup', '')
followup_priority_en = actionable_data.get('followup_priority', {}).get('en', '')
followup_priority_ar = actionable_data.get('followup_priority', {}).get('ar', '')
recommended_actions_en = '; '.join(actionable_data.get('recommended_actions', {}).get('en', []))
recommended_actions_ar = '; '.join(actionable_data.get('recommended_actions', {}).get('ar', []))
# Business Intelligence
bi_data = ai_data.get('business_intelligence', {})
patient_satisfaction = bi_data.get('patient_satisfaction_score', '')
nps_likelihood = bi_data.get('nps_likelihood', '')
retention_risk_level = bi_data.get('retention_risk', {}).get('level', '')
retention_risk_score = bi_data.get('retention_risk', {}).get('score', '')
reputation_impact_level = bi_data.get('reputation_impact', {}).get('level', '')
reputation_impact_score = bi_data.get('reputation_impact', {}).get('score', '')
compliance_present = bi_data.get('compliance_concerns', {}).get('present', '')
compliance_types = ', '.join(bi_data.get('compliance_concerns', {}).get('types', []))
# Patient Journey
journey_data = ai_data.get('patient_journey', {})
touchpoints = journey_data.get('touchpoints', {})
journey_touchpoints = ', '.join([k for k, v in touchpoints.items() if v])
# Competitive Insights
competitive_data = ai_data.get('competitive_insights', {})
mentions_competitors = competitive_data.get('mentions_competitors', '')
usp_en = ', '.join(competitive_data.get('unique_selling_points', {}).get('en', []))
usp_ar = ', '.join(competitive_data.get('unique_selling_points', {}).get('ar', []))
improvement_opp_en = ', '.join(competitive_data.get('improvement_opportunities', {}).get('en', []))
improvement_opp_ar = ', '.join(competitive_data.get('improvement_opportunities', {}).get('ar', []))
# Summary data
summary_en = ai_data.get('summaries', {}).get('en', '')
summary_ar = ai_data.get('summaries', {}).get('ar', '')
# Keywords data (comma-separated)
keywords_en = ', '.join(ai_data.get('keywords', {}).get('en', []))
keywords_ar = ', '.join(ai_data.get('keywords', {}).get('ar', []))
# Topics data (comma-separated)
topics_en = ', '.join(ai_data.get('topics', {}).get('en', []))
topics_ar = ', '.join(ai_data.get('topics', {}).get('ar', []))
# Emotions data (format: emotion1:score1, emotion2:score2)
emotions_data = ai_data.get('emotions', {})
emotions_list = []
for k, v in emotions_data.items():
# Handle both numeric values and nested structures
if isinstance(v, (int, float)):
emotions_list.append(f"{k}:{v:.2f}")
else:
                # Non-numeric values fall back to their string representation
                emotions_list.append(f"{k}:{v}")
emotions = ', '.join(emotions_list)
row = [
comment.comment_id,
comment.author_name,
comment.author_id or '',
comment.text,
comment.created_at.strftime('%Y-%m-%d %H:%M:%S') if comment.created_at else '',
comment.added_at.strftime('%Y-%m-%d %H:%M:%S') if comment.added_at else '',
comment.platform_type,
comment.source_platform or '',
comment.like_count,
comment.reply_count,
comment.rating or '',
comment.media_url or '',
sentiment_en,
sentiment_ar,
sentiment_score,
sentiment_confidence,
sentiment_urgency_en,
sentiment_urgency_ar,
summary_en,
summary_ar,
keywords_en,
keywords_ar,
topics_en,
topics_ar,
emotions,
primary_concern_en,
primary_concern_ar,
affected_dept_en,
affected_dept_ar,
sqi_clinical,
sqi_staff,
sqi_facility,
sqi_wait,
sqi_communication,
sqi_overall,
complaint_type_en,
complaint_type_ar,
requires_followup,
followup_priority_en,
followup_priority_ar,
recommended_actions_en,
recommended_actions_ar,
patient_satisfaction,
nps_likelihood,
retention_risk_level,
retention_risk_score,
reputation_impact_level,
reputation_impact_score,
compliance_present,
compliance_types,
journey_touchpoints,
mentions_competitors,
usp_en,
usp_ar,
improvement_opp_en,
improvement_opp_ar
]
writer.writerow(row)
return response
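# The CSV export above repeatedly walks nested JSON with chained .get() calls.
# A minimal sketch of a safe nested-lookup helper that could replace those
# chains (illustrative only; not wired into the views):
def _dig(data, *keys, default=''):
    """Walk nested dicts safely, e.g. _dig(ai_data, 'sentiment', 'classification', 'en')."""
    for key in keys:
        if not isinstance(data, dict):
            return default
        data = data.get(key, default)
    return data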
@login_required
def comments_list(request, platform_type):
"""
List all comments for a specific platform with AI analysis filtering
"""
account = get_object_or_404(
SocialAccount,
owner=request.user,
platform_type=platform_type
)
comments_qs = SocialComment.objects.filter(
account=account,
platform_type=platform_type
).order_by('-created_at')
search_query = request.GET.get('search')
if search_query:
comments_qs = comments_qs.filter(text__icontains=search_query)
# Sentiment filter - filter by AI analysis sentiment
sentiment_filter = request.GET.get('sentiment')
if sentiment_filter in ['positive', 'neutral', 'negative']:
# Filter by JSON field containing sentiment classification
comments_qs = comments_qs.filter(
ai_analysis__sentiment__classification__en=sentiment_filter
)
sync_filter = request.GET.get('sync_via_webhook')
if sync_filter == 'true':
comments_qs = comments_qs.filter(synced_via_webhook=True)
elif sync_filter == 'false':
comments_qs = comments_qs.filter(synced_via_webhook=False)
# Source platform filter for Meta (FB/IG)
source_filter = request.GET.get('source_platform')
if source_filter in ['FB', 'IG']:
comments_qs = comments_qs.filter(source_platform=source_filter)
paginator = Paginator(comments_qs, 20)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, 'social/comments_list.html', {
'account': account,
'page_obj': page_obj,
'platform_type': platform_type,
'platform_name': PLATFORM_NAMES.get(platform_type, platform_type),
'search_query': search_query,
        'sentiment_filter': sentiment_filter,
        'sync_filter': sync_filter,
        'source_filter': source_filter  # pass through like the other filters
    })
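# comments_list and export_comments_csv apply the same four filters; a hedged
# sketch of a shared helper (the name is illustrative, not part of the codebase):
def _apply_comment_filters_sketch(request, qs):
    """Apply the search / sentiment / webhook / source filters used by both views."""
    search = request.GET.get('search')
    if search:
        qs = qs.filter(text__icontains=search)
    sentiment = request.GET.get('sentiment')
    if sentiment in ('positive', 'neutral', 'negative'):
        qs = qs.filter(ai_analysis__sentiment__classification__en=sentiment)
    sync = request.GET.get('sync_via_webhook')
    if sync in ('true', 'false'):
        qs = qs.filter(synced_via_webhook=(sync == 'true'))
    source = request.GET.get('source_platform')
    if source in ('FB', 'IG'):
        qs = qs.filter(source_platform=source)
    return qs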
@login_required
def comment_detail(request, platform_type, comment_id):
"""
Show comment detail and allow reply
"""
comment = get_object_or_404(
SocialComment,
comment_id=comment_id,
platform_type=platform_type,
account__owner=request.user
)
if request.method == "POST":
text = request.POST.get('text')
if not text:
messages.error(request, "Reply text cannot be empty.")
else:
try:
res = None
account = comment.account
if platform_type == 'LI':
res = LinkedInService.post_reply(account, comment.content_id, text)
elif platform_type == 'GO':
res = GoogleBusinessService.reply_to_review(account, comment.content_id, text)
elif platform_type == 'META':
# FIXED: Use correct method signature
# Get access token (may be stored in content_data for page-specific token)
access_token = comment.content.content_data.get('access_token', comment.account.access_token)
                    res = MetaService.post_reply(comment_id, access_token, comment.source_platform, text)
elif platform_type == 'TT':
# UPDATED: Pass video_id (ad_id) and comment_id
ad_id = comment.content.content_id
res = TikTokService.reply_to_comment(account, ad_id, comment.comment_id, text)
elif platform_type == 'X':
res = XService.post_reply(account, comment.content_id, text)
elif platform_type == 'YT':
res = YouTubeService.post_reply(account, comment_id, text)
if res:
# Extract ID based on platform specific response structure
reply_id = None
if platform_type == 'TT':
# TikTok Business API returns: {"comment_id": "..."}
reply_id = res.get('comment_id')
elif platform_type == 'GO':
# Google returns the resource name in 'name'
reply_id = res.get('name')
else:
# LinkedIn, Meta, X usually return 'id'
reply_id = res.get('id')
if reply_id:
SocialReply.objects.create(
platform_type=platform_type,
account=account,
comment=comment,
reply_id=reply_id,
author_name="You",
text=text,
created_at=timezone.now(),
reply_data=res
)
messages.success(request, "Reply posted successfully!")
return redirect('social:comments_list', platform_type=platform_type)
else:
# We got a response but couldn't find the ID
logger.warning(f"Reply success but ID missing in response: {res}")
messages.error(request, "Reply posted but ID tracking failed.")
else:
messages.error(request, "Reply failed: No response from API.")
except Exception as e:
messages.error(request, f"API Error: {str(e)}")
# Get all replies for this comment
replies = comment.replies.all()
return render(request, 'social/comment_detail.html', {
'comment': comment,
'replies': replies,
'platform_type': platform_type,
'platform_name': PLATFORM_NAMES.get(platform_type, platform_type)
})
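# The per-platform reply-ID extraction in comment_detail could be table-driven.
# A small sketch (keys mirror the branches above; every other platform uses 'id'):
_REPLY_ID_KEY_SKETCH = {'TT': 'comment_id', 'GO': 'name'}

def _extract_reply_id_sketch(platform_type, res):
    """Pick the reply ID out of a platform response dict."""
    return (res or {}).get(_REPLY_ID_KEY_SKETCH.get(platform_type, 'id'))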
@login_required
def manual_sync(request, platform_type, sync_type='delta'):
"""
Trigger manual sync for a specific platform
"""
account = get_object_or_404(
SocialAccount,
owner=request.user,
platform_type=platform_type
)
try:
if platform_type == 'LI':
li_sync.delay(account.id)
messages.success(request, "LinkedIn sync started (check back in 2 mins)")
elif platform_type == 'GO':
go_sync(account.id)
messages.success(request, "Google Reviews sync completed")
elif platform_type == 'META':
if sync_type == 'full':
meta_sync.delay(account.id)
messages.success(request, "Meta full sync started (may take a while)")
else:
meta_poll.delay()
messages.success(request, "Meta delta sync started")
elif platform_type == 'TT':
if sync_type == 'full':
tt_full_sync.delay(account.id)
messages.success(request, "TikTok full sync started (may take a while)")
else:
tt_poll.delay()
messages.success(request, "TikTok delta sync started")
elif platform_type == 'X':
if sync_type == 'full':
x_full_sync.delay(account.id)
messages.success(request, "X full sync started (may take a while)")
else:
x_poll.delay()
messages.success(request, "X delta sync started")
elif platform_type == 'YT':
if sync_type == 'full':
yt_full_sync.delay(account.id)
messages.success(request, "YouTube full sync started (may take a while)")
else:
yt_poll.delay()
messages.success(request, "YouTube delta sync started")
except Exception as e:
messages.error(request, f"Sync failed: {str(e)}")
return redirect('social:dashboard')
@login_required
def auth_start(request, platform_type):
auth_url = None
try:
if platform_type == 'LI':
auth_url = LinkedInService.get_auth_url()
elif platform_type == 'GO':
auth_url = GoogleBusinessService.get_auth_url()
elif platform_type == 'META':
auth_url = MetaService.get_auth_url()
elif platform_type == 'TT':
auth_url = TikTokService.get_auth_url()
elif platform_type == 'X':
verifier, challenge, state = XService.generate_auth_params()
request.session['x_code_verifier'] = verifier
request.session['x_state'] = state
auth_url = XService.get_auth_url(challenge, state)
elif platform_type == 'YT':
auth_url, _ = YouTubeService.get_auth_url()
if auth_url:
return redirect(auth_url)
else:
messages.error(request, f"OAuth not configured for {PLATFORM_NAMES.get(platform_type)}")
return redirect('social:dashboard')
except Exception as e:
messages.error(request, f"Auth failed: {str(e)}")
return redirect('social:dashboard')
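# A minimal sketch of what XService.generate_auth_params is assumed to produce
# for OAuth 2.0 PKCE (RFC 7636, S256 method); names here are illustrative:
import base64
import hashlib
import secrets

def _generate_pkce_params_sketch():
    """Return (code_verifier, code_challenge, state) per the S256 PKCE flow."""
    verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b'=').decode()
    challenge = base64.urlsafe_b64encode(
        hashlib.sha256(verifier.encode()).digest()
    ).rstrip(b'=').decode()
    state = secrets.token_urlsafe(16)
    return verifier, challenge, state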
@login_required
def auth_callback(request, platform_type):
if "error" in request.GET:
messages.error(request, f"Auth Failed: {request.GET.get('error_description')}")
return redirect('social:dashboard')
code = request.GET.get('code')
state = request.GET.get('state')
try:
if platform_type == 'LI':
token_data = LinkedInService.exchange_code_for_token(code)
headers = {"Authorization": f"Bearer {token_data['access_token']}"}
org_res = requests.get(
f"https://api.linkedin.com/v2/organizationalEntityAcls?q=roleAssignee",
headers=headers
)
if org_res.status_code != 200:
raise Exception("Failed to fetch Org")
            elements = org_res.json().get('elements') or [{}]
            org_urn = elements[0].get('organizationalTarget')
if not org_urn:
raise Exception("No Org found")
acc_exp = timezone.now() + timezone.timedelta(seconds=token_data['expires_in'])
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='LI',
defaults={
"platform_id": org_urn,
"access_token": token_data['access_token'],
"refresh_token": token_data['refresh_token'],
"is_active": True,
"expires_at": acc_exp,
"name": f"LinkedIn Account"
}
)
messages.success(request, "Connected to LinkedIn!")
elif platform_type == 'GO':
# FIX: Google Auth - Fetch Account Name from API
token_data = GoogleBusinessService.exchange_code_for_token(code)
# Build service to get account name (e.g. accounts/123456)
creds = Credentials.from_authorized_user_info(token_data)
service = build('mybusinessaccountmanagement', 'v1', credentials=creds)
accounts = service.accounts().list().execute()
if not accounts.get('accounts'):
raise Exception("No Google Business Account found. Make sure you are admin.")
            # Use the first account (a full app would let the user choose among several)
google_account_name = accounts['accounts'][0]['name']
acc_exp = timezone.now() + timezone.timedelta(days=30)
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='GO',
defaults={
"platform_id": google_account_name, # Save 'accounts/...' ID
"access_token": json.dumps(token_data),
"is_active": True,
"expires_at": acc_exp,
"name": "Google Business"
}
)
messages.success(request, "Connected to Google Business!")
elif platform_type == 'META':
token_data = MetaService.exchange_code_for_tokens(code)
# FIXED: Use BASE_GRAPH_URL from utils for consistency
from apps.social.utils.meta import BASE_GRAPH_URL
# FIXED: Better error handling
res = requests.get(
f"{BASE_GRAPH_URL}/me",
params={
"access_token": token_data['access_token'],
"fields": "name,id"
}
)
# Check for HTTP errors
if res.status_code != 200:
raise Exception(f"Failed to fetch user info: {res.text}")
user_data = res.json()
# Check for API errors
if 'error' in user_data:
error_msg = user_data['error'].get('message', 'Unknown error')
raise Exception(f"API Error: {error_msg}")
# Use the expires_at datetime returned by MetaService
acc_exp = token_data.get('expires_at')
# Save account
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='META',
defaults={
"platform_id": user_data.get('id'),
"access_token": token_data['access_token'],
"is_active": True,
"expires_at": acc_exp,
"name": user_data.get('name', "Meta Account")
}
)
messages.success(request, "Connected to Meta (Facebook & Instagram)!")
elif platform_type == 'TT':
token_data = TikTokService.exchange_code_for_token(code)
# BUSINESS API SPECIFIC:
# The token data must contain advertiser_ids.
# If not present, you might need to call 'advertiser/get/' endpoint here.
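            # A hedged sketch of that fallback call (endpoint and field names per
            # the TikTok Marketing API docs; the settings names are assumptions):
            #   res = requests.get(
            #       "https://business-api.tiktok.com/open_api/v1.3/oauth2/advertiser/get/",
            #       params={"app_id": settings.TIKTOK_APP_ID, "secret": settings.TIKTOK_APP_SECRET},
            #       headers={"Access-Token": token_data['access_token']},
            #   )
            #   advertiser_ids = [a["advertiser_id"] for a in res.json()["data"]["list"]]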
advertiser_ids = token_data.get('advertiser_ids', [])
if not advertiser_ids:
messages.error(request, "No Advertiser ID found for this TikTok account.")
return redirect('social:dashboard')
# We pick the first Advertiser ID associated with the user
advertiser_id = advertiser_ids[0]
acc_exp = timezone.now() + timezone.timedelta(seconds=token_data.get('expires_in', 86400))
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='TT',
defaults={
"platform_id": advertiser_id, # CRITICAL: Save Advertiser ID here
"access_token": token_data['access_token'],
"refresh_token": token_data['refresh_token'],
"is_active": True,
"expires_at": acc_exp,
"name": "TikTok Ads Account"
}
)
messages.success(request, "Connected to TikTok Ads!")
elif platform_type == 'X':
verifier = request.session.get('x_code_verifier')
state_check = request.session.get('x_state')
if not verifier or state != state_check:
raise Exception("Invalid state")
token_data = XService.exchange_code_for_token(code, verifier)
            # Reuse XService._make_request by wrapping the token in an unsaved
            # SocialAccount instance instead of issuing a raw requests call.
temp_acc = SocialAccount(platform_type='X', access_token=token_data['access_token'])
user_info = XService._make_request(
f"users/me",
temp_acc,
"GET"
).get('data', {})
acc_exp = timezone.now() + timezone.timedelta(seconds=token_data.get('expires_in', 7200))
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='X',
defaults={
"platform_id": user_info.get('id'),
"access_token": token_data['access_token'],
"refresh_token": token_data.get('refresh_token'),
"is_active": True,
"expires_at": acc_exp,
"name": user_info.get('username', 'X Account')
}
)
messages.success(request, "Connected to X!")
elif platform_type == 'YT':
# FIX: YouTube Auth - Fetch Uploads Playlist ID
token_data = YouTubeService.exchange_code_for_token(code)
creds = Credentials.from_authorized_user_info(token_data)
youtube = build('youtube', 'v3', credentials=creds)
request_obj = youtube.channels().list(part="snippet,contentDetails", mine=True)
response = request_obj.execute()
if not response.get('items'):
raise Exception("No YouTube channel found for this account")
channel = response['items'][0]
# Get the uploads playlist ID from contentDetails
uploads_playlist_id = channel['contentDetails']['relatedPlaylists']['uploads']
# Inject this ID into the credentials_json so the background task can use it
token_data['uploads_playlist_id'] = uploads_playlist_id
# Fix: Google OAuth credentials use 'expiry' as a timestamp, not duration in seconds
expiry = token_data.get('expiry')
if expiry:
acc_exp = parse_datetime(expiry)
else:
acc_exp = timezone.now() + timezone.timedelta(seconds=3600)
SocialAccount.objects.update_or_create(
owner=request.user,
platform_type='YT',
defaults={
"platform_id": channel.get('id'),
"credentials_json": token_data, # Save with playlist_id inside
"is_active": True,
"expires_at": acc_exp,
"name": channel.get('snippet', {}).get('title', 'YouTube Channel')
}
)
messages.success(request, "Connected to YouTube!")
# Clean up session
if 'x_code_verifier' in request.session:
del request.session['x_code_verifier']
if 'x_state' in request.session:
del request.session['x_state']
except Exception as e:
logger.error(f"Auth callback error for {platform_type}: {str(e)}", exc_info=True)
messages.error(request, f"Connection failed: {str(e)}")
return redirect('social:dashboard')
@csrf_exempt
def meta_webhook(request):
if request.method == 'GET':
mode = request.GET.get('hub.mode')
token = request.GET.get('hub.verify_token')
challenge = request.GET.get('hub.challenge')
# FIX 1: Use settings variable
if mode == 'subscribe' and token == settings.META_WEBHOOK_VERIFY_TOKEN:
return HttpResponse(challenge)
return HttpResponse(status=403)
if request.method == 'POST':
try:
body_raw = request.body
received_sig = request.headers.get('x-hub-signature-256')
if not received_sig:
return HttpResponse(status=403)
is_valid = MetaService.verify_webhook_signature(
received_sig,
body_raw,
settings.META_APP_SECRET # FIX 2: Use settings variable
)
if not is_valid:
return HttpResponse(status=403)
payload = json.loads(body_raw)
for entry in payload.get('entry', []):
for change in entry.get('changes', []):
if change.get('field') == 'comments':
value = change.get('value', {})
comment_id = value.get('id')
page_id = value.get('from', {}).get('id')
if comment_id and page_id:
# FIX 3: Reliability Issue
# We cannot match page_id directly to DB because DB stores User ID.
# We assume this webhook belongs to the first active Meta account found.
# If you support multiple users, you would need a SocialPage mapping table.
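                            # A hedged sketch of that mapping (SocialPage is a
                            # hypothetical model keyed by page_id):
                            #   page = SocialPage.objects.filter(page_id=page_id).select_related('account').first()
                            #   account = page.account if page else None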
account = SocialAccount.objects.filter(platform_type='META', is_active=True).first()
if account:
from apps.social.tasks.meta import process_webhook_comment_task
# Pass the page_id so we can find the correct token later
process_webhook_comment_task.delay(comment_id, page_id, account.id)
else:
logger.warning("Webhook received but no active Meta account found in DB.")
return HttpResponse(status=200)
except Exception as e:
logger.error(f"Webhook Error: {e}")
return HttpResponse(status=200)
return HttpResponse(status=405)
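# A minimal sketch of the check MetaService.verify_webhook_signature is assumed
# to perform, following Meta's documented X-Hub-Signature-256 scheme: the header
# carries 'sha256=' plus the hex HMAC-SHA256 of the raw body, keyed by the app secret.
import hashlib
import hmac

def _verify_meta_signature_sketch(received_sig, body, app_secret):
    """Return True if the 'sha256=<hex>' header matches the body HMAC."""
    expected = 'sha256=' + hmac.new(app_secret.encode(), body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, received_sig)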
@csrf_exempt
def linkedin_webhook(request):
"""
Handle LinkedIn Webhooks (For Partner/Enterprise Accounts).
Standard accounts will not trigger this.
"""
if request.method == 'GET':
# Handshake verification
mode = request.GET.get('hub.mode')
token = request.GET.get('hub.verify_token')
challenge = request.GET.get('hub.challenge')
# Ensure this string matches what you set in LinkedIn Developer Portal
if mode == 'subscribe' and token == settings.LINKEDIN_WEBHOOK_VERIFY_TOKEN:
return HttpResponse(challenge)
return HttpResponse(status=403)
if request.method == 'POST':
try:
body_raw = request.body
received_sig = request.headers.get('X-Li-Signature')
if not received_sig:
return HttpResponse(status=403)
# 1. Verify Signature
is_valid = LinkedInService.verify_webhook_signature(
received_sig,
body_raw,
settings.LINKEDIN_CLIENT_SECRET
)
if not is_valid:
return HttpResponse(status=403)
payload = json.loads(body_raw)
# 2. Process Events
# LinkedIn sends a list of events in the payload
for event in payload:
# We are primarily interested in new comments
if event.get('event') == 'commentCreated':
resource_urn = event.get('resourceUrn')
# Resource URN format: urn:li:comment:(POST_URN, COMMENT_ID)
# Example: urn:li:comment:(urn:li:ugcPost:67890, 12345)
if resource_urn and ":comment:(" in resource_urn:
# Parse the URN to extract the Post ID and Comment ID
try:
# Extract inner part: (urn:li:ugcPost:67890, 12345)
inner_part = resource_urn.split(":comment:(")[1].split(")")[0]
urn_parts = [p.strip() for p in inner_part.split(",")]
if len(urn_parts) >= 2:
post_urn = urn_parts[0]
comment_id = urn_parts[1]
# Find the local Post object to identify the Account
content = SocialContent.objects.filter(content_id=post_urn).first()
if content and content.account:
# Trigger background task to fetch full comment details
from apps.social.tasks.linkedin import process_webhook_comment_task
process_webhook_comment_task.delay(
content.account.id,
post_urn,
comment_id
)
else:
logger.warning(f"LinkedIn Webhook: Post {post_urn} not found in DB.")
except Exception as parse_error:
logger.error(f"LinkedIn Webhook Parse Error: {parse_error}")
return HttpResponse(status=200)
except Exception as e:
logger.critical(f"LinkedIn Webhook System Error: {str(e)}", exc_info=True)
# Always return 200 to LinkedIn to prevent retries on internal server errors
return HttpResponse(status=200)
return HttpResponse(status=405)
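# A self-contained sketch of the URN parsing done inline above, with a worked
# example (the function name is illustrative, not part of the codebase):
def _parse_linkedin_comment_urn_sketch(resource_urn):
    """
    >>> _parse_linkedin_comment_urn_sketch("urn:li:comment:(urn:li:ugcPost:67890, 12345)")
    ('urn:li:ugcPost:67890', '12345')
    """
    inner = resource_urn.split(":comment:(")[1].split(")")[0]
    parts = [p.strip() for p in inner.split(",")]
    return parts[0], parts[1]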