-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathanalyze_twitter_comprehensive.py
More file actions
200 lines (159 loc) · 7.47 KB
/
analyze_twitter_comprehensive.py
File metadata and controls
200 lines (159 loc) · 7.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
#!/usr/bin/env python3
"""
Final comprehensive script to analyze X (Twitter) video post
"""
import requests
import re
import json
from urllib.parse import urlparse, parse_qs
def analyze_tweet_content(url):
    """
    Analyze the tweet page to determine if it contains a video and extract
    relevant information.

    Args:
        url: Full URL of the X/Twitter status page to fetch.

    Returns:
        dict with keys 'status_code', 'has_video_keywords',
        'has_media_keywords', and 'video_urls' on success; None when the
        request fails or the tweet appears deleted/protected.
    """
    print(f"Analyzing: {url}")
    # Browser-like headers: X may serve different (or no) content to
    # unrecognized user agents.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
    }
    try:
        # FIX: the original call had no timeout and could hang indefinitely
        # on a stalled connection.
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()
        print(f"Status Code: {response.status_code}")
        print(f"Content Type: {response.headers.get('Content-Type', 'Unknown')}")
        # Look for any indicators of media content.
        content_lower = response.text.lower()
        # Check for video-related keywords.
        video_keywords = ['video', 'playable-media', 'media-url', 'tweet-video', 'player', 'vid']
        media_keywords = ['media', 'photo', 'image', 'gif']
        found_video_keywords = [kw for kw in video_keywords if kw in content_lower]
        found_media_keywords = [kw for kw in media_keywords if kw in content_lower]
        print(f"Found video-related keywords: {found_video_keywords}")
        print(f"Found media-related keywords: {found_media_keywords}")
        # Check for tweet status (might be deleted, private, etc.).
        if 'not found' in content_lower or '404' in content_lower or 'this account doesn\'t exist' in content_lower:
            print("Tweet may not exist or is unavailable")
            return None
        if 'protected' in content_lower or 'private' in content_lower:
            print("Tweet may be protected or private")
            return None
        # Twitter often embeds tweet data in script tags; show the first
        # pattern that matches, for debugging.
        tweet_data_patterns = [
            r'({"[^{}]*tweet[^{}]*})',
            r'({"[^{}]*status[^{}]*})',
            r'({"[^{}]*media[^{}]*})',
            r'"text".*?"[^"]*"',
        ]
        for pattern in tweet_data_patterns:
            matches = re.findall(pattern, response.text, re.IGNORECASE)
            if matches:
                print(f"Found potential tweet data matches (first 500 chars of first match):")
                print(matches[0][:500] + "..." if len(matches[0]) > 500 else matches[0])
                break
        # Look for actual video URLs in various formats; Twitter video URLs
        # often follow specific patterns.
        video_urls = []
        patterns = [
            r'"(https?://[^"]*video[^"]*\.mp4[^"]*)"',  # Video in URL
            r'"(https?://[^"]*\.mp4[^"]*)"',  # Any MP4
            r'(https?://[^\'"\s<>]*video[^\'"\s<>]*\.mp4[^\'"\s<>]*)',  # Unquoted with 'video'
            r'(https?://[^\'"\s<>]*\.mp4[^\'"\s<>]*)',  # Any MP4 URL
        ]
        for pattern in patterns:
            video_urls.extend(re.findall(pattern, response.text))
        # FIX: deduplicate once instead of rebuilding set() three times, and
        # use a distinct loop variable instead of shadowing the `url`
        # parameter.
        unique_urls = set(video_urls)
        if unique_urls:
            print(f"Found {len(unique_urls)} unique video URLs:")
            for video_url in unique_urls:
                print(f" - {video_url}")
        else:
            print("No direct video URLs found in source")
        return {
            'status_code': response.status_code,
            'has_video_keywords': bool(found_video_keywords),
            'has_media_keywords': bool(found_media_keywords),
            'video_urls': list(unique_urls),
        }
    except requests.exceptions.RequestException as e:
        print(f"Error fetching page: {e}")
        return None
def check_tweet_with_api_proxy(url):
    """
    Try to get tweet information using public API mirror services
    (fxtwitter/vxtwitter) that might have different access.

    Args:
        url: Full URL of the X/Twitter status.

    Returns:
        Parsed JSON dict from the first mirror that returns valid JSON,
        or None when no tweet ID is found in the URL or no mirror responds
        usefully.
    """
    print(f"\nTrying API proxy approach...")
    # Extract tweet ID.
    tweet_id_match = re.search(r'/status/(\d+)', url)
    if not tweet_id_match:
        print("Could not extract tweet ID")
        return None
    tweet_id = tweet_id_match.group(1)
    print(f"Tweet ID: {tweet_id}")
    # FIX: derive the screen name from the URL instead of hard-coding one
    # account, so this helper works for any tweet URL. Fall back to the
    # generic 'i' handle when the URL carries no screen name (the mirrors
    # resolve tweets by ID).
    name_match = re.search(r'(?:x\.com|twitter\.com)/([^/]+)/status/', url)
    screen_name = name_match.group(1) if name_match else 'i'
    # Try public Twitter API proxies.
    # Note: These services may have limitations.
    api_proxies = [
        f"https://api.fxtwitter.com/{screen_name}/status/{tweet_id}",
        f"https://api.vxtwitter.com/{screen_name}/status/{tweet_id}",
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }
    for api_url in api_proxies:
        try:
            # FIX: timeout added so a single dead mirror cannot hang the scan.
            response = requests.get(api_url, headers=headers, timeout=15)
            if response.status_code == 200:
                try:
                    data = json.loads(response.text)
                    print(f"Successfully fetched from: {api_url}")
                    # Check if response has video information.
                    if 'mediaURL' in data or 'video' in str(data).lower():
                        print("Video content detected in API response")
                        if 'mediaURL' in data:
                            print(f"Media URL: {data['mediaURL']}")
                        return data
                    # Print some relevant information.
                    for key in ['text', 'media', 'video', 'url', 'links']:
                        if key in data:
                            print(f"{key.capitalize()}: {data[key]}")
                    return data
                except json.JSONDecodeError:
                    print(f"Response from {api_url} is not valid JSON")
                    print(f"Response preview: {response.text[:500]}...")
        except Exception as e:
            print(f"Error accessing {api_url}: {e}")
    return None
def main():
    """Run both analysis strategies against the target tweet and summarize."""
    target = "https://x.com/SpencerHakimian/status/1986221027198927235"
    divider = "=" * 60
    print(divider)
    print("COMPREHENSIVE X VIDEO ANALYSIS")
    print(divider)
    # Strategy 1: scrape the public status page directly.
    page_result = analyze_tweet_content(target)
    if page_result:
        print(f"\nBasic Analysis Results:")
        print(f" Status Code: {page_result['status_code']}")
        print(f" Has Video Keywords: {page_result['has_video_keywords']}")
        print(f" Has Media Keywords: {page_result['has_media_keywords']}")
        print(f" Video URLs Found: {len(page_result['video_urls'])}")
    # Strategy 2: query third-party API mirrors.
    proxy_result = check_tweet_with_api_proxy(target)
    if page_result or proxy_result:
        hint = '' if proxy_result else 'Try authenticating with Twitter/X to access the content.'
        print(f"\nAnalysis complete. {hint}")
    else:
        print("\nCould not access tweet information. Possible reasons:")
        for reason in (
            "- Tweet is private or protected",
            "- Tweet has been deleted",
            "- X has blocked access from this location/user agent",
            "- Tweet doesn't contain video content",
            "- Content requires authentication",
        ):
            print(reason)
if __name__ == "__main__":
    main()