"""03GetTweetsFor800Users: collect recent tweets for a target set of users via the TweetScout API."""

import aiohttp
import asyncio
import json
import logging
import sqlite3
import pandas as pd
from datetime import datetime, timedelta
from email.utils import parsedate_to_datetime
from typing import List, Dict, Set, Optional, Tuple
from pathlib import Path
class QuotaLowException(Exception):
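    """Raised when the remaining API quota drops below the reserve."""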
    pass
class ArticleCrawler:
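    """Collects tweets for users listed in a source SQLite database and stores them in an output SQLite database."""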
    def __init__(self, api_key: str, source_db: str, target_user_count: int = 800, output_db: str = "tweets.db"):
        self.api_key = api_key
        self.base_url = "https://api.tweetscout.io/v2"
        self.source_db = source_db
        self.target_user_count = target_user_count
        self.db_path = output_db
        self.headers = {
            "ApiKey": api_key,
            "Accept": "application/json"
        }
        self.processed_users_file = "processed_users.json"
        self.no_tweets_users_file = "no_tweets_users.json"
        self.processed_users: Set[str] = self._load_processed_users()
        self.no_tweets_users: Set[str] = self._load_no_tweets_users()
        self.api_call_count = 0
        self.warning_threshold = 5500  # start quota checks after 5500 API calls
        self.last_save_time = datetime.now()
        self.save_interval = timedelta(minutes=5)  # persist progress every 5 minutes
        self._setup_logging()
        self._init_database()
    def _load_processed_users(self) -> Set[str]:
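        """Load the set of already-processed user IDs from disk, if present."""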
        if Path(self.processed_users_file).exists():
            try:
                with open(self.processed_users_file, 'r') as f:
                    return set(json.load(f))
            except Exception as e:
                logging.error(f"Error loading processed users: {e}")
                return set()
        return set()
    def _load_no_tweets_users(self) -> Set[str]:
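        """Load the set of user IDs known to have no tweets, if present."""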
        if Path(self.no_tweets_users_file).exists():
            try:
                with open(self.no_tweets_users_file, 'r') as f:
                    return set(json.load(f))
            except Exception as e:
                logging.error(f"Error loading no tweets users: {e}")
                return set()
        return set()
    def _save_processed_users(self):
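        """Persist the processed-user set to disk."""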
        try:
            with open(self.processed_users_file, 'w') as f:
                json.dump(list(self.processed_users), f)
        except Exception as e:
            logging.error(f"Error saving processed users: {e}")
    def _save_no_tweets_users(self):
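        """Persist the no-tweets user set to disk."""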
        try:
            with open(self.no_tweets_users_file, 'w') as f:
                json.dump(list(self.no_tweets_users), f)
        except Exception as e:
            logging.error(f"Error saving no tweets users: {e}")
    def _get_next_rotation_time(self):
        """Compute the time of the next log-file rotation."""
        now = datetime.now()
        if now.hour < 12:
            # Before noon, the next rotation is at 12:00 today.
            next_rotation = now.replace(hour=12, minute=0, second=0, microsecond=0)
        else:
            # After noon, the next rotation is at 00:00 the following day.
            next_rotation = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        return next_rotation
    def get_progress_stats(self) -> Dict:
        """Return current progress statistics."""
        try:
            if not hasattr(self, 'cursor') or self.cursor is None:
                return {
                    'user_count': 0,
                    'tweet_count': 0,
                    'start_time': None,
                    'last_update': None,
                    'processed_users': len(self.processed_users),
                    'no_tweets_users': len(self.no_tweets_users)
                }
            self.cursor.execute("""
                SELECT
                    COUNT(DISTINCT author_id) as user_count,
                    COUNT(*) as tweet_count,
                    MIN(collected_at) as start_time,
                    MAX(collected_at) as last_update
                FROM tweets
            """)
            stats = dict(zip(['user_count', 'tweet_count', 'start_time', 'last_update'],
                             self.cursor.fetchone()))
            stats['processed_users'] = len(self.processed_users)
            stats['no_tweets_users'] = len(self.no_tweets_users)
            return stats
        except Exception as e:
            self.logger.error(f"Error getting progress stats: {e}")
            return {
                'user_count': 0,
                'tweet_count': 0,
                'start_time': None,
                'last_update': None,
                'processed_users': len(self.processed_users),
                'no_tweets_users': len(self.no_tweets_users)
            }
    def _rotate_log_file(self):
        """Switch logging over to a new log file."""
        # Close and remove any existing file handlers.
        for handler in self.logger.handlers[:]:
            if isinstance(handler, logging.FileHandler):
                handler.close()
                self.logger.removeHandler(handler)
        # Create a new file handler using UTF-8 encoding.
        new_log_file = f"crawler_{datetime.now().strftime('%Y%m%d_%H%M')}.log"
        file_handler = logging.FileHandler(new_log_file, encoding='utf-8')
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(file_handler)
        # Schedule the next rotation.
        self.next_log_rotation = self._get_next_rotation_time()
        # Log current progress statistics.
        stats = self.get_progress_stats()
        self.logger.info(f"Rotated to new log file: {new_log_file}")
        self.logger.info(f"Progress Stats: {json.dumps(stats, default=str)}")
    def _setup_logging(self):
        """Configure the logging system."""
        self.logger = logging.getLogger("ArticleCrawler")
        self.logger.setLevel(logging.INFO)
        # Add a console handler.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(console_handler)
        # Open the initial log file.
        self.next_log_rotation = self._get_next_rotation_time()
        self._rotate_log_file()
    def _init_database(self):
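        """Open the output SQLite database and create the tweets table and indexes."""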
        self.conn = sqlite3.connect(
            self.db_path,
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
            timeout=30
        )
        self.conn.execute("PRAGMA journal_mode=WAL")
        self.conn.execute("PRAGMA synchronous=NORMAL")
        self.cursor = self.conn.cursor()
        self.cursor.executescript('''
            CREATE TABLE IF NOT EXISTS tweets (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                tweet_id TEXT UNIQUE,
                author_id TEXT,
                author_name TEXT,
                full_text TEXT,
                created_at TIMESTAMP,
                likes_count INTEGER DEFAULT 0,
                retweets_count INTEGER DEFAULT 0,
                replies_count INTEGER DEFAULT 0,
                views_count INTEGER DEFAULT 0,
                quote_count INTEGER DEFAULT 0,
                is_quote_status BOOLEAN DEFAULT 0,
                conversation_id TEXT,
                in_reply_to_status_id TEXT,
                quoted_status TEXT,             -- quoted tweet, stored as JSON
                retweeted_status TEXT,          -- retweeted tweet, stored as JSON
                entities TEXT,                  -- links, media and other entities, stored as JSON
                author_avatar TEXT,             -- author avatar URL
                author_description TEXT,        -- author bio
                author_followers_count INT,     -- author follower count
                author_friends_count INT,       -- author following count
                collected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );
            CREATE INDEX IF NOT EXISTS idx_tweet_author ON tweets(author_id);
            CREATE INDEX IF NOT EXISTS idx_tweet_id ON tweets(tweet_id);
            CREATE INDEX IF NOT EXISTS idx_tweet_created ON tweets(created_at);
        ''')
        self.conn.commit()
    def get_successful_user_count(self) -> int:
        """Return the number of users whose tweets have already been collected."""
        try:
            self.cursor.execute(
                "SELECT COUNT(DISTINCT author_id) FROM tweets"
            )
            return self.cursor.fetchone()[0]
        except Exception as e:
            self.logger.error(f"Error getting successful user count: {e}")
            return len(self.processed_users)
    def load_users(self) -> List[Dict]:
        source_conn = sqlite3.connect(self.source_db)
        # Work out how many more users still need to be processed.
        successful_count = self.get_successful_user_count()
        remaining_target = max(0, self.target_user_count - successful_count)
        # If the target has been reached, return an empty list.
        if remaining_target <= 0:
            self.logger.info(f"Already reached target user count: {self.target_user_count}")
            source_conn.close()
            return []
        # Fetch extra users, since some may turn out to have no tweets.
        fetch_limit = remaining_target * 2
        # Exclude users already processed or known to have no tweets.
        excluded_users = self.processed_users.union(self.no_tweets_users)
        if excluded_users:
            placeholders = ','.join(['?' for _ in excluded_users])
            query = f"""
                SELECT user_id, name
                FROM users
                WHERE user_id NOT IN ({placeholders})
                LIMIT {fetch_limit}
            """
            df = pd.read_sql_query(query, source_conn, params=list(excluded_users))
        else:
            query = f"""
                SELECT user_id, name
                FROM users
                LIMIT {fetch_limit}
            """
            df = pd.read_sql_query(query, source_conn)
        source_conn.close()
        users = df.to_dict('records')
        self.logger.info(
            f"Loaded {len(users)} new users to process. "
            f"Current successful users: {successful_count}, Target: {self.target_user_count}"
        )
        return users
    async def check_api_quota(self) -> bool:
        """Check the API quota; return False when the quota is running low."""
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.base_url}/quota", headers=self.headers) as response:
                    if response.status == 200:
                        quota_data = await response.json()
                        remaining = quota_data.get('remaining', 0)
                        self.logger.info(f"Current API quota remaining: {remaining}")
                        return remaining >= 100  # keep a reserve of 100 calls
        except Exception as e:
            self.logger.error(f"Error checking API quota: {e}")
            return False
        return True  # non-200 responses fall through: assume the quota is still OK
    async def get_user_tweets(self, user: Dict, max_tweets: int = 200) -> List[Dict]:
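        """Fetch up to max_tweets tweets for one user, paging with the API cursor."""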
        try:
            url = f"{self.base_url}/user-tweets"
            all_tweets = []
            cursor = ""
            retries = 3
            while len(all_tweets) < max_tweets:
                # Past the warning threshold, re-check the remaining quota every 100 calls.
                if self.api_call_count >= self.warning_threshold and self.api_call_count % 100 == 0:
                    self.logger.warning(f"API calls: {self.api_call_count}, checking quota...")
                    if not await self.check_api_quota():
                        raise QuotaLowException("API quota running low")
                data = {
                    "user_id": user['user_id'],
                    "cursor": cursor,
                    "limit": 200
                }
                got_response = False
                for attempt in range(retries):
                    try:
                        async with aiohttp.ClientSession() as session:
                            async with session.post(url, headers=self.headers, json=data, timeout=30) as response:
                                if response.status == 200:
                                    got_response = True
                                    self.api_call_count += 1
                                    response_data = await response.json()
                                    tweets = response_data.get('tweets', [])
                                    if not tweets:
                                        self.logger.info(
                                            f"No more tweets available for user {user['name']}. "
                                            f"Collected {len(all_tweets)} tweets."
                                        )
                                        return all_tweets
                                    # Tag each tweet with its type.
                                    for tweet in tweets:
                                        if tweet.get('retweeted_status'):
                                            tweet['tweet_type'] = 'retweet'
                                        elif tweet.get('is_quote_status'):
                                            tweet['tweet_type'] = 'quote'
                                        else:
                                            tweet['tweet_type'] = 'original'
                                    all_tweets.extend(tweets)
                                    # Count each tweet type for logging.
                                    type_counts = {
                                        'original': len([t for t in tweets if t['tweet_type'] == 'original']),
                                        'retweet': len([t for t in tweets if t['tweet_type'] == 'retweet']),
                                        'quote': len([t for t in tweets if t['tweet_type'] == 'quote'])
                                    }
                                    self.logger.info(
                                        f"User {user['name']}: Got {len(tweets)} tweets "
                                        f"(Original: {type_counts['original']}, "
                                        f"Retweet: {type_counts['retweet']}, "
                                        f"Quote: {type_counts['quote']}). "
                                        f"Current total: {len(all_tweets)}"
                                    )
                                    cursor = response_data.get('next_cursor')
                                    if not cursor or len(all_tweets) >= max_tweets:
                                        # No more pages, or target reached: truncate to the requested count.
                                        return all_tweets[:max_tweets]
                                    break  # page fetched successfully, leave the retry loop
                                elif response.status == 429:
                                    wait_time = int(response.headers.get('Retry-After', 60))
                                    self.logger.warning(f"Rate limited, waiting {wait_time} seconds")
                                    await asyncio.sleep(wait_time)
                                elif response.status == 403:
                                    self.logger.error(f"Access forbidden for user {user['name']}")
                                    return []  # give up only when access is forbidden
                                elif response.status == 404:
                                    self.logger.error(f"User {user['name']} not found")
                                    return []  # the user does not exist
                                else:
                                    self.logger.error(
                                        f"Failed to get tweets for {user['name']}: "
                                        f"Status {response.status}, Response: {await response.text()}"
                                    )
                                    if attempt == retries - 1:
                                        if got_response:
                                            return all_tweets  # return whatever was collected earlier
                                        return []
                    except Exception as e:
                        self.logger.error(f"Request error: {str(e)}")
                        if attempt == retries - 1:
                            if got_response:
                                return all_tweets  # return whatever was collected earlier
                            return []
                        await asyncio.sleep(5)
                await asyncio.sleep(3)  # pause between paging requests
            return all_tweets  # target count reached
        except QuotaLowException:
            raise
        except Exception as e:
            self.logger.error(f"Error getting tweets for {user['name']}: {str(e)}")
            return []
    def save_tweet(self, user: Dict, tweet_data: Dict) -> bool:
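        """Insert or update one tweet row; return True on success."""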
        try:
            created_at = parsedate_to_datetime(tweet_data.get('created_at'))
            # Extract the embedded author info.
            user_info = tweet_data.get('user', {})
            # Prepare the quoted-tweet and retweeted-tweet payloads.
            quoted_status = tweet_data.get('quoted_status')
            retweeted_status = tweet_data.get('retweeted_status')
            self.cursor.execute('''
                INSERT OR REPLACE INTO tweets
                (tweet_id, author_id, author_name, full_text, created_at,
                 likes_count, retweets_count, replies_count, views_count,
                 quote_count, is_quote_status, conversation_id, in_reply_to_status_id,
                 quoted_status, retweeted_status, entities,
                 author_avatar, author_description, author_followers_count,
                 author_friends_count, collected_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
            ''', (
                tweet_data.get('id_str'),
                user['user_id'],
                user['name'],
                tweet_data.get('full_text', ''),
                created_at,
                tweet_data.get('favorite_count', 0),
                tweet_data.get('retweet_count', 0),
                tweet_data.get('reply_count', 0),
                tweet_data.get('view_count', 0),
                tweet_data.get('quote_count', 0),
                tweet_data.get('is_quote_status', False),
                tweet_data.get('conversation_id_str'),
                tweet_data.get('in_reply_to_status_id_str'),
                json.dumps(quoted_status) if quoted_status else None,
                json.dumps(retweeted_status) if retweeted_status else None,
                json.dumps(tweet_data.get('entities', {})),
                user_info.get('avatar'),
                user_info.get('description'),
                user_info.get('followers_count'),
                user_info.get('friends_count')
            ))
            self.conn.commit()
            return True
        except Exception as e:
            self.logger.error(f"Error saving tweet: {str(e)}")
            self.conn.rollback()
            return False
    async def process_user(self, user: Dict):
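        """Fetch and store tweets for a single user, tracking progress on disk."""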
        try:
            # Rotate the log file if a rotation is due.
            if datetime.now() >= self.next_log_rotation:
                self._rotate_log_file()
            # Periodically persist progress to disk.
            if datetime.now() - self.last_save_time > self.save_interval:
                self._save_processed_users()
                self._save_no_tweets_users()
                self.last_save_time = datetime.now()
            if user['user_id'] in self.processed_users or user['user_id'] in self.no_tweets_users:
                self.logger.info(f"Skipping already processed user {user['name']}")
                return
            tweets = await self.get_user_tweets(user, max_tweets=100)
            if not tweets:
                self.logger.info(f"No tweets found for user {user['name']}")
                self.no_tweets_users.add(user['user_id'])
                self._save_no_tweets_users()
                return
            self.logger.info(f"Processing {len(tweets)} tweets for user {user['name']}")
            success = True
            for tweet in tweets:
                if not self.save_tweet(user, tweet):
                    success = False
                    break
            if success:
                self.processed_users.add(user['user_id'])
                self._save_processed_users()
                self.logger.info(f"Successfully processed user {user['name']}")
        except QuotaLowException:
            raise
        except Exception as e:
            self.logger.error(f"Error processing user {user['name']}: {str(e)}")
    async def run(self):
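        """Main loop: process users in batches until the target count or the quota limit is reached."""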
        try:
            while True:
                users = self.load_users()
                if not users:
                    self.logger.info("No more users to process or reached target count")
                    break
                for user in users:
                    try:
                        await self.process_user(user)
                        # Stop once the target user count has been reached.
                        if self.get_successful_user_count() >= self.target_user_count:
                            self.logger.info(f"Reached target user count: {self.target_user_count}")
                            return
                    except QuotaLowException:
                        self.logger.info(f"API quota running low, stopping at user {user['name']}")
                        return
                    await asyncio.sleep(2)  # throttle the request rate
        except Exception as e:
            self.logger.error(f"Unexpected error in main loop: {str(e)}")
            raise  # re-raise so the caller can handle it
        finally:
            try:
                self._save_processed_users()
                self._save_no_tweets_users()
                self.conn.close()
            except Exception as e:
                self.logger.error(f"Error during cleanup: {str(e)}")
async def main():
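    """Entry point: build the crawler and run it until completion or interruption."""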
    crawler = None
    try:
        crawler = ArticleCrawler(
            api_key="Enter your API KEY",
            source_db="filtered_users.db",
            target_user_count=800,  # target number of users
            output_db="tweetsFor800.db"  # output database file name
        )
        await crawler.run()
    except KeyboardInterrupt:
        print("\nReceived shutdown signal, cleaning up...")
    except Exception as e:
        print(f"Critical error: {str(e)}")
    finally:
        if crawler:
            crawler.conn.close()  # make sure the database connection is closed
if __name__ == "__main__":
    asyncio.run(main())