aurman committed on
Commit
0e252eb
·
verified ·
1 Parent(s): 5efe06f

Upload trendsscraper_public.py

Browse files
Files changed (1) hide show
  1. trendsscraper_public.py +109 -0
trendsscraper_public.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ Created on Thu Nov 28 11:04:34 2024
5
+
6
+ @author: aleksandra urman
7
+ """
8
+
9
+ #this is a single iteration of the scraper, to run daily we have a cron job set up
10
+
11
+ import asyncio
12
+ from playwright.async_api import async_playwright
13
+ import os
14
+ import pandas as pd
15
+ import random
16
+ import time
17
+
18
# Record the directory the script was launched from; reused later when
# writing the optional execution-time report.
current_dir = os.getcwd()

# If the launch directory is wrong, point the script at the project root here.
#os.chdir('')

# Master list of trend locations (released as part of the dataset); its
# 'tag' column appears to hold Google Trends geo codes (e.g. 'US') — the
# scraper substitutes each one into the trends URL below.
df = pd.read_csv('Trends_LocationList.csv', encoding='utf-8')
27
+
28
# Scrape the trending-searches CSV export for a single location tag.
async def scrape_data(playwright, tag):
    """Download the Google Trends "trending now" CSV for one geo tag.

    Opens the 24-hour trending page for *tag* in headless Chromium, clicks
    Export -> "Download CSV", and saves the file under ./data/<tag>/.

    Parameters
    ----------
    playwright :
        An active ``async_playwright`` context object.
    tag :
        Geo code taken from the master list (substituted into the URL).
    """
    # Headless for production; for debugging, run once with headless=False.
    browser = await playwright.chromium.launch(headless=True)
    try:
        # Per-tag download directory: <cwd>/data/<tag>/
        tag_dir = os.path.join(os.getcwd(), "data", str(tag))
        os.makedirs(tag_dir, exist_ok=True)

        # Downloads must be explicitly enabled on the browser context.
        context = await browser.new_context(accept_downloads=True)
        page = await context.new_page()

        url = f"https://trends.google.com/trending?geo={tag}&hours=24"
        await page.goto(url, wait_until="networkidle")

        # Randomized pauses: let the UI settle and avoid a fixed request rhythm.
        await asyncio.sleep(random.randint(1, 5))

        await page.locator("button", has_text="Export").click()
        await asyncio.sleep(random.randint(1, 5))  # adjust if less time is sufficient

        # Capture the download triggered by the CSV menu item.
        async with page.expect_download() as download_info:
            await page.get_by_role("menuitem", name="Download CSV").click()
        download = await download_info.value

        # Save under the server-suggested filename inside the tag directory.
        save_path = os.path.join(tag_dir, download.suggested_filename)
        await download.save_as(save_path)
        print(f"Downloaded data for tag: {tag} into {save_path}")
    finally:
        # Always release the browser, even when a click or the download fails;
        # without this, one bad tag in a long run leaks a Chromium process
        # (closing the browser also closes its contexts/pages).
        await browser.close()
68
+
69
+ """
70
+ # FOR TESTS ONLY to iterate through the first 3 tags
71
+ async def main():
72
+ async with async_playwright() as playwright:
73
+ # Get the first 3 tags
74
+ first_three_tags = df['tag'][:1]
75
+
76
+ # Iterate through these tags and scrape data
77
+ for tag in first_three_tags:
78
+ try:
79
+ await scrape_data(playwright, tag)
80
+ except Exception as e:
81
+ print(f"Error scraping data for tag {tag}: {e}")
82
+
83
+ """
84
+
85
+ # Main function to iterate through tags
86
+ async def main():
87
+ async with async_playwright() as playwright:
88
+ for tag in df['tag']:
89
+ try:
90
+ await scrape_data(playwright, tag)
91
+ except Exception as e:
92
+ print(f"Error scraping data for tag {tag}: {e}")
93
+
94
+
95
# Entry point: run one full scrape iteration and record its wall-clock time.
# The header comment says a cron job runs this script daily, but the original
# upload had asyncio.run(main()) commented out, so a cron invocation did
# nothing beyond reading the CSV — restored here behind a __main__ guard so
# importing the module stays side-effect free.
if __name__ == "__main__":
    start_time = time.time()  # start timing
    asyncio.run(main())       # run one full iteration over all tags
    end_time = time.time()    # end timing

    total_time = end_time - start_time

    # Persist the duration next to the script so cron runs can be monitored
    # without capturing stdout.
    time_file_path = os.path.join(current_dir, "execution_time.txt")
    with open(time_file_path, "w") as time_file:
        time_file.write(f"Total execution time: {total_time:.2f} seconds\n")

    print(f"Total execution time: {total_time:.2f} seconds. Saved to 'execution_time.txt'.")