-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathweb_scraper.py
More file actions
51 lines (37 loc) · 1.5 KB
/
web_scraper.py
File metadata and controls
51 lines (37 loc) · 1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import requests
from bs4 import BeautifulSoup
import csv
# The Problem: Gathering data manually takes too long.
# The Solution: Scrape quotes and authors into a CSV file.
# Base URL of the scraping-practice demo site (quotes.toscrape.com).
URL = "http://quotes.toscrape.com"
def scrape_quotes():
    """Download the quotes page at URL, extract (author, quote) pairs, and
    save them to a CSV file via save_to_csv().

    Prints progress and errors to stdout; returns None.
    """
    print(f"Connecting to {URL}...")
    try:
        # timeout= prevents the script from hanging forever on an
        # unresponsive host (requests has no default timeout).
        response = requests.get(URL, timeout=10)
        # Check if the website accepted our request (Status 200 = OK)
        if response.status_code != 200:
            print(f"Error: Failed to load page. Status code: {response.status_code}")
            return
        # Parse the HTML content
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find all quote blocks (specific to this website structure)
        quotes = soup.find_all('div', class_='quote')
        data = []
        print(f"Found {len(quotes)} quotes. Extracting data...")
        for quote in quotes:
            text_tag = quote.find('span', class_='text')
            author_tag = quote.find('small', class_='author')
            # Guard against malformed blocks: .find() returns None when the
            # tag is absent, and calling .get_text() on None would crash the
            # whole run. Skip the bad block instead.
            if text_tag is None or author_tag is None:
                continue
            data.append([author_tag.get_text(), text_tag.get_text()])
        # Save to CSV (Excel compatible)
        save_to_csv(data)
    except requests.RequestException as e:
        # Network-level failures only (DNS, refused connection, timeout...);
        # unexpected programming errors now propagate instead of being hidden.
        print(f"An error occurred: {e}")
def save_to_csv(data):
    """Write rows of [author, quote] pairs to 'quotes_data.csv'.

    Args:
        data: iterable of two-item row sequences, e.g. [author, quote].

    Writes a header row first; overwrites any existing file.
    """
    filename = 'quotes_data.csv'
    # newline='' stops csv from emitting blank rows on Windows;
    # utf-8 keeps curly quotes and accented names intact.
    with open(filename, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['Author', 'Quote'])  # Header
        writer.writerows(data)
    # Bug fix: the original printed the literal placeholder '(unknown)'
    # instead of interpolating the filename into the f-string.
    print(f"Success! Data saved to '{filename}'")
# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    scrape_quotes()