Added basic Amazon crawler using beautifulsoup4

henningxtro 2021-05-16 22:05:32 +02:00
parent dbc793cc08
commit 2067a47fb2
2 changed files with 93 additions and 82 deletions


@@ -1,80 +1,90 @@
 import sql
-import amazonspider
+import requests
+from bs4 import BeautifulSoup
+
+HEADERS = ({'User-Agent':
+            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 '
+            'Safari/537.36'})
 
 
 def crawl(product_ids: [int]) -> dict:
     """
     Crawls the given list of products and saves the results to sql
     :param products: The list of product IDs to fetch
     :return: A dict with the following fields:
         total_crawls: number of total crawl tries (products * vendors per product)
         successful_crawls: number of successful products
         products_with_problems: list of products that have not been crawled successfully
     """
     total_crawls = 0
     successful_crawls = 0
     products_with_problems = []
 
     # Iterate over every product that has to be crawled
     for product_id in product_ids:
         # Get all links for this product
         product_links = sql.getProductLinksForProduct(product_id)
 
         crawled_data = []
 
         # Iterate over every link / vendor
         for product_vendor_info in product_links:
             total_crawls += 1
 
             # Call the appropriate vendor crawling function and append the result to the list of crawled data
             if product_vendor_info['vendor_id'] == 1:
                 # Amazon
                 crawled_data.append(__crawl_amazon__(product_vendor_info))
             elif product_vendor_info['vendor_id'] == 2:
                 # Apple
                 crawled_data.append(__crawl_apple__(product_vendor_info))
             elif product_vendor_info['vendor_id'] == 3:
                 # Media Markt
                 crawled_data.append(__crawl_mediamarkt__(product_vendor_info))
             else:
                 products_with_problems.append(product_vendor_info)
                 continue
 
             successful_crawls += 1
 
         # Insert data to SQL
         sql.insertData(crawled_data)
 
     return {
         'total_crawls': total_crawls,
         'successful_crawls': successful_crawls,
         'products_with_problems': products_with_problems
     }
 
 
 def __crawl_amazon__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from amazon
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
-    amazonspider.start_crawling()
-    return (product_info['product_id'], product_info['vendor_id'], 123)
+    page = requests.get(product_info['url'], headers=HEADERS)
+    soup = BeautifulSoup(page.content, features="lxml")
+    try:
+        price = int(soup.find(id='priceblock_ourprice').get_text().replace(".", "").replace(",", "").replace("€", "").strip())
+    except RuntimeError:
+        price = ''
+
+    return (product_info['product_id'], product_info['vendor_id'], price)
 
 
 def __crawl_apple__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from apple
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
     return (product_info['product_id'], product_info['vendor_id'], 123)
 
 
 def __crawl_mediamarkt__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from media markt
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
     pass
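
One detail of the new __crawl_amazon__ body is worth flagging. When the priceblock_ourprice element is missing (blocked request, captcha page, or a changed layout), soup.find() returns None, so the chained .get_text() raises AttributeError; a non-numeric string makes int() raise ValueError. Neither is a RuntimeError, so the except clause above never fires. Below is a minimal sketch of a more defensive parser; parse_price_cents is a hypothetical helper name, not part of this commit:

import requests
from bs4 import BeautifulSoup

HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}


def parse_price_cents(url: str):
    """Fetch an Amazon product page and return the price in cents, or None on failure."""
    page = requests.get(url, headers=HEADERS, timeout=10)
    soup = BeautifulSoup(page.content, features="lxml")
    tag = soup.find(id='priceblock_ourprice')
    if tag is None:
        # Price element missing: blocked request or changed page layout
        return None
    # e.g. '1.299,00 €' -> '129900' (German formatting: dots, comma and € stripped)
    digits = tag.get_text().replace(".", "").replace(",", "").replace("€", "").strip()
    try:
        return int(digits)
    except ValueError:
        # Unexpected text inside the price element
        return None

Returning None instead of '' also keeps the third tuple element a consistent int-or-None, which is presumably easier for sql.insertData to store than a mixed int/str value.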


@@ -1,5 +1,6 @@
 pymysql
-flask
+flask==1.1.2
 flask-sqlalchemy
 flask_restful
-scrapy
+beautifulsoup4
+requests
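
One dependency note on this list: the crawler builds its parser with features="lxml", but lxml itself is not listed here, and beautifulsoup4 does not pull it in as a dependency. On a clean install from this file, the first BeautifulSoup(...) call raises bs4.FeatureNotFound. A small sketch of the failure mode and a stdlib fallback, assuming lxml has not been installed some other way:

from bs4 import BeautifulSoup, FeatureNotFound

try:
    # Fails with FeatureNotFound when the lxml package is not importable
    soup = BeautifulSoup("<html></html>", features="lxml")
except FeatureNotFound:
    # html.parser ships with the standard library, so no extra dependency
    soup = BeautifulSoup("<html></html>", features="html.parser")

Adding lxml as a line in this requirements file would be the more direct fix.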