BETTERZON-58 (#53)

* BETTERZON-58: Basic Functionality with scrapy

* Added independent crawler function, yielding price

* moved logic to amazon.py

* moved scrapy files to unused folder

* Added basic amazon crawler using beautifulsoup4

* Connected the API to the crawler

* Fixed string concatenation for SQL statement in getProductLinksForProduct

* BETTERZON-58: Fixing SQL insert

* BETTERZON-58: Adding access key verification

* BETTERZON-58: Fixing API endpoint of the crawler
- The list of products in the API request was treated like a string, so only the first product was crawled

* Added another selector for price on Amazon (does not work for books)

Co-authored-by: root <root@DESKTOP-ARBPL82.localdomain>
Co-authored-by: Patrick Müller <patrick@mueller-patrick.tech>
Co-authored-by: Patrick <50352812+Mueller-Patrick@users.noreply.github.com>
Authored by henningxtro on 2021-05-19 00:46:14 +02:00, committed by GitHub
parent 3ae68b3df3, commit 26ba21156a
15 changed files with 184 additions and 153 deletions

View File

@@ -2,13 +2,13 @@
 <module type="WEB_MODULE" version="4">
   <component name="FacetManager">
     <facet type="Python" name="Python">
-      <configuration sdkName="Python 3.9" />
+      <configuration sdkName="Python 3.9 (venv)" />
     </facet>
   </component>
   <component name="NewModuleRootManager" inherit-compiler-output="true">
     <exclude-output />
     <content url="file://$MODULE_DIR$" />
     <orderEntry type="sourceFolder" forTests="false" />
-    <orderEntry type="library" name="Python 3.9 interpreter library" level="application" />
+    <orderEntry type="library" name="Python 3.9 (venv) interpreter library" level="application" />
   </component>
 </module>

View File

@@ -1,13 +1,17 @@
+import os
+
 from flask import Flask
 from flask_restful import Resource, Api, reqparse
+import crawler
 
 app = Flask(__name__)
 api = Api(app)
 
 # To parse request data
 parser = reqparse.RequestParser()
-parser.add_argument('key')
-parser.add_argument('products')
+parser.add_argument('key', type=str)
+parser.add_argument('products', type=int, action='append')
 
 
 class CrawlerApi(Resource):
@@ -17,7 +21,12 @@ class CrawlerApi(Resource):
     def post(self):
         # Accept crawler request here
         args = parser.parse_args()
-        return args
+        access_key = os.getenv('CRAWLER_ACCESS_KEY')
+        if(args['key'] == access_key):
+            crawler.crawl(args['products'])
+            return {'message': 'success'}
+        else:
+            return {'message': 'Wrong access key'}
 
 
 api.add_resource(CrawlerApi, '/')
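
Note on the endpoint fix from the commit message: with action='append', reqparse collects every repeated products field into a Python list of ints instead of a single string, which is why all products get crawled now rather than just the first. A minimal client sketch; the host, port, and key value below are assumptions, not part of this commit:

    import requests

    # Hypothetical local address of the Flask API above.
    CRAWLER_API = 'http://localhost:5000/'

    response = requests.post(CRAWLER_API, data={
        'key': 'my-access-key',  # checked against the CRAWLER_ACCESS_KEY env variable
        'products': [1, 2, 3],   # sent as repeated form fields, parsed back into a list
    })
    print(response.json())       # {'message': 'success'} or {'message': 'Wrong access key'}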

View File

@@ -1,78 +1,107 @@
 import sql
+import requests
+from bs4 import BeautifulSoup
+
+HEADERS = ({'User-Agent':
+                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 '
+                'Safari/537.36'})
 
 
 def crawl(product_ids: [int]) -> dict:
     """
     Crawls the given list of products and saves the results to sql
     :param products: The list of product IDs to fetch
     :return: A dict with the following fields:
         total_crawls: number of total crawl tries (products * vendors per product)
         successful_crawls: number of successful products
         products_with_problems: list of products that have not been crawled successfully
     """
     total_crawls = 0
     successful_crawls = 0
     products_with_problems = []
 
     # Iterate over every product that has to be crawled
     for product_id in product_ids:
         # Get all links for this product
         product_links = sql.getProductLinksForProduct(product_id)
 
         crawled_data = []
 
         # Iterate over every link / vendor
         for product_vendor_info in product_links:
             total_crawls += 1
 
             # Call the appropriate vendor crawling function and append the result to the list of crawled data
             if product_vendor_info['vendor_id'] == 1:
                 # Amazon
-                crawled_data.append(__crawl_amazon__(product_vendor_info))
+                data = __crawl_amazon__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
             elif product_vendor_info['vendor_id'] == 2:
                 # Apple
-                crawled_data.append(__crawl_apple__(product_vendor_info))
+                data = __crawl_apple__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
             elif product_vendor_info['vendor_id'] == 3:
                 # Media Markt
-                crawled_data.append(__crawl_mediamarkt__(product_vendor_info))
+                data = __crawl_mediamarkt__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
             else:
                 products_with_problems.append(product_vendor_info)
                 continue
 
             successful_crawls += 1
 
         # Insert data to SQL
         sql.insertData(crawled_data)
 
     return {
         'total_crawls': total_crawls,
         'successful_crawls': successful_crawls,
         'products_with_problems': products_with_problems
     }
 
 
 def __crawl_amazon__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from amazon
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
-    return (product_info['product_id'], product_info['vendor_id'], 123)
+    page = requests.get(product_info['url'], headers=HEADERS)
+    soup = BeautifulSoup(page.content, features="lxml")
+    try:
+        price = int(
+            soup.find(id='priceblock_ourprice').get_text().replace(".", "").replace(",", "").replace("€", "").strip())
+        if not price:
+            price = int(soup.find(id='price_inside_buybox').get_text().replace(".", "").replace(",", "").replace("€", "").strip())
+    except RuntimeError:
+        price = -1
+    except AttributeError:
+        price = -1
+
+    if price != -1:
+        return (product_info['product_id'], product_info['vendor_id'], price)
+    else:
+        return None
 
 
 def __crawl_apple__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from apple
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
-    return (product_info['product_id'], product_info['vendor_id'], 123)
+    # return (product_info['product_id'], product_info['vendor_id'], 123)
+    pass
 
 
 def __crawl_mediamarkt__(product_info: dict) -> tuple:
     """
     Crawls the price for the given product from media markt
     :param product_info: A dict with product info containing product_id, vendor_id, url
     :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
     """
     pass
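
The crawl() entry point above can be exercised directly once the sql module is configured; the product IDs below are illustrative. Note how the Amazon parser normalizes German price strings by stripping the thousands separator, decimal comma, and euro sign, so "1.234,56 €" becomes the integer 123456 (cents):

    import crawler

    result = crawler.crawl([1, 2])  # hypothetical product IDs
    print(result['total_crawls'], result['successful_crawls'])
    for problem in result['products_with_problems']:
        print('could not crawl:', problem['url'])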

View File

@@ -1,66 +0,0 @@
# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urlencode
from urllib.parse import urljoin
import re
import json

queries = ['iphone']
API = ''


def get_url(url):
    payload = {'api_key': API, 'url': url, 'country_code': 'us'}
    proxy_url = 'http://api.scraperapi.com/?' + urlencode(payload)
    return proxy_url


class AmazonSpider(scrapy.Spider):
    name = 'amazon'

    def start_requests(self):
        for query in queries:
            url = 'https://www.amazon.de/s?' + urlencode({'k': query})
            yield scrapy.Request(url=url, callback=self.parse_keyword_response)

    def parse_keyword_response(self, response):
        products = response.xpath('//*[@data-asin]')
        for product in products:
            asin = product.xpath('@data-asin').extract_first()
            product_url = f"https://www.amazon.de/dp/{asin}"
            yield scrapy.Request(url=product_url, callback=self.parse_product_page, meta={'asin': asin})
        next_page = response.xpath('//li[@class="a-last"]/a/@href').extract_first()
        if next_page:
            url = urljoin("https://www.amazon.de", next_page)
            yield scrapy.Request(url=url, callback=self.parse_keyword_response)

    def parse_product_page(self, response):
        asin = response.meta['asin']
        title = response.xpath('//*[@id="productTitle"]/text()').extract_first()
        image = re.search('"large":"(.*?)"', response.text).groups()[0]
        rating = response.xpath('//*[@id="acrPopover"]/@title').extract_first()
        number_of_reviews = response.xpath('//*[@id="acrCustomerReviewText"]/text()').extract_first()
        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
        if not price:
            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
        temp = response.xpath('//*[@id="twister"]')
        sizes = []
        colors = []
        if temp:
            s = re.search('"variationValues" : ({.*})', response.text).groups()[0]
            json_acceptable = s.replace("'", "\"")
            di = json.loads(json_acceptable)
            sizes = di.get('size_name', [])
            colors = di.get('color_name', [])
        bullet_points = response.xpath('//*[@id="feature-bullets"]//li/span/text()').extract()
        seller_rank = response.xpath(
            '//*[text()="Amazon Best Sellers Rank:"]/parent::*//text()[not(parent::style)]').extract()
        yield {'asin': asin, 'Title': title, 'MainImage': image, 'Rating': rating, 'NumberOfReviews': number_of_reviews,
               'Price': price, 'AvailableSizes': sizes, 'AvailableColors': colors, 'BulletPoints': bullet_points,
               'SellerRank': seller_rank}

View File

@ -1,5 +1,7 @@
pymysql pymysql
flask flask==1.1.2
flask-sqlalchemy flask-sqlalchemy
flask_restful flask_restful
scrapy beautifulsoup4
requests
lxml

View File

@@ -54,7 +54,6 @@ def getProductLinksForProduct(product_id: int) -> [dict]:
     cur = conn.cursor()
     query = 'SELECT vendor_id, url FROM product_links WHERE product_id = %s'
     cur.execute(query, (product_id,))
-
     products = list(map(lambda x: {'product_id': product_id, 'vendor_id': x[0], 'url': x[1]}, cur.fetchall()))
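
This is the query that the commit message refers to with "Fixed string concatenation for SQL statement": the product_id is now bound as a driver parameter instead of being spliced into the SQL text. A sketch of the difference, assuming pymysql as the driver:

    # Brittle and injectable: the value is concatenated into the statement.
    cur.execute("SELECT vendor_id, url FROM product_links WHERE product_id = " + str(product_id))

    # What the code does now: the driver escapes and binds the value.
    cur.execute("SELECT vendor_id, url FROM product_links WHERE product_id = %s", (product_id,))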

View File

@@ -0,0 +1,33 @@
import scrapy
from scrapy.crawler import CrawlerProcess
import re


class AmazonSpider(scrapy.Spider):
    name = 'amazon'
    allowed_domains = ['amazon.de']
    start_urls = ['https://amazon.de/dp/B083DRCPJG']

    # def __init__(self, start_urls):
    #     self.start_urls = start_urls

    def parse(self, response):
        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
        if not price:
            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
        # German price strings look like "234,56"; concatenating the euro and
        # cent digit groups yields the price in cents (as a string).
        euros = re.match(r'(\d*),\d\d', price).group(1)
        cents = re.match(r'\d*,(\d\d)', price).group(1)
        priceincents = euros + cents
        yield {'price': priceincents}


def start_crawling():
    process = CrawlerProcess(
        settings={'COOKIES_ENABLED': 'False', 'CONCURRENT_REQUESTS_PER_IP': 1, 'ROBOTSTXT_OBEY': False,
                  'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
                  'DOWNLOAD_DELAY': 3}
        , install_root_handler=False)
    process.crawl(AmazonSpider)  # the spider class to run must be passed here
    process.start()
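
A worked example of the price handling in parse(), assuming a scraped string like "234,56": the first regex captures the euros ("234"), the second the cents ("56"), and string concatenation yields "23456". Note this pattern does not match prices with a thousands separator such as "1.234,56":

    import re

    price = "234,56"                                  # assumed scraped value
    euros = re.match(r'(\d*),\d\d', price).group(1)   # "234"
    cents = re.match(r'\d*,(\d\d)', price).group(1)   # "56"
    print(euros + cents)                              # "23456" (a string, not an int)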

View File

@@ -8,4 +8,4 @@ default = crawler.settings
 
 [deploy]
 #url = http://localhost:6800/
 project = crawler

View File

@@ -0,0 +1,25 @@
import scrapy
import re


class AmazonSpider(scrapy.Spider):
    name = 'amazon'
    allowed_domains = ['amazon.de']
    start_urls = ['https://amazon.de/dp/B083DRCPJG']

    def parse(self, response):
        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
        if not price:
            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
        # As above: "234,56" -> euros "234", cents "56" -> "23456" cents.
        euros = re.match(r'(\d*),\d\d', price).group(1)
        cents = re.match(r'\d*,(\d\d)', price).group(1)
        priceincents = euros + cents
        yield {'price': priceincents}
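
A minimal, assumed way to run this spider in-process, mirroring the start_crawling() helper from the unused variant above:

    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        'ROBOTSTXT_OBEY': False,
    })
    process.crawl(AmazonSpider)  # the spider class defined above
    process.start()              # blocks until crawling finishes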