mirror of https://github.com/Mueller-Patrick/Betterzon.git
	BETTERZON-58 (#53)
* BETTERZON-58: Basic functionality with scrapy
* Added an independent crawler function that yields the price
* Moved logic to amazon.py
* Moved scrapy files to the unused folder
* Added a basic Amazon crawler using beautifulsoup4
* Connected the API to the crawler
* Fixed string concatenation for the SQL statement in getProductLinksForProduct
* BETTERZON-58: Fixed the SQL insert
* BETTERZON-58: Added access key verification
* BETTERZON-58: Fixed the API endpoint of the crawler - the list of products in the API request was treated like a string, so only the first product was crawled
* Added another selector for the price on Amazon (does not work for books)

Co-authored-by: root <root@DESKTOP-ARBPL82.localdomain>
Co-authored-by: Patrick Müller <patrick@mueller-patrick.tech>
Co-authored-by: Patrick <50352812+Mueller-Patrick@users.noreply.github.com>
This commit is contained in:
parent 3ae68b3df3
commit 26ba21156a
@@ -2,13 +2,13 @@
 <module type="WEB_MODULE" version="4">
   <component name="FacetManager">
     <facet type="Python" name="Python">
-      <configuration sdkName="Python 3.9" />
+      <configuration sdkName="Python 3.9 (venv)" />
     </facet>
   </component>
   <component name="NewModuleRootManager" inherit-compiler-output="true">
     <exclude-output />
     <content url="file://$MODULE_DIR$" />
     <orderEntry type="sourceFolder" forTests="false" />
-    <orderEntry type="library" name="Python 3.9 interpreter library" level="application" />
+    <orderEntry type="library" name="Python 3.9 (venv) interpreter library" level="application" />
   </component>
 </module>
@@ -1,13 +1,17 @@
+import os
+
 from flask import Flask
 from flask_restful import Resource, Api, reqparse
 
 import crawler
 
 app = Flask(__name__)
 api = Api(app)
 
 # To parse request data
 parser = reqparse.RequestParser()
-parser.add_argument('key')
-parser.add_argument('products')
+parser.add_argument('key', type=str)
+parser.add_argument('products', type=int, action='append')
+
+
 class CrawlerApi(Resource):
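Note on the `parser.add_argument` change above: this is the fix for the issue named in the commit message, where the products list was treated like a string and only the first product was crawled. A minimal sketch of the difference, assuming Flask's test request context and a client that repeats the `products` field once per ID:

    # Hedged sketch: with action='append', every repeated 'products' value
    # is collected into a list; without it, only a single value survives.
    from flask import Flask
    from flask_restful import reqparse

    app = Flask(__name__)
    parser = reqparse.RequestParser()
    parser.add_argument('products', type=int, action='append')

    with app.test_request_context('/?products=1&products=2&products=3'):
        args = parser.parse_args()
        print(args['products'])  # [1, 2, 3]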
@@ -17,7 +21,12 @@ class CrawlerApi(Resource):
     def post(self):
         # Accept crawler request here
         args = parser.parse_args()
-        return args
+        access_key = os.getenv('CRAWLER_ACCESS_KEY')
+        if(args['key'] == access_key):
+            crawler.crawl(args['products'])
+            return {'message': 'success'}
+        else:
+            return {'message': 'Wrong access key'}
 
 
 api.add_resource(CrawlerApi, '/')
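One hardening note on the access check above, not part of this commit: comparing secrets with `==` can leak information through response timing. Python's `hmac.compare_digest` compares in constant time; a sketch, assuming the same `CRAWLER_ACCESS_KEY` environment variable:

    import hmac
    import os

    def is_authorized(submitted_key: str) -> bool:
        # Constant-time comparison of the submitted key and the stored key.
        access_key = os.getenv('CRAWLER_ACCESS_KEY') or ''
        return hmac.compare_digest(submitted_key or '', access_key)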
@@ -1,78 +1,107 @@
-import sql
-
-
-def crawl(product_ids: [int]) -> dict:
-    """
-    Crawls the given list of products and saves the results to sql
-    :param products: The list of product IDs to fetch
-    :return: A dict with the following fields:
-                total_crawls: number of total crawl tries (products * vendors per product)
-                successful_crawls: number of successful products
-                products_with_problems: list of products that have not been crawled successfully
-    """
-    total_crawls = 0
-    successful_crawls = 0
-    products_with_problems = []
-
-    # Iterate over every product that has to be crawled
-    for product_id in product_ids:
-        # Get all links for this product
-        product_links = sql.getProductLinksForProduct(product_id)
-
-        crawled_data = []
-
-        # Iterate over every link / vendor
-        for product_vendor_info in product_links:
-            total_crawls += 1
-
-            # Call the appropriate vendor crawling function and append the result to the list of crawled data
-            if product_vendor_info['vendor_id'] == 1:
-                # Amazon
-                crawled_data.append(__crawl_amazon__(product_vendor_info))
-            elif product_vendor_info['vendor_id'] == 2:
-                # Apple
-                crawled_data.append(__crawl_apple__(product_vendor_info))
-            elif product_vendor_info['vendor_id'] == 3:
-                # Media Markt
-                crawled_data.append(__crawl_mediamarkt__(product_vendor_info))
-            else:
-                products_with_problems.append(product_vendor_info)
-                continue
-
-            successful_crawls += 1
-
-        # Insert data to SQL
-        sql.insertData(crawled_data)
-
-    return {
-        'total_crawls': total_crawls,
-        'successful_crawls': successful_crawls,
-        'products_with_problems': products_with_problems
-    }
-
-
-def __crawl_amazon__(product_info: dict) -> tuple:
-    """
-    Crawls the price for the given product from amazon
-    :param product_info: A dict with product info containing product_id, vendor_id, url
-    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
-    """
-    return (product_info['product_id'], product_info['vendor_id'], 123)
-
-
-def __crawl_apple__(product_info: dict) -> tuple:
-    """
-    Crawls the price for the given product from apple
-    :param product_info: A dict with product info containing product_id, vendor_id, url
-    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
-    """
-    return (product_info['product_id'], product_info['vendor_id'], 123)
-
-
-def __crawl_mediamarkt__(product_info: dict) -> tuple:
-    """
-    Crawls the price for the given product from media markt
-    :param product_info: A dict with product info containing product_id, vendor_id, url
-    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
-    """
-    pass
+import sql
+import requests
+from bs4 import BeautifulSoup
+
+HEADERS = ({'User-Agent':
+                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 '
+                'Safari/537.36'})
+
+
+def crawl(product_ids: [int]) -> dict:
+    """
+    Crawls the given list of products and saves the results to sql
+    :param products: The list of product IDs to fetch
+    :return: A dict with the following fields:
+                total_crawls: number of total crawl tries (products * vendors per product)
+                successful_crawls: number of successful products
+                products_with_problems: list of products that have not been crawled successfully
+    """
+    total_crawls = 0
+    successful_crawls = 0
+    products_with_problems = []
+
+    # Iterate over every product that has to be crawled
+    for product_id in product_ids:
+        # Get all links for this product
+        product_links = sql.getProductLinksForProduct(product_id)
+
+        crawled_data = []
+
+        # Iterate over every link / vendor
+        for product_vendor_info in product_links:
+            total_crawls += 1
+
+            # Call the appropriate vendor crawling function and append the result to the list of crawled data
+            if product_vendor_info['vendor_id'] == 1:
+                # Amazon
+                data = __crawl_amazon__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
+            elif product_vendor_info['vendor_id'] == 2:
+                # Apple
+                data = __crawl_apple__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
+            elif product_vendor_info['vendor_id'] == 3:
+                # Media Markt
+                data = __crawl_mediamarkt__(product_vendor_info)
+                if data:
+                    crawled_data.append(data)
+            else:
+                products_with_problems.append(product_vendor_info)
+                continue
+
+            successful_crawls += 1
+
+        # Insert data to SQL
+        sql.insertData(crawled_data)
+
+    return {
+        'total_crawls': total_crawls,
+        'successful_crawls': successful_crawls,
+        'products_with_problems': products_with_problems
+    }
+
+
+def __crawl_amazon__(product_info: dict) -> tuple:
+    """
+    Crawls the price for the given product from amazon
+    :param product_info: A dict with product info containing product_id, vendor_id, url
+    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
+    """
+    page = requests.get(product_info['url'], headers=HEADERS)
+    soup = BeautifulSoup(page.content, features="lxml")
+    try:
+        price = int(
+            soup.find(id='priceblock_ourprice').get_text().replace(".", "").replace(",", "").replace("€", "").strip())
+        if not price:
+            price = int(soup.find(id='price_inside_buybox').get_text().replace(".", "").replace(",", "").replace("€", "").strip())
+
+    except RuntimeError:
+        price = -1
+    except AttributeError:
+        price = -1
+
+    if price != -1:
+        return (product_info['product_id'], product_info['vendor_id'], price)
+    else:
+        return None
+
+
+def __crawl_apple__(product_info: dict) -> tuple:
+    """
+    Crawls the price for the given product from apple
+    :param product_info: A dict with product info containing product_id, vendor_id, url
+    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
+    """
+    # return (product_info['product_id'], product_info['vendor_id'], 123)
+    pass
+
+
+def __crawl_mediamarkt__(product_info: dict) -> tuple:
+    """
+    Crawls the price for the given product from media markt
+    :param product_info: A dict with product info containing product_id, vendor_id, url
+    :return: A tuple with the crawled data, containing (product_id, vendor_id, price_in_cents)
+    """
+    pass
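A caveat in the new `__crawl_amazon__` above: when the `priceblock_ourprice` element is missing, `soup.find(...)` returns `None`, so the chained `.get_text()` raises `AttributeError` and the handler sets the price to -1 before the `if not price:` fallback ever tries `price_inside_buybox` (which matches the commit note that the second selector does not work for books). A sketch of a selector loop that avoids this, assuming the same BeautifulSoup document and a hypothetical helper name:

    def __parse_price__(soup) -> int:
        # Try each known price selector in turn; after the cleanup only
        # digits remain, so int() cannot raise here.
        for selector_id in ('priceblock_ourprice', 'price_inside_buybox'):
            tag = soup.find(id=selector_id)
            if tag is None:
                continue
            raw = tag.get_text().replace('.', '').replace(',', '').replace('€', '').strip()
            if raw.isdigit():
                return int(raw)
        return -1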
@@ -1,66 +0,0 @@
-# -*- coding: utf-8 -*-
-import scrapy
-from urllib.parse import urlencode
-from urllib.parse import urljoin
-import re
-import json
-
-queries = ['iphone']
-API = ''
-
-
-def get_url(url):
-    payload = {'api_key': API, 'url': url, 'country_code': 'us'}
-    proxy_url = 'http://api.scraperapi.com/?' + urlencode(payload)
-    return proxy_url
-
-
-class AmazonSpider(scrapy.Spider):
-    name = 'amazon'
-
-    def start_requests(self):
-        for query in queries:
-            url = 'https://www.amazon.de/s?' + urlencode({'k': query})
-            yield scrapy.Request(url=url, callback=self.parse_keyword_response)
-
-    def parse_keyword_response(self, response):
-        products = response.xpath('//*[@data-asin]')
-
-        for product in products:
-            asin = product.xpath('@data-asin').extract_first()
-            product_url = f"https://www.amazon.de/dp/{asin}"
-            yield scrapy.Request(url=product_url, callback=self.parse_product_page, meta={'asin': asin})
-
-        next_page = response.xpath('//li[@class="a-last"]/a/@href').extract_first()
-        if next_page:
-            url = urljoin("https://www.amazon.de", next_page)
-            yield scrapy.Request(url=url, callback=self.parse_keyword_response)
-
-    def parse_product_page(self, response):
-        asin = response.meta['asin']
-        title = response.xpath('//*[@id="productTitle"]/text()').extract_first()
-        image = re.search('"large":"(.*?)"', response.text).groups()[0]
-        rating = response.xpath('//*[@id="acrPopover"]/@title').extract_first()
-        number_of_reviews = response.xpath('//*[@id="acrCustomerReviewText"]/text()').extract_first()
-        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
-
-        if not price:
-            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
-                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
-
-        temp = response.xpath('//*[@id="twister"]')
-        sizes = []
-        colors = []
-        if temp:
-            s = re.search('"variationValues" : ({.*})', response.text).groups()[0]
-            json_acceptable = s.replace("'", "\"")
-            di = json.loads(json_acceptable)
-            sizes = di.get('size_name', [])
-            colors = di.get('color_name', [])
-
-        bullet_points = response.xpath('//*[@id="feature-bullets"]//li/span/text()').extract()
-        seller_rank = response.xpath(
-            '//*[text()="Amazon Best Sellers Rank:"]/parent::*//text()[not(parent::style)]').extract()
-        yield {'asin': asin, 'Title': title, 'MainImage': image, 'Rating': rating, 'NumberOfReviews': number_of_reviews,
-               'Price': price, 'AvailableSizes': sizes, 'AvailableColors': colors, 'BulletPoints': bullet_points,
-               'SellerRank': seller_rank}
@@ -1,5 +1,7 @@
 pymysql
-flask
+flask==1.1.2
 flask-sqlalchemy
 flask_restful
-scrapy
+beautifulsoup4
+requests
+lxml
@@ -54,7 +54,6 @@ def getProductLinksForProduct(product_id: int) -> [dict]:
     cur = conn.cursor()
 
     query = 'SELECT vendor_id, url FROM product_links WHERE product_id = %s'
-
     cur.execute(query, (product_id,))
 
     products = list(map(lambda x: {'product_id': product_id, 'vendor_id': x[0], 'url': x[1]}, cur.fetchall()))
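Only a blank line changes in this hunk, but the context shows the parameterized form referenced by the commit message ("Fixed string concatenation for the SQL statement in getProductLinksForProduct"): `cur.execute(query, (product_id,))` passes the value as a bound parameter so the driver escapes it. For contrast, a sketch of the unsafe pattern versus the safe one, assuming a PyMySQL cursor named `cur`:

    # Unsafe: the value is interpolated into the SQL text (injection risk).
    cur.execute('SELECT vendor_id, url FROM product_links WHERE product_id = ' + str(product_id))

    # Safe: the driver escapes the bound parameter; note the one-element tuple.
    cur.execute('SELECT vendor_id, url FROM product_links WHERE product_id = %s', (product_id,))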
Crawler/unused/scrapy/amazonspider.py (new file, 33 lines)
@@ -0,0 +1,33 @@
+import scrapy
+from scrapy.crawler import CrawlerProcess
+import re
+
+class AmazonSpider(scrapy.Spider):
+    name = 'amazon'
+    allowed_domains = ['amazon.de']
+    start_urls = ['https://amazon.de/dp/B083DRCPJG']
+
+    # def __init__(self, start_urls):
+    #   self.start_urls = start_urls
+
+    def parse(self, response):
+        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
+        if not price:
+            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
+                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
+
+        euros = re.match('(\d*),\d\d', price).group(1)
+        cents = re.match('\d*,(\d\d)', price).group(1)
+        priceincents = euros + cents
+
+        yield {'price': priceincents}
+
+
+def start_crawling():
+    process = CrawlerProcess(
+        settings={'COOKIES_ENABLED': 'False', 'CONCURRENT_REQUESTS_PER_IP': 1, 'ROBOTSTXT_OBEY': False,
+                  'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
+                  'DOWNLOAD_DELAY': 3}
+        , install_root_handler=False)
+    process.crawl()
+    process.start()
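Note that `process.crawl()` in `start_crawling` above is called with no arguments; Scrapy's `CrawlerProcess.crawl` requires a crawler or spider class as its first argument, so as written this raises a `TypeError` before any request is made (the file lives under `unused/`, so the bug is dormant). A sketch of the intended call, assuming the spider class defined in this file:

    # Pass the spider class explicitly when starting the process.
    process.crawl(AmazonSpider)
    process.start()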
@@ -8,4 +8,4 @@ default = crawler.settings
 
 [deploy]
 #url = http://localhost:6800/
-project = crawler
+project = crawler
Crawler/unused/scrapy/spiders/amazon.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+import scrapy
+import re
+
+class AmazonSpider(scrapy.Spider):
+    name = 'amazon'
+    allowed_domains = ['amazon.de']
+    start_urls = ['https://amazon.de/dp/B083DRCPJG']
+
+    def parse(self, response):
+        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()
+        if not price:
+            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
+                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()
+
+        euros = re.match('(\d*),\d\d', price).group(1)
+        cents = re.match('\d*,(\d\d)', price).group(1)
+        priceincents = euros + cents
+
+        yield {'price': priceincents}
+
+
+
+
+
+
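Both spiders extract euros and cents with two separate `re.match` calls and concatenate the groups as strings; a price with a thousands separator such as "1.299,99" does not match, so `.group(1)` raises `AttributeError` on the `None` result. A single-pattern sketch that also covers that format (a hypothetical helper, not part of the commit):

    import re

    def price_to_cents(price_text: str) -> int:
        # Drop thousands separators, then capture euros and cents at once.
        match = re.search(r'(\d+),(\d{2})', price_text.replace('.', ''))
        if match is None:
            raise ValueError(f'unrecognized price format: {price_text!r}')
        euros, cents = match.groups()
        return int(euros) * 100 + int(cents)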