Mirror of https://github.com/Mueller-Patrick/Betterzon.git, synced 2025-11-04 02:25:48 +00:00
			
		
		
		
	BETTERZON-58: Basic Functionality with scrapy
This commit is contained in:
parent 21d5294a57
commit 8e58efa42c
							
								
								
									
Crawler/crawler/__init__.py (Normal file, 0 lines, empty)

Crawler/crawler/items.py (Normal file, 12 lines)
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class CrawlerItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
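CrawlerItem is still an empty stub in this commit. For illustration only, a sketch of how fields could later be declared with scrapy.Field() for the price data this crawler targets (the field names below are assumptions, not part of the commit):

import scrapy


class CrawlerItem(scrapy.Item):
    # Hypothetical fields; names are assumptions for illustration only.
    asin = scrapy.Field()    # Amazon product identifier
    title = scrapy.Field()   # product title
    price = scrapy.Field()   # scraped price string
    url = scrapy.Field()     # product page URL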
							
								
								
									
Crawler/crawler/middlewares.py (Normal file, 103 lines)
@@ -0,0 +1,103 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class CrawlerSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class CrawlerDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
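Both classes above are the unmodified scrapy startproject templates and are not enabled anywhere yet. As an illustration of the process_request contract described in the comments, a minimal downloader middleware that tags outgoing requests might look like the following sketch (not part of this commit):

class RequestTaggingMiddleware:
    # Illustrative only; it would need an entry in DOWNLOADER_MIDDLEWARES to take effect.
    def process_request(self, request, spider):
        request.headers['X-Crawled-By'] = spider.name
        # Returning None tells Scrapy to keep processing the request normally.
        return None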
							
								
								
									
Crawler/crawler/pipelines.py (Normal file, 13 lines)
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class CrawlerPipeline:
    def process_item(self, item, spider):
        return item
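The comment above refers to the ITEM_PIPELINES setting; the settings.py added in this same commit already carries the matching entry, only commented out. Activating the pipeline would amount to uncommenting it there:

ITEM_PIPELINES = {
    'crawler.pipelines.CrawlerPipeline': 300,
}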
							
								
								
									
Crawler/crawler/settings.py (Normal file, 88 lines)
@@ -0,0 +1,88 @@
# Scrapy settings for crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'crawler'

SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 1

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'crawler.middlewares.CrawlerSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'crawler.middlewares.CrawlerDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'crawler.pipelines.CrawlerPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
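These values apply project-wide. For reference, Scrapy also lets an individual spider override them through its custom_settings class attribute; a hypothetical sketch, not used in this commit:

import scrapy


class AmazonSpider(scrapy.Spider):
    name = 'amazon'
    # Hypothetical per-spider override; the commit relies on the project-wide values.
    custom_settings = {
        'DOWNLOAD_DELAY': 5,
    }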
							
								
								
									
Crawler/crawler/spiders/__init__.py (Normal file, 4 lines)
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
							
								
								
									
Crawler/crawler/spiders/amazon.py (Normal file, 66 lines)
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urlencode
from urllib.parse import urljoin
import re
import json

queries = ['iphone']
API = ''


def get_url(url):
    payload = {'api_key': API, 'url': url, 'country_code': 'us'}
    proxy_url = 'http://api.scraperapi.com/?' + urlencode(payload)
    return proxy_url


class AmazonSpider(scrapy.Spider):
    name = 'amazon'

    def start_requests(self):
        for query in queries:
            url = 'https://www.amazon.de/s?' + urlencode({'k': query})
            yield scrapy.Request(url=url, callback=self.parse_keyword_response)

    def parse_keyword_response(self, response):
        products = response.xpath('//*[@data-asin]')

        for product in products:
            asin = product.xpath('@data-asin').extract_first()
            product_url = f"https://www.amazon.de/dp/{asin}"
            yield scrapy.Request(url=product_url, callback=self.parse_product_page, meta={'asin': asin})

        next_page = response.xpath('//li[@class="a-last"]/a/@href').extract_first()
        if next_page:
            url = urljoin("https://www.amazon.de", next_page)
            yield scrapy.Request(url=url, callback=self.parse_keyword_response)

    def parse_product_page(self, response):
        asin = response.meta['asin']
        title = response.xpath('//*[@id="productTitle"]/text()').extract_first()
        image = re.search('"large":"(.*?)"', response.text).groups()[0]
        rating = response.xpath('//*[@id="acrPopover"]/@title').extract_first()
        number_of_reviews = response.xpath('//*[@id="acrCustomerReviewText"]/text()').extract_first()
        price = response.xpath('//*[@id="priceblock_ourprice"]/text()').extract_first()

        if not price:
            price = response.xpath('//*[@data-asin-price]/@data-asin-price').extract_first() or \
                    response.xpath('//*[@id="price_inside_buybox"]/text()').extract_first()

        temp = response.xpath('//*[@id="twister"]')
        sizes = []
        colors = []
        if temp:
            s = re.search('"variationValues" : ({.*})', response.text).groups()[0]
            json_acceptable = s.replace("'", "\"")
            di = json.loads(json_acceptable)
            sizes = di.get('size_name', [])
            colors = di.get('color_name', [])

        bullet_points = response.xpath('//*[@id="feature-bullets"]//li/span/text()').extract()
        seller_rank = response.xpath(
            '//*[text()="Amazon Best Sellers Rank:"]/parent::*//text()[not(parent::style)]').extract()
        yield {'asin': asin, 'Title': title, 'MainImage': image, 'Rating': rating, 'NumberOfReviews': number_of_reviews,
               'Price': price, 'AvailableSizes': sizes, 'AvailableColors': colors, 'BulletPoints': bullet_points,
               'SellerRank': seller_rank}
@@ -2,3 +2,4 @@ pymysql
 flask
 flask-sqlalchemy
 flask_restful
+scrapy
							
								
								
									
Crawler/scrapy.cfg (Normal file, 11 lines)
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = crawler.settings

[deploy]
#url = http://localhost:6800/
project = crawler