Mirror of https://github.com/Mueller-Patrick/Betterzon.git (synced 2025-10-31 00:35:48 +00:00)
BETTERZON-58: Basic functionality with scrapy

* Added independent crawler function, yielding price
* Moved logic to amazon.py
* Moved scrapy files to unused folder
* Added basic Amazon crawler using beautifulsoup4
* Connected API to crawler
* Fixed string concatenation for SQL statement in getProductLinksForProduct
* BETTERZON-58: Fixed SQL insert
* BETTERZON-58: Added access key verification
* BETTERZON-58: Fixed API endpoint of the crawler: the list of products in the API request was treated like a string, so only the first product was crawled
* Added another selector for price on Amazon (does not work for books)

Co-authored-by: root <root@DESKTOP-ARBPL82.localdomain>
Co-authored-by: Patrick Müller <patrick@mueller-patrick.tech>
Co-authored-by: Patrick <50352812+Mueller-Patrick@users.noreply.github.com>
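The beautifulsoup4 crawler itself is not shown on this page. As a rough sketch of the approach the commit log describes (fetch a product page, then try several price selectors in turn, since a single selector does not work for every category), something like the following could work. The crawl_price helper and the selector list are illustrative assumptions, not the repository's actual code.

from typing import Optional

import requests
from bs4 import BeautifulSoup

# Illustrative selectors only: Amazon renders prices differently per
# category, which is why a single selector "does not work for books".
PRICE_SELECTORS = [
    '#priceblock_ourprice',
    '#priceblock_dealprice',
    'span.a-price span.a-offscreen',
]

# Same browser-like user agent as in the Scrapy settings below
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/81.0.4044.138 Safari/537.36',
}


def crawl_price(url: str) -> Optional[str]:
    """Fetch a product page and return the first price any selector matches."""
    response = requests.get(url, headers=HEADERS, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    for selector in PRICE_SELECTORS:
        tag = soup.select_one(selector)
        if tag is not None:
            return tag.get_text(strip=True)
    return None  # no selector matched, e.g. some book pages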
		
			
				
	
	
		
89 lines · 3.1 KiB · Python
# Scrapy settings for crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'crawler'

SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 1

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'crawler.middlewares.CrawlerSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'crawler.middlewares.CrawlerDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'crawler.pipelines.CrawlerPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
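Per SPIDER_MODULES and NEWSPIDER_MODULE above, Scrapy looks for spiders in the crawler.spiders package. The actual spider is not shown on this page (and the commit log notes the scrapy files were moved to an unused folder), but a minimal sketch of a spider these settings would govern might look like this; the spider name, placeholder URL, and price selector are assumptions for illustration.

import scrapy


class AmazonSpider(scrapy.Spider):
    # Spider name used on the command line: scrapy crawl amazon
    name = 'amazon'

    def start_requests(self):
        # In the real project the product URLs come from the API;
        # this placeholder URL is purely illustrative.
        urls = ['https://www.amazon.de/dp/EXAMPLE']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # DOWNLOAD_DELAY, the AUTOTHROTTLE_* settings, and USER_AGENT
        # above all applied to the request that produced this response.
        yield {
            'url': response.url,
            'price': response.css('span.a-price span.a-offscreen::text').get(),
        }

With the settings above, running scrapy crawl amazon from the project root would wait at least DOWNLOAD_DELAY seconds between requests and send at most one concurrent request per IP.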