This repository was archived by the owner on Dec 22, 2023. It is now read-only.

Commit 2d7c59a

Merge pull request #104 from Apex-code/master

Added IMDB Spider

2 parents a42de99 + 13dbf2d

File tree

10 files changed: +333 −0 lines changed
Lines changed: 4 additions & 0 deletions

```python
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
```
Lines changed: 36 additions & 0 deletions

```python
import scrapy
from ..items import ImdbItem


class QuotesSpider(scrapy.Spider):
    name = "imdb"
    page_number = 2
    start_urls = [
        'https://www.imdb.com/list/ls061697854'
    ]

    def parse(self, response):
        # Each field is extracted as a list covering every movie on the page.
        items = ImdbItem()
        title = response.css('.lister-item-header a::text').extract()
        yearReleased = response.css('.text-muted.unbold::text').extract()
        rating = response.css('.ipl-rating-star.small .ipl-rating-star__rating::text').extract()
        votes = response.css('.text-muted+ span:nth-child(2)::text').extract()
        totalGross = response.css('.text-muted .ghost~ .text-muted+ span::text').extract()
        imageURL = response.css('#main .loadlate::attr(loadlate)').extract()
        genre = response.css('.genre::text').extract()

        items['title'] = title
        items['yearReleased'] = yearReleased
        items['rating'] = rating
        items['votes'] = votes
        items['totalGross'] = totalGross
        items['imageURL'] = imageURL
        items['genre'] = genre

        yield items

        # Follow the list pagination until page 31.
        next_page = 'https://www.imdb.com/list/ls061697854/?page=' + str(QuotesSpider.page_number)
        if QuotesSpider.page_number < 32:
            QuotesSpider.page_number += 1
            yield response.follow(next_page, callback=self.parse)
```
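Note that `parse()` yields a single item per list page, with every field holding a parallel list of values for all movies on that page. A hypothetical post-processing sketch (sample values invented for illustration) of how a consumer could pair the lists into per-movie records:

```python
# Pair up the parallel lists one item carries into per-movie dicts.
# The sample values below are made up; real items come from the spider.
titles = ['Movie A', 'Movie B']
years = ['(2019)', '(2020)']
ratings = ['7.1', '6.4']

movies = [
    {'title': t, 'yearReleased': y, 'rating': r}
    for t, y, r in zip(titles, years, ratings)
]
print(movies)  # [{'title': 'Movie A', ...}, {'title': 'Movie B', ...}]
```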
Lines changed: 1 addition & 0 deletions

```
Subproject commit 017229962c69eb5ff3de8612b4a4846da47eb130
```
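This entry is a git submodule pointer, so a fresh clone needs the submodule fetched as well; standard git usage (repository URL elided):

```sh
# After cloning, pull the submodule contents referenced by the commit above.
$ git submodule update --init
```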
Lines changed: 48 additions & 0 deletions

# IMDB Scraper
- Scrapes all Hollywood Hindi Dubbed Movies
- Saves results into a DB or CSV
- Fast

### Prerequisites

* [Scrapy](https://scrapy.org/) - Python framework for extracting the data.
* [pymongo](https://pymongo.readthedocs.io/en/stable/#) - Module for working with MongoDB.

### Installation

```sh
# Installing Scrapy
$ pip3 install scrapy
```

```sh
# Installing pymongo
$ pip3 install pymongo
```

# How to run the script
1. Git clone the repo
2. Go to Project_Directory/
3. Open a terminal/cmd
4. Run `scrapy crawl imdb`

If you want output in CSV, add `-o filename.csv` to the command (see the example after this section).

### Screenshot/GIF showing the sample use of the script
![scrapy cmd](imdb.gif)

Author
----
Apex-Code
[Binary file added: imdb.gif, 58.5 KB]
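The README's run steps condense into one command; a minimal example, with `movies.csv` as an illustrative output name:

```sh
# Run from the project directory (where scrapy.cfg lives).
$ scrapy crawl imdb -o movies.csv
```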
Lines changed: 17 additions & 0 deletions

```python
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ImdbItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    yearReleased = scrapy.Field()
    rating = scrapy.Field()
    votes = scrapy.Field()
    totalGross = scrapy.Field()
    imageURL = scrapy.Field()
    genre = scrapy.Field()
```
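A minimal sketch of how these declarations behave, assuming the `IMDB` package layout implied by `scrapy.cfg` below (the sample values are invented):

```python
# A scrapy.Item acts like a dict restricted to its declared fields;
# assigning an undeclared key raises KeyError.
from IMDB.items import ImdbItem  # import path assumes the IMDB package layout

item = ImdbItem(title=['The Matrix'], rating=['8.7'])
item['genre'] = ['Sci-Fi']
print(dict(item))  # {'title': ['The Matrix'], 'rating': ['8.7'], 'genre': ['Sci-Fi']}
```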
Lines changed: 103 additions & 0 deletions

```python
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class ImdbSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ImdbDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
```
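Both classes are the stock `scrapy startproject` templates and stay inactive until registered. Enabling them mirrors the commented-out dicts in `settings.py` further down in this commit:

```python
# In IMDB/settings.py: the number is the ordering priority.
SPIDER_MIDDLEWARES = {
    'IMDB.middlewares.ImdbSpiderMiddleware': 543,
}
DOWNLOADER_MIDDLEWARES = {
    'IMDB.middlewares.ImdbDownloaderMiddleware': 543,
}
```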
Lines changed: 25 additions & 0 deletions

```python
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymongo


class ImdbPipeline:

    def __init__(self):
        self.conn = pymongo.MongoClient(
            'localhost',
            27017
        )
        db = self.conn['IMDB']
        self.collection = db['IMDB']

    def process_item(self, item, spider):
        # insert() was removed in PyMongo 4; insert_one() is the supported call.
        self.collection.insert_one(dict(item))
        return item
```
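A minimal sketch for verifying the pipeline's output, assuming a local mongod on the default port (the same connection the pipeline opens):

```python
# Read back what the crawl stored; each document is one scraped page.
import pymongo

conn = pymongo.MongoClient('localhost', 27017)
collection = conn['IMDB']['IMDB']

print(collection.count_documents({}))  # number of stored page documents
for doc in collection.find({}, {'title': 1, '_id': 0}):
    print(doc)  # the list of titles captured on each page
```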
Lines changed: 11 additions & 0 deletions

```ini
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = IMDB.settings

[deploy]
#url = http://localhost:6800/
project = IMDB
```
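With the `#url` line uncommented and a scrapyd server running, the project could be deployed via scrapyd-client; a hedged sketch (scrapyd-client is not part of this commit):

```sh
# 'default' is the unnamed [deploy] target above; -p names the project.
$ pip3 install scrapyd-client
$ scrapyd-deploy default -p IMDB
```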
Lines changed: 88 additions & 0 deletions

```python
# Scrapy settings for IMDB project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'IMDB'

SPIDER_MODULES = ['IMDB.spiders']
NEWSPIDER_MODULE = 'IMDB.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'IMDB.middlewares.ImdbSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'IMDB.middlewares.ImdbDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'IMDB.pipelines.ImdbPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
```
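Any of these settings can be overridden for a single run with Scrapy's `-s` flag, for example to apply the commented-out download delay or to stop early while testing:

```sh
# Per-run overrides; CLOSESPIDER_PAGECOUNT comes from the CloseSpider extension.
$ scrapy crawl imdb -s DOWNLOAD_DELAY=3
$ scrapy crawl imdb -s CLOSESPIDER_PAGECOUNT=2
```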
