As soon as I uncomment the project's spider middleware in settings.py, I get an error:
SPIDER_MIDDLEWARES = {
    'scrapyspider.middlewares.ScrapySpiderProjectMiddleware': 543,
}
Here is my spider:
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.item import Item, Field


class DomainLinks(Item):
    links = Field()


class ScrapyProject(CrawlSpider):
    name = 'scrapyspider'
    #allowed_domains = []
    start_urls = ['http://www.example.com']

    rules = (Rule(LxmlLinkExtractor(allow=()), callback='parse_links', follow=True),)

    def parse_start_url(self, response):
        self.parse_links(response)

    def parse_links(self, response):
        item = DomainLinks()
        item['links'] = []
        links = LxmlLinkExtractor(allow=(), deny=()).extract_links(response)
        for link in links:
            if link.url not in item['links']:
                item['links'].append(link.url)
        return item
Here is an excerpt from the project middleware file. process_spider_output is where I filter out external links, and process_start_requests is what raises the error below.
def process_spider_output(response, result, spider):
    # Called with the results returned from the Spider, after
    # it has processed the response.
    domain = response.url.strip("http://","").strip("https://","").strip("www.").strip("ww2.").split("/")[0]
    filtered_result = []
    for i in result:
        if domain in i:
            filtered_result.append(i)

    # Must return an iterable of Request, dict or Item objects.
    for i in filtered_result:
        yield i

def process_start_requests(start_requests, spider):
    # Called with the start requests of the spider, and works
    # similarly to the process_spider_output() method, except
    # that it doesn't have a response associated.

    # Must return only requests (not items).
    for r in start_requests:
        yield r
Traceback:
2017-05-01 12:30:55 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapyproject.middlewares.scrapyprojectSpiderMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2017-05-01 12:30:55 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2017-05-01 12:30:55 [scrapy.core.engine] INFO: Spider opened
Unhandled error in Deferred:
2017-05-01 12:30:55 [twisted] CRITICAL: Unhandled error in Deferred:
2017-05-01 12:30:55 [twisted] CRITICAL:
Traceback (most recent call last):
File "/home/matt/.local/lib/python3.5/site-packages/twisted/internet/defer.py", line 1301, in _inlineCallbacks
result = g.send(result)
File "/home/matt/.local/lib/python3.5/site-packages/scrapy/crawler.py", line 74, in crawl
yield self.engine.open_spider(self.spider, start_requests)
TypeError: process_start_requests() takes 2 positional arguments but 3 were given
I am trying to filter links so that only internal links are followed/extracted.
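In other words, what I am effectively trying to express with that strip()/split() chain is a check like the sketch below (illustrative only; host and is_internal are made-up helper names, not part of my project):

from urllib.parse import urlparse

def host(url):
    # Hostname of a URL, with a leading "www." dropped so that
    # www.example.com and example.com compare as the same site.
    netloc = urlparse(url).netloc.lower()
    return netloc[4:] if netloc.startswith("www.") else netloc

def is_internal(link_url, page_url):
    # A link counts as internal when it points at the same host
    # as the page it was extracted from.
    return host(link_url) == host(page_url)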
The Scrapy documentation isn't very clear on this.
Thanks