
I have a list of ~2211 start URLs and Scrapy crawls some, but not all, of them. When I set start_urls to a single URL, Scrapy crawls it; when the same URL is part of the large list, Scrapy does not crawl it.

Is there a limit on the number of URLs start_urls can hold?

My Code:

from pymongo import MongoClient
import re
from scrapy.selector import Selector
#from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from mongo.items import MongoItem
import scrapy
import json
from scrapy.http import Request
from bs4 import BeautifulSoup as BS


uri = "mongodb://asdf@asdf.ac.commerce.com:23423423/"
client = MongoClient(uri)
db = client['page_content']
collection3 = db['category_page_content']
# Collect every distinct 'cwc' copy block and pull all hrefs out of the stored markup
copyblocks3 = collection3.distinct('cwc')
copyblockss = str(copyblocks3)

hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', copyblockss)

class MongoSpider(scrapy.Spider):
    name = "collections3"
    allowed_domains = ["www.ecommerce.com"]
    handle_httpstatus_list = [502, 503, 504, 400, 408, 404]
    start_urls = hrefs

    def parse(self, response):
        hxs = Selector(response)
        sites = response.selector.xpath('//html')
        items = []

        if response.status == 404:
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('/html/head/title/text()[contains(.,"invalid")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('//head/link[@rel="canonical"]/@href[contains(.,"invalid-category-id")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        else:
            if hxs.xpath('//*[@class="result-summary-container"]/text()[contains(.,"Showing 0 of")]'):
                for site in sites:
                    item = MongoItem()
                    item['url'] = response.url
                    item['status'] = response.status
                    item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                    items.append(item)

                    htmlvar = item['original_url']
                    change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                    alldata = dict()
                    cwcblockdic = ""
                    for a in change_list:
                        alldata.update(a)
                    ids = alldata['_id']
                    cwcblock = alldata['cwc']
                    cwcblockdic = cwcblockdic + cwcblock

                    soup = BS(cwcblockdic)
                    wholehref = soup.find(href=htmlvar)
                    try:
                        anchortext = soup.findAll(href=htmlvar)[0].text
                    except:
                        anchortext = wholehref.get_text()
                    soup.find(href=htmlvar).replaceWith(anchortext)
                    soup = str(soup)
                    newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                    print "this is the anchor:", anchortext
                    print "this is the href:", wholehref
                    print "this is newlist:", newlist
                    print "this is the id:", ids
                    print "this is pagetype: CP"

                    for item in change_list:
                        item['cwc'] = newlist
                        collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
                return items
E liquid Vape
  • Please make the question more specific providing the code of your spider and a sample list of urls to test against. Otherwise - it is too broad. Thanks. – alecxe Dec 30 '14 at 22:00
  • Could you provide us with a list of urls where some of them are not crawled? Thanks. – alecxe Dec 30 '14 at 23:21

2 Answers


This may be only one of the reasons, but it is a valid one: there are duplicate URLs in the list of URLs you posted:

>>> urls = [...]  # list of urls you've posted
>>> len(urls)
2221
>>> len(set(urls))
1177

Scrapy filters duplicate requests by default, so each of those repeated URLs is requested only once.
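If the goal is simply to request every distinct URL once, a minimal sketch (reusing the hrefs list built from MongoDB in the question) is to deduplicate the list before handing it to the spider:

seen = set()
unique_hrefs = []
for url in hrefs:
    if url not in seen:          # keep only the first occurrence, preserving order
        seen.add(url)
        unique_hrefs.append(url)

class MongoSpider(scrapy.Spider):
    name = "collections3"
    allowed_domains = ["www.ecommerce.com"]
    start_urls = unique_hrefs    # 1177 unique URLs instead of 2221 entries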

alecxe
  • That is a very valid point, ty. Unfortunately, the URLs not crawled were not duplicates. I will be using set to cleanse the URL list before sending to start_urls though, TY! – E liquid Vape Dec 31 '14 at 00:31
  • @EliquidVape good, thanks, do you still see not all of the urls crawled? Please show the Scrapy's report at the end of crawling. – alecxe Dec 31 '14 at 00:33
  • Thank you for the followup, Scrapy still does not crawl all URLS, but the mongo database no longer exists. – E liquid Vape Jan 06 '15 at 19:36
  • Any leads in this? I am stuck in an exact same situation. :( – Ayushi Dalmia Jan 23 '17 at 16:24
  • I basically have the same problem ~ 450 start_urls and the crawler makes only 21 requests (no errors) – tech4242 Apr 23 '17 at 10:05

You can also bypass the duplicate filter entirely by building the requests yourself in start_requests():

def start_requests(self):
    # your start url logic; here we simply iterate the full list
    for url in self.start_urls:
        yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
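Passing dont_filter=True tells Scrapy's scheduler to skip the duplicate-request filter for those requests, so every URL yielded from start_requests() is fetched, even if it appears more than once in the list.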
Vishnu Kiran