
I am trying to generate a CSV file for each scraped URL from a list of URLs in Scrapy. I understand that I have to modify pipeline.py, but all my attempts have failed so far. I do not understand how I can pass the URL being scraped to the pipeline, use it as the name of the output file, and split the output accordingly.

Any help?

Thanks

Here are the spider and the pipeline:

from scrapy import Spider
from scrapy.selector import Selector 
from vApp.items import fItem


class VappSpider(Spider):

    name = "vApp"
    allowed_domains = ["google.co.uk"]
    start_urls = [l.strip() for l in open('data/listOfUrls.txt').readlines()]


    def parse(self, response):
        trs = Selector(response).xpath('//*[@id="incdiv"]/table/tbody/tr')
        for tr in trs:
            item = fItem()

            try:
                item['item'] = tr.xpath('td/text()').extract()[0]
            except IndexError:
                item['item'] = 'null'

            yield item

Pipeline:

from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter

class VappPipeline(object):
    def __init__(self):
        self.files = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        file = open('results/%s.csv' % spider.name, 'w+b')
        self.files[spider] = file
        self.exporter = CsvItemExporter(file)
        self.exporter.fields_to_export = ['item']
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        self.exporter.finish_exporting()
        file = self.files.pop(spider)
        file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
gcc

1 Answer


I think you should do all of this as a batch post-processing step once your crawl finishes, rather than per item (see the standalone sketch after the pipeline below), but here is a draft of how you could do what you want:

from scrapy import Spider
from scrapy.selector import Selector 
from vApp.items import fItem


class VappSpider(Spider):

    name = "vApp"
    allowed_domains = ["google.co.uk"]
    start_urls = [l.strip() for l in open('data/listOfUrls.txt').readlines()]


    def parse(self, response):
        trs = Selector(response).xpath('//*[@id="incdiv"]/table/tbody/tr')
        for tr in trs:
            item = fItem()

            try:
                item['item'] = tr.xpath('td/text()').extract()[0]
            except IndexError:
                item['item'] = 'null'
            # carry the source URL on the item so the pipeline can route it
            item['url'] = response.url
            yield item
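
For item['url'] = response.url to work, fItem also needs a url field. A minimal sketch of vApp/items.py, assuming the only field you currently have is item:

# vApp/items.py -- sketch; field names taken from the spider above
import scrapy

class fItem(scrapy.Item):
    item = scrapy.Field()  # cell text extracted in parse()
    url = scrapy.Field()   # source URL, used by the pipeline to pick the output file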


from scrapy import signals
from scrapy.contrib.exporter import CsvItemExporter
from urlparse import urlparse

class VappPipeline(object):
    def __init__(self):
        self.files = {}
        self.exporter = {}

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def process_item(self, item, spider):
        url = item['url']
        parsed_uri = urlparse(url)
        domain = parsed_uri.netloc
        if domain not in self.exporter:
            # first item seen for this domain: lazily open its CSV file and exporter
            file = open('results/%s.csv' % domain, 'w+b')
            self.files[domain] = file
            self.exporter[domain] = CsvItemExporter(file)
            self.exporter[domain].fields_to_export = ['item']
            self.exporter[domain].start_exporting()

        assert domain in self.exporter

        # route the item to the exporter that belongs to its source domain
        self.exporter[domain].export_item(item)

        return item

    def spider_closed(self, spider):
        for domain, exporter in self.exporter.iteritems():
            exporter.finish_exporting()
            self.files[domain].close()
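
If you prefer the post-processing route instead, here is a minimal sketch of a standalone script, assuming you first run the crawl into one combined file with item and url columns (e.g. scrapy crawl vApp -o all.csv, with the url field added as above) and split it afterwards; the file names are assumptions:

# split_by_url.py -- sketch of the post-processing alternative
# assumes all.csv has 'item' and 'url' columns (the combined feed export)
import csv
import os
from urlparse import urlparse  # Python 2, matching the pipeline imports above

if not os.path.isdir('results'):
    os.makedirs('results')

files = {}
writers = {}
with open('all.csv', 'rb') as source:
    for row in csv.DictReader(source):
        domain = urlparse(row['url']).netloc
        if domain not in writers:
            # one output file per domain, created the first time we see it
            files[domain] = open('results/%s.csv' % domain, 'wb')
            writers[domain] = csv.DictWriter(files[domain], fieldnames=['item'])
            writers[domain].writeheader()
        writers[domain].writerow({'item': row['item']})

for f in files.values():
    f.close()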
neverlastn
  • Thanks @neverlastn. I have tried your solution, but it is giving me lots of errors. I think you are right, I should post-process the data after the crawling, especially after I found out that the rows are not exported in order. The rows of each scraped table do not seem to be exported together; rather, some rows from other addresses are exported after the crawler has crawled another URL. So basically my table looks like row1url1, row1url2, row2url1, row2url1... – gcc Aug 22 '16 at 15:00