My spider yields certain data but sometimes it doesn't find the data. Instead of setting a condition such as below:
if response.xpath('//div[@id="mitten"]//h1/text()').extract_first():
result['name'] = response.xpath('//div[@id="mitten"]//h1/text()').extract_first()
I'd rather fix this in my pipeline by removing all items that have a None
value. I've tried to do this by the following code:
class BasicPipeline(object):
""" Basic pipeline for scrapers """
def __init__(self):
self.seen = set()
def process_item(self, item, spider):
item = dict((k,v) for k,v in item.iteritems() if v is not None)
item['date'] = datetime.date.today().strftime("%d-%m-%y")
for key, value in item.iteritems():
if isinstance(value, basestring):
item[key] = value.strip() # strip every value of the item
# If an address is a list, convert it to a string
if "address" in item:
if isinstance(item['address'], list): # check if address is a list
item['address'] = u", ".join(line.strip() for line in item['address'] if len(line.strip()) > 0)
# Determine the currency of the price if possible
if "price" in item:
if u'€' in item['price'] or 'EUR' in item['price']:
item['currency'] = 'EUR'
elif u'$' in result['price'] or 'USD' in item['price']:
item['currency'] = 'USD'
# Extract e-mails from text
if "email" in item:
if isinstance(item['email'], list): # check if email is a list
item['email'] = u" ".join(line.strip() for line in item['email']) # convert to a string
regex = r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+"
item['email'] = u";".join(line.strip() for line in re.findall(regex, item['email']))
if "mailto:" in item['email']:
item['email'] = item.replace("mailto:","")
if "phone" in item or "email" in item:
return item
else:
DropItem("No contact details: %s" %item)
However, this results in an error:
2018-03-05 10:11:03 [scrapy] ERROR: Error caught on signal handler: <bound method ?.item_scraped of <scrapy.extensions.feedexport.FeedExporter object at 0x103c14dd0>>
Traceback (most recent call last):
File "/Users/casper/Documents/crawling/env/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "/Users/casper/Documents/crawling/env/lib/python2.7/site-packages/scrapy/xlib/pydispatch/robustapply.py", line 57, in robustApply
return receiver(*arguments, **named)
File "/Users/casper/Documents/crawling/env/lib/python2.7/site-packages/scrapy/extensions/feedexport.py", line 193, in item_scraped
slot.exporter.export_item(item)
File "/Users/casper/Documents/crawling/env/lib/python2.7/site-packages/scrapy/exporters.py", line 184, in export_item
self._write_headers_and_set_fields_to_export(item)
File "/Users/casper/Documents/crawling/env/lib/python2.7/site-packages/scrapy/exporters.py", line 199, in _write_headers_and_set_fields_to_export
self.fields_to_export = list(item.fields.keys())
AttributeError: 'NoneType' object has no attribute 'fields'
I think it has to do with the fact that a field has been yielded to the pipeline but not returned at the end but that's just a guess.
Currently the pipeline has statements such as:
if "website" in item:
# Do stuff
And I'd like to prevent adding unnecessary extra statements to check if the value is None
.