I am trying to scrape a website and save the information and I have two issues at the moment.
For one, when I am using selenium to click buttons (in this case a load more results button) it is not clicking until the end and I can't seem to figure out why.
And the other issue is that it is not saving to a csv file in the parse_article function.
Here is my code:
import scrapy
from selenium import webdriver
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from selenium.webdriver.common.by import By
import csv
class ProductSpider(scrapy.Spider):
name = "Southwestern"
allowed_domains = ['www.reuters.com/']
start_urls = [
'https://www.reuters.com/search/news?blob=National+Health+Investors%2c+Inc.']
def __init__(self):
self.driver = webdriver.Chrome()
def parse(self, response):
self.driver.get(response.url)
while True:
next = self.driver.find_element_by_class_name(
"search-result-more-txt")
#next = self.driver.find_element_by_xpath('//*[@id="content"]/section[2]/div/div[1]/div[4]/div/div[4]/div[1]')
# maybe do it with this
#button2 = driver.find_element_by_xpath("//*[contains(text(), 'Super')]")
try:
next.click()
# get the data and write it to scrapy items
except:
break
SET_SELECTOR = '.search-result-content'
for articles in self.driver.find_elements(By.CSS_SELECTOR, SET_SELECTOR):
item = {}
# get the date
item["date"] = articles.find_element_by_css_selector('h5').text
# title
item["title"] = articles.find_element_by_css_selector('h3 a').text
item["link"] = articles.find_element_by_css_selector(
'a').get_attribute('href')
print(item["link"])
yield scrapy.Request(url=item["link"], callback=self.parse_article, meta={'item': item})
self.driver.close()
def parse_article(self, response):
item = response.meta['item']
texts = response.xpath(
"//div[contains(@class, 'StandardArticleBody')]//text()").extract()
if "National Health Investors" in texts:
item = response.meta['item']
row = [item["date"], item["title"], item["link"]]
with open('Websites.csv', 'w') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(row)