I want to use a proxy for only a few specific domains. I checked this, this and this. If I understand correctly, setting a proxy via middleware will set the proxy for all requests.
How can I set a proxy for specific URLs before the spider's request is sent?
Currently my spider is working fine with the following implementation:
CoreSpider.py
class CoreSpider(scrapy.Spider):
    """Crawl seed URLs read from *.list files and extract article text.

    NOTE(review): ``rules`` are only honoured by ``scrapy.spiders.CrawlSpider``,
    not plain ``scrapy.Spider`` — confirm the intended base class, otherwise
    the ``Rule``/``LinkExtractor`` below has no effect.
    """
    name = "final"

    def __init__(self):
        # Was missing: Spider.__init__ performs base initialisation
        # (name/start_urls bookkeeping) that subclasses must not skip.
        super().__init__()
        self.start_urls = self.read_url()
        self.rules = (
            Rule(
                LinkExtractor(unique=True),
                callback='parse',
                follow=True,
            ),
        )

    def read_url(self, seed_dir="/root/Public/company_profiler/seed_list"):
        """Collect seed URLs from every ``*.list`` file in *seed_dir*.

        :param seed_dir: directory scanned for ``*.list`` files; defaults to
            the original hard-coded path so existing callers are unaffected.
        :returns: list of URLs, each guaranteed to carry an ``http(s)://`` scheme.
        """
        url_list = []
        for filename in glob.glob(os.path.join(seed_dir, '*.list')):
            with open(filename, "r") as f:
                for line in f:
                    # strip() replaces the former re.sub('\n', ...) and also
                    # drops surrounding whitespace.
                    url = line.strip()
                    if not url:
                        # Blank lines previously became a bogus "http://" entry.
                        continue
                    # startswith() instead of `"http" not in url`, which would
                    # wrongly accept any URL merely *containing* "http".
                    if not url.startswith(("http://", "https://")):
                        url = "http://" + url
                    url_list.append(url)
        return url_list

    def parse(self, response):
        """Extract the main article from *response* and forward it on.

        Pages whose extracted article has fewer than 5 lines are skipped.
        """
        print("URL is: ", response.url)
        print("User agent is : ", response.request.headers['User-Agent'])
        article = Extractor(extractor='LargestContentExtractor',
                            html=response.body).getText()
        print("Article is :", article)
        # Split once instead of three times.
        article_lines = article.split("\n")
        if len(article_lines) < 5:
            print("Skipping to next url : ", article_lines)
        else:
            print("Continue parsing: ", article_lines)
            ContentHandler_copy.ContentHandler_copy.start(article, response.url)
and settings.py
# Replace Scrapy's built-in User-Agent middleware with a third-party one.
DOWNLOADER_MIDDLEWARES = {
# None removes the stock UserAgentMiddleware from the middleware chain.
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
# 320 is the middleware ordering slot; presumably this sets a random
# User-Agent header per request — verify against the random_useragent docs.
'random_useragent.RandomUserAgentMiddleware': 320
}
I am running the spider by invoking it from the script RunSpider.py:
RunSpider.py
"""Launch CoreSpider via CrawlerProcess using the Scrapy project settings."""
from CoreSpider import CoreSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


def main():
    """Run the spider; start() blocks until the crawl finishes."""
    process = CrawlerProcess(get_project_settings())
    process.crawl(CoreSpider)
    process.start()


# Guard so importing this module does not immediately start a crawl.
if __name__ == "__main__":
    main()
Update:
CoreSpider.py
class CoreSpider(scrapy.Spider):
    """Spider that routes requests for selected domains through a proxy.

    NOTE(review): ``rules`` (and therefore ``process_request``) are only
    honoured by ``scrapy.spiders.CrawlSpider``, not plain ``scrapy.Spider`` —
    this is why ``process_request`` was never called ("Not printing
    anything"). Switch the base class to ``CrawlSpider`` and avoid naming
    the rule callback ``parse`` (CrawlSpider reserves it) — confirm against
    the callback method defined elsewhere in this file.
    """
    name = "final"

    def __init__(self):
        super().__init__()  # was missing: run base Spider initialisation
        self.start_urls = self.read_url()
        self.rules = (
            Rule(LinkExtractor(unique=True), callback='parse', follow=True,
                 process_request='process_request'),
        )

    def process_request(self, request, spider):
        """Attach a proxy to requests whose URL matches the target domain.

        :param request: the outgoing scrapy Request produced by the rule.
        :param spider: the spider that generated the request.
        :returns: the request (possibly replaced with proxy metadata);
            returning None would drop the request entirely.
        """
        print("Request is : ", request)
        if 'xxx' in request.url:  # <-- set proxy for this URL?
            # Bug fix: scrapy.Request has no .get() method — its metadata
            # lives in the .meta attribute. Merge instead of mutating so the
            # replaced request carries its own meta dict.
            meta = dict(request.meta)
            meta['proxy'] = 'https://159.8.18.178:8080'
            return request.replace(meta=meta)
        return request
.......
I also tried setting the proxy like this inside the process_request method, but that failed as well:
request.meta['proxy'] = "https://159.8.18.178:8080"
Thanks in advance.