I'm a beginner at writing web spiders, and I have been very confused these past few days while using aiohttp. Here is my code:
# Browser-like request headers so the site serves normal HTML responses.
header = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 '
                   '(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1'),
    'Referer': 'https://www.mzitu.com/',
    'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    'Accept-Encoding': 'gzip',
}
class MZiTu(object):
    """Asynchronous crawler that downloads image albums from mzitu.com.

    Usage: set ``total_page_num``, then await ``start()`` inside an event
    loop.  Images are saved under ``file_path/<album title>/<index>.jpg``.
    """

    def __init__(self):
        self.timeout = 5                # per-request timeout, in seconds
        # BUG FIX: raw string — '\m' in a plain literal is an invalid escape
        # sequence (deprecated, and an error in future Python versions).
        self.file_path = r'D:\mzitu'
        self.common_page_url = 'https://www.mzitu.com/page/'
        self.total_page_num = 0         # number of listing pages to crawl
        self.end_album_num = 0          # page count of the current album
        self.session = None             # aiohttp.ClientSession, set in start()

    async def start(self):
        """Open one shared HTTP session and crawl every listing page."""
        # BUG FIX: the original wrote ``as mzt.session``, binding the session
        # onto the *module-level* ``mzt`` name.  That only worked by accident
        # because ``mzt`` happened to be this same instance; any other
        # instance would raise NameError.  Bind to ``self.session`` instead.
        async with aiohttp.ClientSession(headers=header) as session:
            self.session = session
            for page in range(1, self.total_page_num + 1):
                await self.crawlAlbum(self.common_page_url, page)

    async def crawlAlbum(self, common_url, page_num):
        """Crawl one listing page and download every album found on it."""
        page_url = self.common_page_url + str(page_num)
        async with self.session.get(page_url, timeout=self.timeout) as resp:
            html = await resp.text()
            bsop = BeautifulSoup(html, 'lxml')
            album_items = bsop.find('ul', {'id': 'pins'}).findAll('li')
            for item in album_items:
                # BUG FIX: narrowed the bare ``except:`` (which also swallowed
                # KeyboardInterrupt/SystemExit) to ``except Exception`` while
                # keeping the best-effort skip-and-continue behaviour.
                try:
                    album_title = item.find('img').attrs['alt']
                    album_url = item.find('a').attrs['href']
                    album_dir = os.path.join(self.file_path, album_title)
                    if not os.path.exists(album_dir):
                        os.mkdir(album_dir)
                    os.chdir(album_dir)
                    await self.crawlImgs(album_url)
                except Exception:
                    continue

    async def crawlImgs(self, album_url):
        """Download every image page of one album into the current directory."""
        self.end_album_num = await self.getAlbumTotalNum(album_url)
        for i in range(1, self.end_album_num + 1):
            img_page_url = album_url + str(i)
            async with self.session.get(img_page_url, timeout=self.timeout) as resq:
                html = await resq.text()
                bsop = BeautifulSoup(html, 'lxml')
                # BUG FIX: same narrowing of the bare ``except:`` as above.
                try:
                    img_url = bsop.find('div', {'class': 'main-image'}).find('img').attrs['src']
                    await self.downloadImg(i, img_url)
                except Exception:
                    # Page layout changed or the request failed: skip image.
                    continue

    async def getAlbumTotalNum(self, album_url):
        """Return the number of pages in the album at *album_url*.

        The second-to-last ``a.page-numbers`` link in the pagination bar
        carries the last page number.
        """
        async with self.session.get(album_url, timeout=self.timeout) as resq:
            html = await resq.text()
            bsop = BeautifulSoup(html, 'lxml')
            total_num = int(bsop.find('div', {'class': 'nav-links'}).findAll('a', {'class': 'page-numbers'})[-2].text)
            return total_num

    async def downloadImg(self, index, img_url):
        """Fetch *img_url* and save it as ``<index>.jpg`` in the current dir."""
        async with self.session.get(img_url, timeout=self.timeout) as resq:
            content = await resq.read()
            async with aiofiles.open(str(index) + '.jpg', 'wb') as f:
                await f.write(content)
if __name__ == "__main__":
    mzt = MZiTu()
    mzt.total_page_num = 2
    # BUG FIX: the original used ``loop.run_until_complete(asyncio.wait([...]))``.
    # ``asyncio.wait`` wraps each coroutine in a Task and does NOT re-raise
    # the tasks' exceptions — any failure inside start() was silently
    # swallowed, which is exactly why the program appeared to "return
    # directly" with no traceback.  Running the coroutine itself through
    # run_until_complete propagates exceptions to the caller.
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(mzt.start())
    finally:
        loop.close()
My code returns immediately at the first line of the method below, without any error — why? I am so confused:
async def getAlbumTotalNum(self, album_url):
async with self.session.get(album_url, timeout=self.timeout) as resq:
html = await resq.text()
bsop = BeautifulSoup(html, 'lxml')
total_num = int(bsop.find('div', {'class': 'nav-links'}).findAll('a', {'class': 'page-numbers'})[-2].text)
return total_num
I can't find any errors in my program, which confuses me even more. If there are good learning materials about aiohttp and asyncio, please point me to them — I find these topics very difficult.