爬取500px网站图片并保存到本地。
软件:Pycharm
库:requests,fake_useragent
网站地址:https://500px.com/popular
先打开目标网站,看看它是静态加载还是动态加载的。
右键检查打开开发者工具,向下拖动滚动条,会发现页面没有页码,内容是滚动时自动加载的,由此可判断该网站采用动态加载方式;或者也可以复制一段页面内容,Ctrl+U 查看源代码,Ctrl+F 打开搜索框,把内容贴进去搜索,会发现找不到,这样就能确定内容是动态加载的了。
在开发者工具的 Network 面板中可以找到图片数据的请求链接;向下拖动滚动条,这里会再次出现新加载的请求。
这个就是网页的真实链接。
复制下来这几个地址分析:
第一个:https://api.500px.com/v1/photos?rpp=50&feature=popular&image_size%5B%5D=1&image_size%5B%5D=2&image_size%5B%5D=32&image_size%5B%5D=31&image_size%5B%5D=33&image_size%5B%5D=34&image_size%5B%5D=35&image_size%5B%5D=36&image_size%5B%5D=2048&image_size%5B%5D=4&image_size%5B%5D=14&sort=&include_states=true&include_licensing=true&formats=jpeg%2Clytro&only=&exclude=&personalized_categories=&page=1&rpp=50
第二个:https://api.500px.com/v1/photos?rpp=50&feature=popular&image_size%5B%5D=1&image_size%5B%5D=2&image_size%5B%5D=32&image_size%5B%5D=31&image_size%5B%5D=33&image_size%5B%5D=34&image_size%5B%5D=35&image_size%5B%5D=36&image_size%5B%5D=2048&image_size%5B%5D=4&image_size%5B%5D=14&sort=&include_states=true&include_licensing=true&formats=jpeg%2Clytro&only=All+photographers%2CPulse&exclude=&personalized_categories=&page=2&rpp=50
会发现第一页是:page=1,第二页是:page=2……再对比其他参数,经过验证,每一页的链接除了 page 参数不同之外,规律都是一样的。
同一个 IP 地址多次访问会面临被封掉的风险,这里采用 fake_useragent,随机产生 User-Agent 请求头进行访问。
import requests
from fake_useragent import UserAgent
# Global counter used to build sequential file names across all pages.
filename = 0


class photo_spider(object):
    """Crawl 500px "popular" photos page by page and save them locally.

    The page itself is dynamically loaded; the real data comes from the
    JSON API stored in ``self.url``, where ``page={}`` selects the page.
    """

    def __init__(self):
        # JSON API behind the dynamically loaded page; format() fills the page number into {}.
        self.url = 'https://api.500px.com/v1/photos?rpp=50&feature=popular&image_size%5B%5D=1&image_size%5B%5D=2&image_size%5B%5D=32&image_size%5B%5D=31&image_size%5B%5D=33&image_size%5B%5D=34&image_size%5B%5D=35&image_size%5B%5D=36&image_size%5B%5D=2048&image_size%5B%5D=4&image_size%5B%5D=14&sort=&include_states=true&include_licensing=true&formats=jpeg%2Clytro&only=&exclude=&personalized_categories=&page={}&rpp=50'
        ua = UserAgent(verify_ssl=False)
        # Pick one random User-Agent to reduce the risk of the IP being
        # blocked.  (The original re-assigned self.headers 99 times in a
        # loop, which had no extra effect -- the loop was removed.)
        self.headers = {
            'User-Agent': ua.random
        }

    def get_html(self, url):
        """GET *url* and return the dynamically loaded JSON data as a dict."""
        response = requests.get(url, headers=self.headers)
        html = response.json()  # the dynamically loaded JSON payload
        return html

    def get_imageUrl(self, html):
        """Extract one image URL per photo from *html* and save each image.

        ``content['image_url']`` is a list holding several sizes of the
        same photo; index 8 picks the size the author chose (falls back to
        the last entry if the API returns fewer sizes).
        """
        global filename
        content_list = html['photos']
        for content in content_list:
            image_url = content['image_url']
            # Several sizes come back; take index 8 when available instead
            # of crashing with IndexError on shorter lists.
            imageUrl = image_url[8] if len(image_url) > 8 else image_url[-1]
            r = requests.get(imageUrl, headers=self.headers)
            with open('F:/pycharm文件/photo/' + str(filename) + '.jpg', 'wb') as f:
                f.write(r.content)
            filename += 1

    def main(self):
        """Entry point: ask for a page range and crawl every page in it.

        NOTE: the original defined this method as ``mian`` (a typo) with an
        empty body while the ``__main__`` guard called ``spider.main()``,
        which raised AttributeError -- corrected here, and the guard is
        moved after all method definitions.
        """
        start = int(input('输入开始页:'))
        end = int(input('输入结束页:'))
        for page in range(start, end + 1):
            print('第%s页内容' % page)
            url = self.url.format(page)  # fill the page number into {}
            html = self.get_html(url)
            self.get_imageUrl(html)
            print('第%s页爬取完成'%page)


if __name__ == '__main__':
    spider = photo_spider()
    spider.main()
运行后打开本地 F:/pycharm文件/photo/ 目录即可看到保存的图片。
import requests
from fake_useragent import UserAgent
# Sequential file-name counter shared across all crawled pages.
filename = 0


class photo_spider(object):
    """Scraper for 500px "popular": fetches the JSON API page by page and
    downloads every listed photo into a local folder.
    """

    def __init__(self):
        # JSON API behind the dynamically loaded page; {} receives the page number.
        self.url = 'https://api.500px.com/v1/photos?rpp=50&feature=popular&image_size%5B%5D=1&image_size%5B%5D=2&image_size%5B%5D=32&image_size%5B%5D=31&image_size%5B%5D=33&image_size%5B%5D=34&image_size%5B%5D=35&image_size%5B%5D=36&image_size%5B%5D=2048&image_size%5B%5D=4&image_size%5B%5D=14&sort=&include_states=true&include_licensing=true&formats=jpeg%2Clytro&only=&exclude=&personalized_categories=&page={}&rpp=50'
        ua = UserAgent(verify_ssl=False)
        # One random User-Agent is enough; the original assigned
        # self.headers 99 times in a loop with no additional effect.
        self.headers = {
            'User-Agent': ua.random
        }

    def get_html(self, url):
        """Fetch *url* with the randomized headers and decode its JSON body."""
        response = requests.get(url, headers=self.headers)
        html = response.json()
        return html

    def get_imageUrl(self, html):
        """Download every photo listed in *html* to the local photo folder."""
        import os  # local import: only this method needs it
        global filename
        save_dir = 'F:/pycharm文件/photo/'
        # Create the target folder up front; otherwise open() fails with
        # FileNotFoundError on a machine where it does not exist yet.
        os.makedirs(save_dir, exist_ok=True)
        content_list = html['photos']
        for content in content_list:
            image_url = content['image_url']
            # image_url lists several sizes of the same photo; index 8 is
            # the size chosen by the author (fall back to the last entry
            # instead of raising IndexError on shorter lists).
            imageUrl = image_url[8] if len(image_url) > 8 else image_url[-1]
            r = requests.get(imageUrl, headers=self.headers)
            with open(save_dir + str(filename) + '.jpg', 'wb') as f:
                f.write(r.content)
            filename += 1

    def main(self):
        """Ask the user for a page range and crawl each page in order."""
        start = int(input('输入开始:'))
        end = int(input('输入结束页:'))
        for page in range(start, end + 1):
            print('第%s页' % page)
            url = self.url.format(page)
            html = self.get_html(url)
            self.get_imageUrl(html)


if __name__ == '__main__':
    spider = photo_spider()
    spider.main()
联系客服