Scraping Suning works the same way as scraping JD; the approach is essentially the same.
Here is the JD scraping example:
https://blog.csdn.net/Dream____Fly/article/details/99698222
Now let's analyze the Suning channel page; its structure is fairly straightforward. Each product link's href is easy to extract, and since the hrefs are protocol-relative, you simply prepend https: to them.
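As a quick aside: besides manual concatenation, Python's standard library can resolve protocol-relative hrefs for you. A minimal sketch (the href value below is made up for illustration, not taken from the real page):

from urllib.parse import urljoin

page_url = 'https://pindao.suning.com/city/caidian.html'
href = '//product.suning.com/0000000000/000000000000000.html'  # hypothetical protocol-relative href

# urljoin fills in the scheme of the page that contained the link
print(urljoin(page_url, href))
# https://product.suning.com/0000000000/000000000000000.html

urljoin inherits the scheme from the containing page, so it also behaves correctly if the same markup is ever served over plain http.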
With that, let's look at the code:
import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
}

def two_request(new_url):
    print(new_url)
    # two_response = requests.get(url=new_url, headers=headers, verify=False)
    # two_soup = BeautifulSoup(two_response.text, 'lxml')
    # Parse whatever fields you need from the detail page here

def first_request(first_response):
    soup = BeautifulSoup(first_response.text, 'lxml')
    first_body = soup.select('.u-items-list > .f-rt-list > ul > li > a')
    for num in first_body:
        # hrefs on this page are protocol-relative, so prepend the scheme
        new_url = 'https:' + str(num['href'])
        # Send the second request for each product page
        two_request(new_url)

def main():
    url = 'https://pindao.suning.com/city/caidian.html?safp=d488778a.homepage1.99345513004.6'
    # First request: fetch the channel page itself
    first_response = requests.get(url=url, headers=headers, verify=False)
    first_request(first_response)

if __name__ == '__main__':
    main()
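One note on the code above: because it passes verify=False, requests will emit an InsecureRequestWarning on every call. If you want to silence that noise (purely optional, unrelated to the scraping logic):

import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)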
Next we grab product reviews from Suning; for this we need to capture the JSON packet the review widget requests (its URL can be found in the browser's network panel). With that done, all of the requirements should be covered.
2. Fetching Suning product reviews directly
import urllib.request
import json, jsonpath

url = 'https://review.suning.com/ajax/cluster_review_lists/general-30075272-000000000627657477-0000000000-total-2-default-10-----reviewList.htm?callback=reviewList'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
}
request = urllib.request.Request(url=url, headers=headers)
content = urllib.request.urlopen(request).read().decode('utf8')
# print(content)  # uncomment to inspect the raw JSONP response

# The response is JSONP: reviewList({...}). Slice the callback wrapper off
# explicitly; the tempting content.strip('reviewList()') strips *characters*,
# not a prefix string, and only works here by accident.
content = content.strip()
content = content[len('reviewList('):-1]
obj = json.loads(content)

# All reviews live in the commodityReviews list
comments = obj['commodityReviews']

fp = open('苏宁评论.txt', 'w', encoding='utf8')
for comment in comments:
    # Review timestamp
    publishTime = comment['publishTime']
    # Reviewer nickname
    nickname = comment['userInfo']['nickName']
    # Review text
    content = comment['content']
    # Image URLs, present only when the review carries pictures or video
    is_have = comment['picVideoFlag']
    if is_have:
        image_src = jsonpath.jsonpath(comment, '$..imageInfo[*].url')
    else:
        image_src = '无'
    # Save one review per line; json.dumps keeps each line valid JSON
    # (str(item) would write a Python repr instead)
    item = {
        '评论时间': publishTime,
        '用户': nickname,
        '评论内容': content,
        '图片地址': image_src,
    }
    fp.write(json.dumps(item, ensure_ascii=False) + '\n')
fp.close()
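To pull more than one page of reviews, the request URL can be templated. A hedged sketch, continuing from the script above (it reuses headers and the imports): the 2 in the total-2-default-10 segment looks like the page index and the 10 like the page size, but that is read off the URL pattern, not documented anywhere, so treat it as an assumption.

# Assumption: '{page}' replaces the page index in 'total-2-default-10';
# all other URL segments are kept exactly as in the single-page request.
base = ('https://review.suning.com/ajax/cluster_review_lists/'
        'general-30075272-000000000627657477-0000000000-total-{page}'
        '-default-10-----reviewList.htm?callback=reviewList')

for page in range(1, 4):  # first three pages, as an example
    req = urllib.request.Request(url=base.format(page=page), headers=headers)
    body = urllib.request.urlopen(req).read().decode('utf8').strip()
    data = json.loads(body[len('reviewList('):-1])
    print(page, len(data['commodityReviews']))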