Scraping with Selenium
Example: collecting product information from JD.com
The XPath approach
"""
long long away
Date:2022/5/16 15:17
"""
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time, csv
from lxml import etree
# 1. Create the browser configuration object
options = ChromeOptions()
# Hide the "Chrome is being controlled by automated test software" infobar
options.add_experimental_option('excludeSwitches', ['enable-automation'])
# Disable image loading (pref value 2 = block; images aren't needed for text scraping)
options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
# 2. Launch the browser and open the JD home page
b = Chrome(options=options)
b.get('https://www.jd.com/')
# I. Type into the search box
# 1. Locate the search box
search = b.find_element(By.XPATH, '/html/body/div[1]/div[4]/div/div[2]/div/div[2]/input')
# 2. Enter the search term ('电脑' = computer)
search.send_keys('电脑')
# 3. Press Enter to submit the search
search.send_keys(Keys.ENTER)
time.sleep(1)
# II. Scroll the page
# Scrolling via JavaScript: window.scrollBy(x-offset, y-offset) scrolls relative to
# the current position; window.scrollTo(x, y) scrolls to an absolute position.
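# Aside (not in the original script): a single jump to the bottom of the page,
#     b.execute_script('window.scrollTo(0, document.body.scrollHeight)')
# would also work, but JD appears to lazy-load result tiles as you scroll, so
# the loop below scrolls in 1000px steps and pauses so new items can render.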
with open('./京东电脑.csv', 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Product name', 'Product link', 'Price', 'Reviews', 'Shop name', 'Shop link'])
    s = 0
    for j in range(1, 11):  # scrape 10 result pages
        # Scroll down in steps so lazily loaded items are rendered
        for i in range(6):
            b.execute_script('window.scrollBy(0, 1000)')
            time.sleep(1)
        root = etree.HTML(b.page_source)
        for k in range(1, 61):  # up to 60 products per fully loaded page
            try:
                list1 = []
                # Product name
                names = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[3]/a/em/text()')
                list1.append(names[k - 1])
                # Product link
                names_href1 = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[3]/a/@href')
                list1.append('https:' + names_href1[k - 1])
                # Price
                price = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[2]/strong/i/text()')
                list1.append(price[k - 1])
                # Review count
                pingjia = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[4]/strong/a/text()')
                list1.append(pingjia[k - 1])
                # Shop name
                shop_name = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[5]/span/a/text()')
                list1.append(shop_name[k - 1])
                # Shop link
                shop_href = root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li/div/div[5]/span/a/@href')
                list1.append('https:' + shop_href[k - 1])
                writer.writerow(list1)
                s += 1
                print(f'Scraped item {s}')
            except IndexError:
                # A product missing one of the fields raises IndexError; skip that row
                continue
        # Turn the page
        time.sleep(2)
        c = b.find_element(By.XPATH, '/html/body/div[5]/div[2]/div[2]/div[1]/div/div[3]/div/span[1]/a[9]')
        c.click()
        print('Turned the page')
b.close()
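The fixed time.sleep calls above are the fragile part of this script: too short and the page has not finished loading, too long and the crawl wastes time. A minimal sketch of Selenium's explicit waits as a drop-in replacement; the class name 'gl-item' for a result tile is an assumption about JD's markup, not something taken from the original:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Block for at most 10 seconds, returning as soon as at least one result
# tile is present in the DOM; raises TimeoutException otherwise.
# 'gl-item' is an assumed class name for a JD result tile.
WebDriverWait(b, 10).until(
    EC.presence_of_element_located((By.CLASS_NAME, 'gl-item'))
)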
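A structural weakness of the parsing loop: each field is fetched as a page-wide list and the six lists are matched up by index k-1, so a single product that lacks one field (say, no shop link) shifts every later row by one. A sketch of a per-item alternative on the same lxml tree; the relative paths simply reuse the tails of the absolute XPaths above:

root = etree.HTML(b.page_source)
# One <li> per product tile; relative XPaths are evaluated inside each tile.
for item in root.xpath('/html/body/div[5]/div[2]/div[2]/div[1]/div/div[2]/ul/li'):
    name = item.xpath('./div/div[3]/a/em/text()')
    price = item.xpath('./div/div[2]/strong/i/text()')
    # A missing field now yields an empty list for this tile only, instead
    # of shifting the indices of every later product.
    print(name[0].strip() if name else '', price[0] if price else '')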
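Finally, the next-page locator ending in span[1]/a[9] picks the pager link by position, which breaks whenever the pager renders a different number of links (first page, last page, short result sets). If the next button carries a stable class, the click becomes sturdier; 'pn-next' is a guess at JD's class name, not verified here:

# Hypothetical: assumes JD marks its next-page button with class 'pn-next'.
b.find_element(By.CLASS_NAME, 'pn-next').click()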
For learning purposes only.
Copyright notice: this is an original article by qq_48276142, licensed under CC 4.0 BY-SA. Please include a link to the original source and this notice when reposting.