首页 > 编程 > Python > 正文

Python使用Selenium模块模拟浏览器抓取斗鱼直播间信息示例

2020-02-15 22:26:23
字体:
来源:转载
供稿:网友

本文实例讲述了Python使用Selenium模块模拟浏览器抓取斗鱼直播间信息。分享给大家供大家参考,具体如下:

"""Scrape Douyu live-room listings with Selenium and store them in MongoDB.

Drives a headless PhantomJS browser through the Douyu directory pages,
parses each page with BeautifulSoup and inserts one document per live
room into the ``douyu.zhibo`` collection.
"""

import time
from multiprocessing import Pool
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pymongo.errors import PyMongoError

# MongoDB connection settings
MONGO_HOST = "localhost"
MONGO_DATABASE = "douyu"
MONGO_TABLE = "zhibo"
client = MongoClient(host=MONGO_HOST)
db = client[MONGO_DATABASE]

# PhantomJS command-line options
# See http://phantomjs.org/api/command-line.html
SERVICE_ARGS = ['--disk-cache=true', '--load-images=false']
# driver = webdriver.Chrome()  # headed alternative for debugging
driver = webdriver.PhantomJS(service_args=SERVICE_ARGS)  # headless
delay = 10  # explicit-wait timeout in seconds
wait = WebDriverWait(driver, delay)
driver.maximize_window()


def get_total_pages():
    """Open the directory, scrape page 1 and return the total page count.

    Returns:
        int: number of pages reported by the pager widget.
    """
    url = 'https://www.douyu.com/directory/all'
    driver.get(url)
    # The pager item immediately after the "..." dot holds the last page
    # number.  find_element(By.CSS_SELECTOR, ...) works on both Selenium 3
    # and 4; the find_element_by_* helpers were removed in Selenium 4.
    pages = int(driver.find_element(
        By.CSS_SELECTOR, '.shark-pager-dot + .shark-pager-item').text)
    print("正在获取第1页数据")
    room_list = get_rooms_by_beautifulsoup()
    save_to_monogodb(room_list)
    return pages


def parse_page(page_num, max_retries=3):
    """Jump to page ``page_num``, scrape it and persist the rooms.

    Args:
        page_num: 1-based page index to fetch.
        max_retries: how many times to retry after a timeout before
            giving up (the original recursed without bound).
    """
    print("正在获取第%d页数据" % page_num)
    try:
        page_num_box = wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "input.jumptxt")))
        go_btn = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'a.shark-pager-submit')))
        page_num_box.clear()
        page_num_box.send_keys(page_num)
        go_btn.click()
        # Wait until the pager highlights the requested page, i.e. the new
        # page has actually rendered.  By.CLASS_NAME cannot be used here:
        # compound class names ("shark-pager-item current") are rejected.
        wait.until(
            EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, '.shark-pager-item.current'),
                str(page_num)))
        room_list = get_rooms_by_beautifulsoup()
        save_to_monogodb(room_list)
    except TimeoutException:
        print("请求第%d页失败" % page_num)
        if max_retries <= 0:
            return  # give up on this page after exhausting retries
        print("尝试重新获取第%d页" % page_num)
        return parse_page(page_num, max_retries - 1)


def get_rooms_by_beautifulsoup():
    """Parse the current page source with bs4.

    Yields:
        dict: room name, viewer count, tag and host name for each live
        room listed on the page.
    """
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, "ul#live-list-contentbox > li")))
    soup = BeautifulSoup(driver.page_source, 'lxml')
    for room in soup.select('ul#live-list-contentbox > li'):
        yield {
            'room_name': room.find(
                'h3', attrs={'class': 'ellipsis'}).get_text(strip=True),
            'view_count': room.find('span', class_='dy-num fr').text,
            'tag': room.find('span', class_='tag ellipsis').text,
            'hostname': room.find('span', class_='dy-name ellipsis fl').text,
        }


def save_to_monogodb(room_list):  # (sic: name kept for compatibility)
    """Insert every room dict into MongoDB, logging success or failure.

    Args:
        room_list: iterable of room dicts from get_rooms_by_beautifulsoup.
    """
    for room in room_list:
        try:
            # insert() is deprecated in PyMongo; insert_one() is the
            # supported single-document API.
            db[MONGO_TABLE].insert_one(room)
            print("mongodb插入数据成功:", room)
        except PyMongoError as e:
            print("mongodb插入数据失败:", room, e)


if __name__ == '__main__':
    total_pages = 0  # pre-bind so the finally clause cannot hit NameError
    try:
        total_pages = get_total_pages()
        for page_num in range(2, total_pages + 1):
            parse_page(page_num)
    except Exception as e:
        print("出错了", e)
    finally:
        print("共有%d页" % total_pages)
        # quit() terminates the whole PhantomJS process; close() only
        # closes the window and would leak the browser process.
        driver.quit()
发表评论 共有条评论
用户名: 密码:
验证码: 匿名发表