
A Python Web Crawler Example, Explained

2020-02-15 21:55:35

This post walks through a Python web crawler example, focusing on the crawler's architecture and the key modules that make it up: the URL manager, the HTML downloader, and the HTML parser.

Simple Crawler Architecture

The scheduler drives the whole loop: it takes a URL from the URL manager, has the HTML downloader fetch the page, passes the page to the HTML parser to extract new URLs and the page data, feeds the new URLs back into the URL manager, and hands the data to the HTML outputer.

Program entry point (the crawler scheduler)

# coding: utf-8
import datetime

# The four modules below are assumed to live in a local package named maya_Spider
from maya_Spider import url_manager, html_downloader, html_parser, html_outputer


class Spider_Main(object):
    def __init__(self):
        # URL manager
        self.urls = url_manager.UrlManager()
        # HTML downloader
        self.downloader = html_downloader.HtmlDownloader()
        # HTML parser
        self.parser = html_parser.HtmlParser()
        # HTML outputer
        self.outputer = html_outputer.HtmlOutputer()

    # Crawler scheduling loop
    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 10:  # stop after ten pages
                    break
                count = count + 1
            except Exception:
                print('craw failed')
        self.outputer.output_html()


if __name__ == '__main__':
    # Crawl entry point
    root_url = 'http://baike.baidu.com/view/21087.htm'
    print('Timer started..............')
    start_time = datetime.datetime.now()
    obj_spider = Spider_Main()
    obj_spider.craw(root_url)
    end_time = datetime.datetime.now()
    print('Total time: %ds' % (end_time - start_time).seconds)
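Running the module directly prints one line per crawled page. The output looks roughly like the following; the exact URLs after the first depend on which links the parser happens to pick up:

Timer started..............
craw 1 : http://baike.baidu.com/view/21087.htm
craw 2 : http://baike.baidu.com/item/...
...
craw 10 : http://baike.baidu.com/item/...
Total time: ...s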

URL Manager

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
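A quick interactive check of the de-duplication logic; the URL here is just illustrative:

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/item/Python')
manager.add_new_url('http://baike.baidu.com/item/Python')  # duplicate, ignored
print(manager.has_new_url())  # True: one URL is waiting

url = manager.get_new_url()   # pops the URL and records it in old_urls
manager.add_new_url(url)      # already crawled, so ignored
print(manager.has_new_url())  # False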

HTML Downloader

import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        # Masquerade as a browser; some sites reject requests without a User-Agent
        user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        headers = {'User-Agent': user_agent}
        # Build the request
        req = urllib.request.Request(url, headers=headers)
        # Fetch the page
        response = urllib.request.urlopen(req)
        # In Python 3, read() returns bytes, not str; decode it (UTF-8 by default)
        return response.read().decode()
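A minimal sketch of exercising the downloader on its own, assuming the Baidu Baike entry page above is still reachable. Note that decode() with no argument assumes UTF-8; a page in another encoding would need its charset passed explicitly:

downloader = HtmlDownloader()
html = downloader.download('http://baike.baidu.com/view/21087.htm')
print(len(html))   # size of the decoded page source in characters
print(html[:200])  # peek at the start of the page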

HTML Parser

import re
import urllib.parse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Entry links look like /item/xxx
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link['href']
            # Resolve the relative link against the current page URL
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    # Extract the title and summary
    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1> holds the title
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary"> holds the summary
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_content):
        if page_url is None or html_content is None:
            return None, None
        # html_content is already a decoded str, so no from_encoding is needed
        soup = BeautifulSoup(html_content, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
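The scheduler also imports an html_outputer module, but the original post never lists it. Here is a minimal sketch of what HtmlOutputer might look like, assuming it only needs the collect_data and output_html methods called by the scheduler and the url/title/summary keys produced by the parser (HTML-escaping is omitted for brevity):

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # one dict per crawled page

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # Dump the collected records as a simple HTML table
        with open('output.html', 'w', encoding='utf-8') as fout:
            fout.write('<html><head><meta charset="utf-8"></head><body><table>')
            for data in self.datas:
                fout.write('<tr>')
                fout.write('<td>%s</td>' % data['url'])
                fout.write('<td>%s</td>' % data['title'])
                fout.write('<td>%s</td>' % data['summary'])
                fout.write('</tr>')
            fout.write('</table></body></html>')

Opening output.html in a browser then shows one row per crawled page.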