
Example Code for a Python Crawler That Downloads Documents from the Web

2020-02-15 21:49:26

Recently, while learning Python, I naturally got into web crawling and wrote a small crawler. Starting from an initial URL, it parses each page, uses a regular expression to collect the links still to be crawled, uses BeautifulSoup to extract the text, and saves that text with a simple outputer I wrote myself. The code follows:

spider_main.py

# coding: utf-8
from baike_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        # wire up the four components: URL manager, downloader, parser, outputer
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            print("self.urls.has %s" % self.urls.new_urls)
            try:
                new_url = self.urls.get_new_url()
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 1000:  # safety cap: stop after 1000 pages
                    break
                count = count + 1
            except Exception as e:
                print("craw failed: %s" % e)
        self.outputer.output_html()
        self.outputer.output_txt()


if __name__ == '__main__':
    root_url = "http://www.shushu8.com/jiangnan/longzu2qianzhuan/1"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
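Note that spider_main.py imports two helper modules, html_downloader and html_outputer, that the article never lists. Going only by the single call made above, self.downloader.download(new_url), a minimal downloader might look like the sketch below; the class body is an assumption inferred from that call, not the original author's code.

# html_downloader.py -- a sketch inferred from the download() call above;
# the original file is not shown in the article.
import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        # return raw bytes; the parser decodes them via from_encoding='utf-8'
        return response.read()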

url_manager.py

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs still waiting to be crawled
        self.old_urls = set()  # URLs that have already been crawled

    def add_new_url(self, url):
        print(url)
        if url is None:
            return
        # only queue a URL we have never seen before
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # take an arbitrary pending URL and mark it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        # print('new url is %s' % new_url)
        return new_url

    def add_new_urls(self, urls):
        print("add_new_urls %s" % urls)
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)
            print(url)
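UrlManager keeps two sets so that each URL is crawled at most once: new_urls holds the pending queue and old_urls remembers everything already fetched. The snippet below is a quick standalone check of that dedup behaviour; it is illustrative only and not part of the original article (the example.com URLs are placeholders).

# A quick, hypothetical check of the dedup logic (not from the article):
if __name__ == '__main__':
    m = UrlManager()
    m.add_new_url("http://example.com/1")
    m.add_new_urls({"http://example.com/1", "http://example.com/2"})  # /1 is skipped
    first = m.get_new_url()   # moves one pending URL into old_urls
    m.add_new_url(first)      # already crawled, so it is not re-queued
    print(m.new_urls, m.old_urls)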

html_parser.py

import re
import urllib.parse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        print("parse new_urls %s" % new_urls)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        print(page_url)
        # the chapter title is an <h1> inside the element with class "title"
        title_node = soup.find(class_="title").find("h1")
        print(title_node.get_text())
        res_data['title'] = title_node.get_text()
        print("_get_new_data")
        # the chapter body text is kept in a <pre> tag on this site
        summary_node = soup.find('pre')
        print(summary_node.get_text())
        res_data['summary'] = summary_node.get_text()
        return res_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # on shushu8.com every chapter link contains "/jiangnan/" in its href
        links = soup.find_all('a', href=re.compile(r"/jiangnan/"))
        print(links)
        for link in links:
            new_url = link['href']
            # turn relative hrefs into absolute URLs against the current page
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
            # print(new_full_url)
        return new_urls
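The other module missing from the article is the outputer. The calls in spider_main.py (collect_data, output_html, output_txt) pin down its interface, so a plausible implementation is sketched below; the class body and the output file names output.txt and output.html are assumptions.

# html_outputer.py -- a sketch inferred from the calls in spider_main.py;
# the original file and its output file names are not given in the article.
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_txt(self):
        # dump every collected chapter as plain text, one after another
        with open('output.txt', 'w', encoding='utf-8') as fout:
            for data in self.datas:
                fout.write('%s\n' % data['title'])
                fout.write('%s\n\n' % data['summary'])

    def output_html(self):
        # render the same chapters as a single HTML page
        with open('output.html', 'w', encoding='utf-8') as fout:
            fout.write('<html><head><meta charset="utf-8"></head><body>')
            for data in self.datas:
                fout.write('<h2>%s</h2>' % data['title'])
                fout.write('<pre>%s</pre>' % data['summary'])
            fout.write('</body></html>')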