
Web Crawler: Scraping a Novel, Part 3

2019-11-11
from urllib import request
from bs4 import BeautifulSoup
import re


def getHtml(url):
    # Fetch a chapter page and return it as text.
    # Note: decoding as GBK is an assumption about this site; switch the
    # codec (e.g. to 'utf-8') if the target site serves a different encoding.
    page = request.urlopen(url)
    html_doc = page.read().decode('gbk', errors='ignore')
    html_doc = html_doc.replace(u'\xa0', u' ')    # turn non-breaking spaces into normal spaces
    html_doc = html_doc.replace("<br/>", "\n")    # preserve line breaks in the extracted text
    return html_doc


def getTitle(soup):
    return soup.title.string


def getContent(soup):
    # The chapter body sits in the element with id="content".
    return soup.find(id="content").get_text()


def getNextURL(soup):
    # The "next chapter" link lives in the element with id="pager_next";
    # extract its numeric file name (e.g. 7406091.html).
    next_init_url = str(soup.find(id="pager_next"))
    next_url = re.search(r"\d+\.html", next_init_url)
    if next_url is None:
        return False
    return next_url.group()


def getBook(url, name):
    # The ./res directory must already exist.
    book = open("./res/" + name, "w+")
    while bool(url):
        html_doc = getHtml(url)
        soup = BeautifulSoup(html_doc, 'html.parser')
        title = getTitle(soup)
        book_content = getContent(soup)
        book.write(title + book_content)
        next_page = getNextURL(soup)
        if next_page:
            # Swap the file name in the current URL for the next chapter's.
            url = re.sub(r"\d+\.html", next_page, url)
        else:
            break
    if not book.closed:
        book.close()
    print("ok")


url = "http://www.biqulou.net/24/24835/7406090.html"
# url = "http://www.biqulou.net/24/24835/14627850.html"
getBook(url, "大主宰")
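A note on how the script walks from chapter to chapter: getNextURL pulls the numeric file name of the next page out of the id="pager_next" element, and getBook splices it into the current URL with re.sub. A minimal sketch of just that step, using a made-up next-page file name:

import re

# Hypothetical values for illustration; getNextURL would normally supply next_page.
url = "http://www.biqulou.net/24/24835/7406090.html"
next_page = "7406091.html"
url = re.sub(r"\d+\.html", next_page, url)
print(url)    # http://www.biqulou.net/24/24835/7406091.html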
This is my (self-taught, rough-and-ready) use of the third-party library BeautifulSoup; comments and corrections are welcome.
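For readers new to BeautifulSoup, here is a small self-contained sketch of the three calls the crawler relies on (soup.title.string, find(id=...), and get_text()), run against a made-up HTML snippet rather than a live chapter page:

from bs4 import BeautifulSoup

# Invented HTML for illustration only; the real pages use the same ids.
html_doc = """
<html><head><title>Chapter 1</title></head>
<body><div id="content">First line.<br/>Second line.</div>
<a id="pager_next" href="7406091.html">Next</a></body></html>
"""
soup = BeautifulSoup(html_doc, "html.parser")
print(soup.title.string)                     # Chapter 1
print(soup.find(id="content").get_text())    # First line.Second line.
print(soup.find(id="pager_next")["href"])    # 7406091.html

The "html.parser" argument selects the parser bundled with the standard library; lxml can be used instead if it is installed.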