This article describes a Python-based example for viewing the picture newspaper 《参考消息》 (Reference News): it automatically downloads the current day's newspaper page images to a local folder for offline viewing. The implementation code is as follows:
# coding=gbk
# Download today's picture newspaper (Reference News) from www.hqck.net
# and save every page image into a local, date-named folder.
import urllib2
import socket
import re
import time
import os
import sys

# timeout in seconds
timeout = 10
socket.setdefaulttimeout(timeout)

home_url = "http://www.hqck.net"
home_page = ""
try:
    home_page_context = urllib2.urlopen(home_url)
    home_page = home_page_context.read()
    print "Read home page finished."
    print "-------------------------------------------------"
except urllib2.URLError, e:
    print e
    sys.exit()
except:
    print "Failed to read home page."
    sys.exit()

# Find today's newspaper link on the home page.
reg_str = r'<a class="item-baozhi" href="/arc/jwbt/ckxx/\d{4}/\d{4}/\w+\.html" rel="external nofollow" ><span class.+>.+</span></a>'
news_url_reg = re.compile(reg_str)
today_cankao_news = news_url_reg.findall(home_page)
if len(today_cankao_news) == 0:
    print "Cannot find today's news!"
    sys.exit()

my_news = today_cankao_news[0]
print "Latest news link = " + my_news
print

# Cut the relative URL ("/arc/...html") out of the matched <a> tag.
url_s = my_news.find("/arc/")
url_e = my_news.find(".html") + 5
print "Link index = [" + str(url_s) + "," + str(url_e) + "]"
my_news = my_news[url_s:url_e]
print "part url = " + my_news
full_news_url = home_url + my_news
print "full url = " + full_news_url
print

# Create the local folder <root>\<YYYY-MM-DD>\ for today's images.
image_folder = "E:\\new_folder\\"
if not os.path.exists(image_folder):
    os.makedirs(image_folder)
today_num = time.strftime('%Y-%m-%d', time.localtime(time.time()))
image_folder = image_folder + today_num + "\\"
if not os.path.exists(image_folder):
    os.makedirs(image_folder)
print "News image folder = " + image_folder
print

# The first page is "<context>.html"; later pages are "<context>_<n>.html".
context_uri = full_news_url[0:-5]
first_page_url = context_uri + ".html"
try:
    first_page_context = urllib2.urlopen(first_page_url)
    first_page = first_page_context.read()
except urllib2.HTTPError, e:
    print e.code
    sys.exit()

# Parse the page count from the "共N页" marker (the page is gbk encoded,
# so "共" occupies two bytes in the raw string).
tot_page_index = first_page.find("共")
tmp_str = first_page[tot_page_index:tot_page_index+10]
end_s = tmp_str.find("页")
page_num = tmp_str[2:end_s]
print page_num
page_count = int(page_num)
print "Total " + page_num + " pages:"
print

page_index = 1
download_suc = True
while page_index <= page_count:
    page_url = context_uri
    if page_index > 1:
        page_url = page_url + "_" + str(page_index)
    page_url = page_url + ".html"
    print "News page link = " + page_url
    try:
        news_img_page_context = urllib2.urlopen(page_url)
    except urllib2.URLError, e:
        print e.reason
        download_suc = False
        break
    news_img_page = news_img_page_context.read()
    #f = open("e:\\page.html", "w")
    #f.write(news_img_page)
    #f.close()

    # The newspaper image is the first "http://image...jpg" URL on the page.
    reg_str = r'http://image\S+jpg'
    image_reg = re.compile(reg_str)
    image_results = image_reg.findall(news_img_page)
    if len(image_results) == 0:
        print "Cannot find news page " + str(page_index) + "!"
        download_suc = False
        break
    image_url = image_results[0]
    print "News image url = " + image_url

    # Stream the image to disk in 10 KB chunks.
    news_image_context = urllib2.urlopen(image_url)
    image_name = image_folder + "page_" + str(page_index) + ".jpg"
    imgf = open(image_name, 'wb')
    print "Getting image..."
    try:
        while True:
            data = news_image_context.read(1024*10)
            if not data:
                break
            imgf.write(data)
        imgf.close()
    except:
        download_suc = False
        print "Save image " + str(page_index) + " failed!"
        print "Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1])
    else:
        print "Save image " + str(page_index) + " succeed!"
    print
    page_index = page_index + 1

if download_suc:
    print "News download succeed! Path = \"" + str(image_folder) + "\""
    print "Enjoy it! ^^"
else:
    print "news download failed!"
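The script above targets Python 2 (urllib2 and print statements). For readers on Python 3, here is a minimal sketch of the same flow using urllib.request. It assumes the site's layout is unchanged (the "/arc/jwbt/ckxx/..." link pattern, "_<n>.html" paging, the "共N页" page counter, and "http://image...jpg" image URLs), and the save directory E:\new_folder is only an illustrative placeholder; adjust it to your own environment.

# Python 3 sketch of the same download flow (assumptions noted above).
import os
import re
import time
import urllib.request

HOME_URL = "http://www.hqck.net"
SAVE_ROOT = r"E:\new_folder"   # hypothetical local path, change as needed

def fetch(url, timeout=10):
    """Download a URL and return the decoded page text (the site uses gbk)."""
    with urllib.request.urlopen(url, timeout=timeout) as resp:
        return resp.read().decode("gbk", errors="ignore")

home_page = fetch(HOME_URL)
match = re.search(r'href="(/arc/jwbt/ckxx/\d{4}/\d{4}/\w+\.html)"', home_page)
if match is None:
    raise SystemExit("Cannot find today's news!")

# Strip the trailing ".html" so later pages can be built as "<context>_<n>.html".
context_uri = HOME_URL + match.group(1)[:-5]
first_page = fetch(context_uri + ".html")

# Parse the page count from the "共N页" marker.
count_match = re.search(r"共(\d+)页", first_page)
page_count = int(count_match.group(1)) if count_match else 1

folder = os.path.join(SAVE_ROOT, time.strftime("%Y-%m-%d"))
os.makedirs(folder, exist_ok=True)

for index in range(1, page_count + 1):
    suffix = "" if index == 1 else "_%d" % index
    page = fetch(context_uri + suffix + ".html")
    images = re.findall(r"http://image\S+jpg", page)
    if not images:
        print("Cannot find news page %d!" % index)
        break
    # Save the first matching image as page_<n>.jpg.
    with urllib.request.urlopen(images[0], timeout=10) as img:
        with open(os.path.join(folder, "page_%d.jpg" % index), "wb") as out:
            out.write(img.read())
    print("Saved page %d" % index)

Decoding the pages with gbk mirrors the original script's # coding=gbk assumption; the images themselves are read and written as raw bytes, so no decoding is applied to them.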