Writing a Python Crawler from Scratch (4) -- Integrating the Code
2017-01-12 16:24
Let's integrate the earlier code and extract it into functions. A Sina news crawler then needs the following code:
import requests
from bs4 import BeautifulSoup
import re
import json

# Collect the links to the individual news articles from the list page
def getNewsURLs(url):
    newsURL = []
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('.news-item'):
        # only .news-item blocks that contain an <h2> carry an article link
        if len(news.select('h2')) > 0:
            newsURL.append(news.select('h2 a')[0]['href'])
    return newsURL

# Extract the details of a single article: id, title, source, time, body, editor
def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # the news id is embedded in the URL, e.g. .../doc-i<newsid>.shtml
    m = re.search(r'doc-i(.+?)\.shtml', newsurl)
    result['newsid'] = m.group(1)
    if len(soup.select('#artibodyTitle')) > 0:
        result['title'] = soup.select('#artibodyTitle')[0].text
    else:
        result['title'] = ''
    # #navtimeSource holds the publication time (first node) and the source (second node)
    if len(soup.select('#navtimeSource')) > 0:
        result['dt'] = soup.select('#navtimeSource')[0].contents[0].strip()
        result['newssource'] = soup.select('#navtimeSource')[0].contents[1].text.strip()
    else:
        result['dt'] = ''
        result['newssource'] = ''
    if len(soup.select('#artibody p')) > 0:
        # the last <p> is the editor line, so leave it out of the article body
        result['article'] = ''.join(p.text.strip() for p in soup.select('#artibody p')[:-1])
    else:
        result['article'] = ''
    if len(soup.select('.article-editor')) > 0:
        # keep only the editor's name, dropping the "责任编辑:" prefix
        result['editor'] = soup.select('.article-editor')[0].text.split('责任编辑:')[-1].strip()
    else:
        result['editor'] = ''
    return result

# Fetch the comment count of one article from Sina's comment API
def getCommentCount(newsid):
    commentURL = "http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&newsid=comos-{}&group=&compress=0&ie=utf-8&oe=utf-8&page=1&page_size=20"
    comments = requests.get(commentURL.format(newsid))
    # the endpoint returns JSONP-style text ("var data={...}"),
    # so cut the prefix off before parsing the JSON payload
    jd = json.loads(comments.text.strip()[len('var data='):])
    return jd['result']['count']['total']
Let's test it. Since the crawl brings back a lot of data, I'll just take newsURL[0], the first entry, for the test; a minimal sketch follows the note below.
One more note: re and json ship with Python's standard library, so they never need (and cannot) be installed with pip; if import requests or from bs4 import BeautifulSoup fails instead, run pip install requests beautifulsoup4 from cmd.
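For reference, here is a minimal sketch of such a test. The list-page URL is my assumption; substitute whichever Sina news list page you are actually crawling:

if __name__ == '__main__':
    # the list-page URL below is an assumption; adjust it to your target channel
    newsURL = getNewsURLs('http://news.sina.com.cn/china/')
    # first article only, to keep the output small
    detail = getNewsDetail(newsURL[0])
    detail['comments'] = getCommentCount(detail['newsid'])
    print(detail)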