Python 多线程抓取天涯帖子内容示例
使用 re、urllib、threading 模块多线程抓取天涯帖子内容。将 url 设置为需抓取的天涯帖子第一页的地址,将 file_name 设置为下载后保存的文件名。
# coding:utf-8
import os
import re
import threading
import time
import urllib.request
class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of a Tianya forum thread.

    Each worker fetches a single page URL, extracts the original
    poster's entries with a regex, and stores the result in a shared
    dict keyed by page number so the pages can be written out in order.
    """

    def __init__(self, url, num, dt):
        # url: page URL to fetch; num: page number, used as the dict key;
        # dt: dict shared by all workers that collects the results.
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print('downloading from %s' % self.url)
        self.down_text()

    def down_text(self):
        """Fetch self.url and store the page's extracted posts under self.num."""
        # Decode to str so the (str) regex below can match (Python 3 returns bytes).
        html_content = urllib.request.urlopen(self.url).read().decode('utf-8', 'ignore')
        # Capture (timestamp, post body) pairs belonging to the thread starter.
        text_pattern = re.compile(
            r'<span>时间:(.*?)</span>.*?'
            r'<!-- <div class="host-ico">楼主</div> -->.*?'
            r'<div class="bbs-content.*?>\s*(.*?)</div>',
            re.DOTALL)
        text = text_pattern.findall(html_content)
        # Join each (time, body) pair with blank lines between them.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Return the total page count parsed from the thread's first page.

    Scrapes the pagination bar for the number immediately preceding the
    "下页" (next page) link.  Returns None when the pattern is not found
    (e.g. a single-page thread or a layout change).
    """
    html_page = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        return int(page_result.group(1))
    return None
def write_text(dict, fn):
    """Write the downloaded pages to file *fn* in page-number order.

    *dict* maps page number -> list of post texts for that page.
    (The parameter name shadows the builtin ``dict``; it is kept for
    backward compatibility with existing callers.)
    """
    pages = dict  # local alias to avoid using the shadowed builtin name below
    with open(fn, 'w') as tx_file:
        # Iterate keys in sorted order: same result as range(1, n+1) for
        # complete downloads, but robust if a page is missing.
        for num in sorted(pages):
            for tx in pages[num]:
                # Convert HTML line breaks to CRLF and drop spaces.
                tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
                tx_file.write(tx.strip() + '\r\n' * 4)
def main():
    """Download every page of the target thread and save all posts to one file."""
    # First page of the Tianya thread to scrape.
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'  # output file, written in the current directory
    my_page = page(url)
    if not my_page:
        # Pagination bar not found: assume a single-page thread instead of
        # crashing on range(1, None + 1).
        my_page = 1
    my_dict = {}
    print('page num is : %s' % my_page)
    threads = []
    # Build one URL per page and download them concurrently.
    for num in range(1, my_page + 1):
        # Replace the trailing '1.shtml' of the first-page URL with '<num>.shtml'.
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)
    # Wait for every download to finish before writing the combined file.
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print('All download finished. Save file at directory: %s' % os.getcwd())
if __name__ == '__main__':
    main()
down_tianya.py
class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of a Tianya forum thread.

    Fetches a single page URL, extracts the thread starter's posts with
    a regex anchored on the per-post ``atl-item`` container, and stores
    the result in a shared dict keyed by page number.
    """

    def __init__(self, url, num, dt):
        # url: page URL to fetch; num: page number, used as the dict key;
        # dt: dict shared by all workers that collects the results.
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print('downloading from %s' % self.url)
        self.down_text()

    def down_text(self):
        """Fetch self.url and store the page's extracted posts under self.num."""
        # Decode to str so the (str) regex below can match (Python 3 returns bytes).
        html_content = urllib.request.urlopen(self.url).read().decode('utf-8', 'ignore')
        # Anchor on the post container so each match stays within one post.
        text_pattern = re.compile(
            r'<div class="atl-item".*?<span>时间:(.*?)</span>.*?'
            r'<!-- <div class="host-ico">楼主</div> -->.*?'
            r'<div class="bbs-content.*?>\s*(.*?)</div>',
            re.DOTALL)
        text = text_pattern.findall(html_content)
        # Join each (time, body) pair with blank lines between them.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Return the total page count parsed from the thread's first page.

    Looks for the page number that immediately precedes the "下页"
    (next page) link in the pagination bar.  Returns None when no such
    link exists (single-page thread or layout change).
    """
    html_page = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        return int(page_result.group(1))
    return None
def write_text(dict, fn):
    """Write downloaded pages to *fn*, ordered by page number.

    *dict* maps page number -> list of post texts for that page.
    (The parameter name shadows the builtin ``dict``; it is kept for
    backward compatibility with existing callers.)
    """
    with open(fn, 'w') as out:
        # Sorted keys give the same order as range(1, n+1) for complete
        # downloads, without raising KeyError if a page is missing.
        for key in sorted(dict):
            for tx in dict[key]:
                # Convert HTML line breaks to CRLF and drop spaces.
                tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
                out.write(tx.strip() + '\r\n' * 4)
def main():
    """Download every page of the target thread and save all posts to one file."""
    # First page of the Tianya thread to scrape.
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'  # output file, written in the current directory
    my_page = page(url)
    if not my_page:
        # Pagination bar not found: assume a single-page thread instead of
        # crashing on range(1, None + 1).
        my_page = 1
    my_dict = {}
    print('page num is : %s' % my_page)
    threads = []
    # Build one URL per page and download them concurrently.
    for num in range(1, my_page + 1):
        # Replace the trailing '1.shtml' of the first-page URL with '<num>.shtml'.
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)
    # Wait for every download to finish before writing the combined file.
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print('All download finished. Save file at directory: %s' % os.getcwd())
if __name__ == '__main__':
    main()
您可能感兴趣的文章:
- python多线程抓取天涯帖子内容示例
- python 抓取天涯帖子内容并保存
- python抓取网页内容示例分享
- Python之多线程爬虫抓取网页图片的示例代码
- Python selenium抓取微博内容的示例代码
- 尝试使用Python多线程抓取代理服务器IP地址的示例
- python抓取网页内容示例分享
- 尝试使用Python多线程抓取代理服务器IP地址的示例
- Python简单实现网页内容抓取功能示例
- Python爬虫实战(1)——百度贴吧抓取帖子并保存内容和图片
- Python requests 多线程抓取 出现HTTPConnectionPool Max retires exceeded异常
- [Python]网络爬虫(二):利用urllib2通过指定的URL抓取网页内容
- Python网页抓取:获取页面中某段内容的xpath
- [Python]网络爬虫(二):利用urllib2通过指定的URL抓取网页内容
- 运用python抓取博客园首页的所有数据,而且定时持续抓取新公布的内容存入mongodb中
- Python解决抓取内容乱码问题(decode和encode解码)
- Pyhton实例,抓取百度词条关于Python的内容(二)
- python beautifulsoup 抓取网页正文内容
- 【python爬虫】通过python多线程的抓取新浪新闻的标题时间评论信息
- python爬虫批量抓取新浪微博用户ID及用户信息、微博内容