python使用scrapy解析js示例
2016-05-13 10:35
791 查看
http://www.jb51.net/article/46104.htm
代码如下:
# Example: driving a Scrapy CrawlSpider through Selenium RC so that
# JavaScript-rendered pages are fully loaded before parsing.
# Import paths are for the Scrapy version contemporary with this example
# (SgmlLinkExtractor lived in scrapy.contrib) -- adjust for modern Scrapy.
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from selenium import selenium


class MySpider(CrawlSpider):
    """Crawl article pages, re-rendering each URL in a real browser
    (Selenium RC) so client-side JavaScript executes before extraction.

    NOTE(review): allowed_domains is 'cnbeta.com' while start_urls point
    at jb51.net, so offsite-link filtering will drop most links --
    confirm which domain is actually intended.
    """

    name = 'cnbeta'
    allowed_domains = ['cnbeta.com']
    start_urls = ['http://www.jb51.net']

    rules = (
        # Follow links matching '/articles/*.htm' and hand each matching
        # page to parse_page. Raw string keeps '\.' a literal dot.
        Rule(SgmlLinkExtractor(allow=(r'/articles/.*\.htm',)),
             callback='parse_page', follow=True),
    )

    def __init__(self):
        """Start the Selenium RC session alongside normal spider setup.

        Requires a Selenium RC server listening on localhost:4444.
        """
        CrawlSpider.__init__(self)
        self.verificationErrors = []  # Selenium verification failures
        self.selenium = selenium("localhost", 4444, "*firefox",
                                 "http://www.jb51.net")
        self.selenium.start()

    def __del__(self):
        """Stop the browser when the spider object is reclaimed."""
        self.selenium.stop()
        print(self.verificationErrors)
        # BUG FIX: CrawlSpider defines no __del__, so the original
        # unconditional CrawlSpider.__del__(self) raised AttributeError.
        # Delegate to a parent finalizer only if one actually exists.
        parent_del = getattr(CrawlSpider, '__del__', None)
        if parent_del is not None:
            parent_del(self)

    def parse_page(self, response):
        """Open the responded URL in the Selenium browser and let JS run.

        :param response: the Scrapy response for a matched article page.
        """
        self.log('Hi, this is an item page! %s' % response.url)
        sel = Selector(response)
        from webproxy.items import WebproxyItem

        # Re-drive the real browser to the same URL so JavaScript executes.
        # NOTE(review): this rebinding shadows the Selector above, exactly
        # as in the original example -- the Selector result is unused.
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        import time
        time.sleep(2.5)  # extra settle time for asynchronous JS
代码如下:
# Example: driving a Scrapy CrawlSpider through Selenium RC so that
# JavaScript-rendered pages are fully loaded before parsing.
# Import paths are for the Scrapy version contemporary with this example
# (SgmlLinkExtractor lived in scrapy.contrib) -- adjust for modern Scrapy.
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from selenium import selenium


class MySpider(CrawlSpider):
    """Crawl article pages, re-rendering each URL in a real browser
    (Selenium RC) so client-side JavaScript executes before extraction.

    NOTE(review): allowed_domains is 'cnbeta.com' while start_urls point
    at jb51.net, so offsite-link filtering will drop most links --
    confirm which domain is actually intended.
    """

    name = 'cnbeta'
    allowed_domains = ['cnbeta.com']
    start_urls = ['http://www.jb51.net']

    rules = (
        # Follow links matching '/articles/*.htm' and hand each matching
        # page to parse_page. Raw string keeps '\.' a literal dot.
        Rule(SgmlLinkExtractor(allow=(r'/articles/.*\.htm',)),
             callback='parse_page', follow=True),
    )

    def __init__(self):
        """Start the Selenium RC session alongside normal spider setup.

        Requires a Selenium RC server listening on localhost:4444.
        """
        CrawlSpider.__init__(self)
        self.verificationErrors = []  # Selenium verification failures
        self.selenium = selenium("localhost", 4444, "*firefox",
                                 "http://www.jb51.net")
        self.selenium.start()

    def __del__(self):
        """Stop the browser when the spider object is reclaimed."""
        self.selenium.stop()
        print(self.verificationErrors)
        # BUG FIX: CrawlSpider defines no __del__, so the original
        # unconditional CrawlSpider.__del__(self) raised AttributeError.
        # Delegate to a parent finalizer only if one actually exists.
        parent_del = getattr(CrawlSpider, '__del__', None)
        if parent_del is not None:
            parent_del(self)

    def parse_page(self, response):
        """Open the responded URL in the Selenium browser and let JS run.

        :param response: the Scrapy response for a matched article page.
        """
        self.log('Hi, this is an item page! %s' % response.url)
        sel = Selector(response)
        from webproxy.items import WebproxyItem

        # Re-drive the real browser to the same URL so JavaScript executes.
        # NOTE(review): this rebinding shadows the Selector above, exactly
        # as in the original example -- the Selector result is unused.
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        import time
        time.sleep(2.5)  # extra settle time for asynchronous JS
相关文章推荐
- python 学习资料
- Python2.7注意点汇总(函数式编程)
- python 异常处理
- numpy.whl安装
- Python 与数据存储
- python在windows系统中打印中文乱码
- Windows上Python3.5安装Scrapy(lxml) 以及与twisted有关错误的解决
- Python新建/删除文件夹
- Python之路,Day1
- michael的Python笔记(三)
- Python简易爬虫--抓取任意数目百度百科内容
- 【python】零碎总结_转
- python基于phantomjs实现导入图片
- 如何查看Python的内置函数
- Python - 动手写个ORM
- Python 依赖库
- Python笔记:除、取整、取余、乘方
- Python笔记:加减乘除,格式化字符串
- ipython的两种安装方式
- 《Python核心编程》第7章 习题