
Crawl POJ problems

A small Scrapy project that crawls the problem list on poj.org and extracts each problem's title, link, and description text. The two files involved are settings.py and the spider.

2016-06-03 23:25
settings.py:

# -*- coding: utf-8 -*-

# Scrapy settings for poj project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'poj'

SPIDER_MODULES = ['poj.spiders']
NEWSPIDER_MODULE = 'poj.spiders'

ITEM_PIPELINES = {'poj.pipelines.PojPipeline': 300}

USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.99 Safari/537.36'

COOKIES_ENABLED = True

SPIDER_MIDDLEWARES = {
    'scrapy.spidermiddlewares.referer.RefererMiddleware': True,
}

REFERER_ENABLED = True

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'poj (+http://www.yourdomain.com)'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'poj.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'poj.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'poj.pipelines.SomePipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
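
The spider below imports PojItem from poj.items, but items.py isn't shown in the post. Judging from the fields the spider fills in (title, link, content), a minimal item definition would look like this (a sketch inferred from the spider, not the author's original file):

# items.py -- minimal sketch inferred from the fields the spider uses
# -*- coding: utf-8 -*-
import scrapy

class PojItem(scrapy.Item):
    title = scrapy.Field()    # problem title text from the list page
    link = scrapy.Field()     # absolute URL of the problem page
    content = scrapy.Field()  # problem text extracted in parse2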

spider.py:

#!/usr/bin/env python
# coding=utf-8
from scrapy.spiders import Spider
from poj.items import PojItem
from bs4 import BeautifulSoup
from scrapy.http import Request
from scrapy.selector import Selector

class PojSpider(Spider):
    name = 'poj'
    allowed_domains = ['poj.org']
    download_delay = 0.1
    start_urls = ['http://poj.org/problemlist?volume=1']

   # def start_requests(self):
   #     headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36'}
   #     print 'here'*10
   #     yield Request('http://poj.org/problemlist?volume=1',headers=headers,callback=self.parse)

#    def parse(self,response):
#    #    item=PojItem()
#    #    item['title']='hello word'
#    #    item['link']='www.'
#    #    item['content']='wdcwdc'
#    #    return item
#        soup=BeautifulSoup(response.body)
#        tags=soup.findAll("td",attrs={"align":"left"})
#        print 'tags:',tags
#        print '!'*10
#        for tag in tags:
#            item=PojItem()
#            item['title']=tag.text
#            item['link']=tag.find('a').get('href')
#            next_url='http://poj.org/'+item['link']
#            print next_url
#            yield Request(url=next_url,meta={'item':item},callback=self.parse2)

    def parse(self, response):
        # On the problem list page, each problem row is a <tr align="center">;
        # its <td align="left"> cell holds an <a> with the title text and the problem href.
        sel = Selector(response)
        sites = sel.xpath('//tr[@align="center"]/td[@align="left"]/a')
        for site in sites:
            item = PojItem()
            item['title'] = site.xpath("text()").extract()
            item['link'] = site.xpath("@href").extract()
            next_url = "http://poj.org/" + item['link'][0]
            item['link'] = next_url
            print next_url
            # Follow the problem page and carry the half-filled item along in meta
            yield Request(url=next_url, meta={'item': item}, callback=self.parse2)

   # def parse2(self,response):
   #     soup=BeautifulSoup(response.body)
   #     tag=soup.find("div",attrs={"class":"rich_media_content","id":"js_content"})
   #     content_list=[tag_i.text for tag_i in tag.findAll('p')]
   #     content="".join(content_list)
   #    # content='wdc'
   #     item=response.meta['item']
   #     item['content']=content
   #     return item

    def parse2(self, response):
        # On the problem page, the problem text sections are <div class="ptx"> blocks
        sel = Selector(response)
        tags = sel.xpath('//div[@class="ptx"]')
        #content_list=tags[0].xpath("text()").extract()
        content_list = [tag.xpath("text()").extract() for tag in tags]
        print type(content_list)
       # print 'content_list:',content_list
       # content="".join(content_list)
        item = response.meta['item']
        item['content'] = content_list
        return item
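
settings.py enables poj.pipelines.PojPipeline (priority 300), but pipelines.py isn't shown in the post either, so what it actually does is unknown. As one possible sketch, a pipeline that writes every item to a JSON-lines file could look like this (the file name and the JSON-lines behavior are assumptions, not the original author's code):

# pipelines.py -- hypothetical sketch, not from the original post
# -*- coding: utf-8 -*-
import codecs
import json

class PojPipeline(object):

    def open_spider(self, spider):
        # 'poj_problems.jl' is an assumed output file name
        self.file = codecs.open('poj_problems.jl', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # one JSON object per line: {"title": ..., "link": ..., "content": ...}
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()

Either way, the crawl is started from the project root with scrapy crawl poj; adding -o problems.json would let Scrapy's built-in feed export save the items even without a custom pipeline.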