
scrapy + xpath: scraping an unmentionable website

2017-06-21 01:13
Today we'll crawl a site that provides plenty of motivation. The URL won't be posted here; whether a kindred spirit can work it out is up to fate.


As usual, let's start with items.py:

import scrapy


class AvmooItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    birthday = scrapy.Field()
    age = scrapy.Field()
    height = scrapy.Field()
    cup = scrapy.Field()
    bust = scrapy.Field()
    waistline = scrapy.Field()
    hipline = scrapy.Field()
    birthplace = scrapy.Field()
    Avatar = scrapy.Field()
    designations = scrapy.Field()
    des_imgs = scrapy.Field()
    des_urls = scrapy.Field()


From these fields alone, you can probably guess what the site is.

Next comes the main crawling program.

spider.py

# -*- coding:utf-8 -*-

import os

import scrapy

from AVMOO.items import AvmooItem


class AvmooSpider(scrapy.Spider):
    name = 'AVMOO'
    # allowed_domains takes bare domain names, not URL paths
    allowed_domains = ['xxx.xx', 'jp.netcdn.space']  # ,'xxxx.xx']
    start_urls = ['https://xxx.xx/cn/actresses/']
    base_url = 'https://xxx.xx'

    def parse(self, response):
        # actress index page: follow every avatar link, then the next page
        star_urls = response.xpath('//a[@class="avatar-box text-center"]/@href').extract()
        for star_url in star_urls:
            yield scrapy.Request(star_url, callback=self.star_item)
        next_href = response.xpath('//a[@name="nextpage"]/@href').extract_first()
        if next_href:
            yield scrapy.Request(self.base_url + next_href, callback=self.parse)

    def star_item(self, response):
        # actress detail page: the profile fields are consecutive <p> text nodes
        item = AvmooItem()
        item['name'] = response.xpath('//span[@class="pb-10"]/text()').extract()[0]
        item['birthday'] = response.xpath('//p/text()')[0].extract()
        item['age'] = response.xpath('//p/text()')[1].extract()
        item['height'] = response.xpath('//p/text()')[2].extract()
        item['cup'] = response.xpath('//p/text()')[3].extract()
        item['bust'] = response.xpath('//p/text()')[4].extract()
        item['waistline'] = response.xpath('//p/text()')[5].extract()
        item['hipline'] = response.xpath('//p/text()')[6].extract()
        item['birthplace'] = response.xpath('//p/text()')[7].extract()
        self.mkdir('/home/shitfly/webspider/AVMOO/avmoo', item['name'])
        # mkdir() chdir'd into the actress directory, so the file lands there
        f = open(item['name'] + '.txt', 'w+')
        print 'writing profile info'
        self.save_txt(item, f)
        movie_urls = response.xpath('//a[@class="movie-box"]/@href').extract()
        for movie_url in movie_urls:
            yield scrapy.Request(movie_url, meta={'item1': item}, callback=self.movie_item)
        next_href = response.xpath('//a[@name="nextpage"]/@href').extract_first()
        if next_href:
            yield scrapy.Request(self.base_url + next_href, meta={'item1': item},
                                 callback=self.star_item2)

    def star_item2(self, response):
        # later pages of the filmography: keep threading the item through meta
        item1 = response.meta['item1']
        movie_urls = response.xpath('//a[@class="movie-box"]/@href').extract()
        for movie_url in movie_urls:
            yield scrapy.Request(movie_url, meta={'item1': item1}, callback=self.movie_item)
        next_href = response.xpath('//a[@name="nextpage"]/@href').extract_first()
        if next_href:
            yield scrapy.Request(self.base_url + next_href, meta={'item1': item1},
                                 callback=self.star_item2)

    def movie_item(self, response):
        item = AvmooItem()
        item2 = response.meta['item1']
        item['name'] = item2['name']
        item['designations'] = response.xpath('//span[@style="color:#CC0000;"]/text()').extract()[0]
        base_path = '/home/shitfly/webspider/AVMOO/avmoo' + '/' + item['name']
        self.mkdir(base_path, item['designations'])
        cover = response.xpath('//a[@class="bigImage"]/img/@src').extract()[0]
        # build the list locally: a shared attribute would be clobbered by concurrent callbacks
        des_imgs = [cover]
        thumbs = response.xpath('//a[@class="sample-box"]/div[@class="photo-frame"]/img/@src').extract()
        for des_img in thumbs:
            # turn a thumbnail URL into the full-size one: "xxx-1.jpg" -> "xxxjp-1.jpg"
            des_imgs.append(des_img.replace('-', 'jp-', 1))
        item['des_imgs'] = des_imgs
        yield item

    def mkdir(self, base_path, title):  # title is the directory name
        title = title.strip()
        path = os.path.join(base_path, title)
        if not os.path.exists(path):
            print 'creating directory', title
            os.makedirs(path)
        else:
            print title, 'already exists'
        os.chdir(path)
        return True

    def save_txt(self, item, f):
        # write the profile fields, one per line, then close the file
        inf = ['age', 'birthday', 'height', 'cup', 'bust', 'waistline', 'hipline', 'birthplace']
        for i in inf:
            f.write((item[i] + '\n').encode('utf-8'))
        f.close()


The URL has been censored. Since I want the images saved locally, text files and directories are created on disk. To pass fields from one callback to the next, attach them to the request with meta={'item1': item}; the next callback picks them up with item1 = response.meta['item1'].
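Distilled down, the pattern looks like this (the callback names and next_page_url here are illustrative, not from the project):

def parse_profile(self, response):
    item = AvmooItem()
    item['name'] = response.xpath('//h1/text()').extract_first()
    # hand the partially filled item to the follow-up request
    yield scrapy.Request(next_page_url, meta={'item1': item},
                         callback=self.parse_movies)

def parse_movies(self, response):
    item1 = response.meta['item1']  # the same item, carried across the request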

The images themselves are saved in the item pipeline.

pipelines.py

import requests


class AvmooPipeline(object):
    def process_item(self, item, spider):
        # download every image for this item with requests and write it to disk
        des_imgs = item['des_imgs']
        path = '/home/shitfly/webspider/AVMOO/avmoo' + '/' + item['name'] + '/' + item['designations']
        for i in range(len(des_imgs)):
            image = requests.get(des_imgs[i])
            path_i = path + '/' + str(i) + '.jpg'
            f = open(path_i, 'wb')
            f.write(image.content)
            f.close()
            print 'saving image:', des_imgs[i]
            print 'image path:', path_i
            print 'directory:', path
        return item


This pipeline has to be enabled in settings.py:

ITEM_PIPELINES = {
    'AVMOO.pipelines.AvmooPipeline': 300,
}
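As an aside: blocking requests.get calls inside process_item stall Scrapy's event loop. Scrapy ships a built-in ImagesPipeline that does the same job through the asynchronous downloader; a minimal sketch of swapping it in (it needs Pillow installed, and des_imgs is this project's field; the rest is the standard ImagesPipeline API):

import scrapy
from scrapy.pipelines.images import ImagesPipeline


class AvmooImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # every yielded request is fetched by Scrapy itself, concurrently
        for url in item['des_imgs']:
            yield scrapy.Request(url)

Enable it in ITEM_PIPELINES the same way and point an IMAGES_STORE setting at a directory; file naming and deduplication then come for free.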


Now let's write a downloader middleware of our own to randomize the User-Agent.

middlewares.py

import random


class RandomUserAgentMiddleware(object):

    def __init__(self, agents):
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings.getlist('USER_AGENTS'))

    @classmethod
    def from_settings(cls, settings):
        return cls(settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        # pick a random User-Agent for every outgoing request
        ua = random.choice(self.agents)
        print ua
        request.headers.setdefault('User-Agent', ua)


Add a USER_AGENTS = […] list to settings.py, then register the middleware:

DOWNLOADER_MIDDLEWARES = {
    'AVMOO.middlewares.RandomUserAgentMiddleware': 543,
}


Each request will then go out with a User-Agent picked at random from that list.
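For example (these particular UA strings are only illustrative; any real browser strings will do):

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',
]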

Also add the following to settings.py:

CONCURRENT_ITEMS = 100               # max items processed in parallel in the pipelines (default)
CONCURRENT_REQUESTS = 16             # max concurrent requests overall (default)
CONCURRENT_REQUESTS_PER_DOMAIN = 16  # max concurrent requests per domain (default)
CONCURRENT_REQUESTS_PER_IP = 0       # max concurrent requests per IP; 0 means this limit is ignored
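With the items, spider, pipeline, middleware, and settings all in place, the crawl is started from the project root with the standard Scrapy command:

scrapy crawl AVMOO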



To be continued...