
Scraping girl pics from Jandan (jandan.net)

2016-05-31 16:47
First, here's someone else's code, which uses urllib.request:

# -*- coding: utf-8 -*-
import urllib.request
import os

# Create a folder in the current directory and crawl the first ten pages
def download_mm(folder='OOXX', pages=10):
    os.mkdir(folder)
    os.chdir(folder)  # switch into the folder

    url = 'http://jandan.net/ooxx/'
    page_num = int(get_page(url))  # current page number

    for i in range(pages):
        page_num -= 1  # BUG: decrements before the first URL is built, so the current page is skipped
        page_url = url + 'page-' + str(page_num) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

def url_open(url):
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    return html

# Get the current page number
def get_page(url):
    html = url_open(url).decode('utf-8')
    a = html.find('current-comment-page') + 23  # 23 = len('current-comment-page') + len('">[')
    b = html.find(']', a)
    return html[a:b]

# Collect the image addresses on a page
def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')

    while a != -1:
        b = html.find('.jpg', a, a+100)  # look for '.jpg' within 100 chars of the match
        if b != -1:
            img_addrs.append(html[a+9:b+4])  # a+9 skips 'img src="', b+4 keeps '.jpg'
        else:
            b = a + 9

        a = html.find('img src=', b)  # search for the next image from position b

    for each in img_addrs:
        print(each)
    return img_addrs

def save_imgs(folder, img_addrs):
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)

if __name__ == '__main__':
    download_mm()


The page_num -= 1 at the top of the loop is a problem: it decrements before the first page URL is built, so the current (first) page gets skipped and is never crawled.
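A minimal trace shows it, assuming the site is currently on page 100 (a made-up number):

page_num = 100                       # what int(get_page(url)) would return
for i in range(3):                   # pretend pages=3
    page_num -= 1                    # decremented BEFORE the URL is built
    print('page-' + str(page_num))
# prints page-99, page-98, page-97 -- page-100 itself is never fetched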

I'd never seen this html.find method before, so I couldn't really follow the spots where it shows up.
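It turns out find is just the built-in string method: it returns the index of the first match, or -1 if there is none. A minimal sketch of what get_page is doing, on a made-up snippet in the page's markup style:

html = '<span class="current-comment-page">[1998]</span>'
a = html.find('current-comment-page') + 23  # 23 jumps past the class name and '">[' to the digits
b = html.find(']', a)                       # the closing bracket after position a
print(html[a:b])                            # -> 1998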

I rewrote it with requests and BeautifulSoup:

import requests
import os
from bs4 import BeautifulSoup

def url_open(url):
    header1 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36 QQBrowser/9.3.6874.400'}
    res = requests.get(url, headers=header1)
    html = res.content  # raw bytes; see the note on content vs. text below
    return html

def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    soup = BeautifulSoup(html, 'lxml')
    s = soup.find_all(class_="view_img_link")

    for i in s:
        img_addrs.append(i.get('href'))

    return img_addrs

def get_current_page(url):
    html = url_open(url).decode('utf-8')
    soup = BeautifulSoup(html, 'lxml')
    s = soup.find(class_="current-comment-page")
    text = s.get_text()
    return int(text[1:-1])  # strip the surrounding brackets, e.g. '[1998]' -> 1998

def save_imgs(img_addrs):
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)

def download_mm(folder='OOXX', pages=5):
    os.mkdir(folder)
    os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    page_num = get_current_page(url) + 1  # +1 so the current page is included after the first decrement

    for i in range(pages):
        page_num -= 1
        page_url = url + 'page-' + str(page_num) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(img_addrs)

if __name__ == '__main__':
    download_mm()


The get_current_page(url) + 1 in download_mm is what lets the first page be crawled: since the loop decrements page_num before building each URL, starting one above the current page makes the first iteration land exactly on it.
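Same trace as before, with the +1 applied and the current page again assumed to be 100:

page_num = 100 + 1                   # get_current_page(url) + 1
for i in range(3):
    page_num -= 1
    print('page-' + str(page_num))
# prints page-100, page-99, page-98 -- the current page is included this time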

In url_open you have to use res.content; res.text doesn't work here. It's the same idea as the urllib.request version using read(): both hand back raw bytes.
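The difference is easy to check; a quick sketch (any URL works):

import requests

res = requests.get('http://jandan.net/ooxx/')
print(type(res.content))  # <class 'bytes'> -- raw bytes, can be written with open(..., 'wb') and .decode()d by hand
print(type(res.text))     # <class 'str'>  -- already decoded; calling .decode('utf-8') on it raises AttributeError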

Getting the href values with BeautifulSoup (the find_all loop in find_imgs) is really easy.
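For comparison, here's roughly what that loop does, on a made-up fragment in the same markup style (img.example.com is a placeholder; the real page has many such links):

from bs4 import BeautifulSoup

html = '<a href="http://img.example.com/ooxx/abc123.jpg" class="view_img_link">[view original]</a>'
soup = BeautifulSoup(html, 'lxml')
for a in soup.find_all(class_='view_img_link'):  # every tag with class "view_img_link"
    print(a.get('href'))                         # -> http://img.example.com/ooxx/abc123.jpg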

The each.split('/') in save_imgs splits the address into a list (not a dict, as I first guessed), and index -1 takes the last element, which is the filename.
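A quick check in the REPL clears it up (made-up address):

addr = 'http://img.example.com/ooxx/abc123.jpg'
parts = addr.split('/')  # a list: ['http:', '', 'img.example.com', 'ooxx', 'abc123.jpg']
print(parts[-1])         # -> 'abc123.jpg', the filename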