A Breadth-First Web Crawler
2016-09-21 17:00
# -*- coding: utf-8 -*-
from selenium import webdriver
import time
#from bs4 import BeautifulSoup
import urllib2
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import StaleElementReferenceException
import socket
import re
import zlib
import httplib,urllib,urlparse
import MySQLdb
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#Check link validity (early draft, left disabled)
'''
def urlStat(urls):
    #errurl = []
    try:
        r = urllib.urlopen(url)
    except IOError, e:
        r = e
    return r.code
    for url in urls:
        up = urlparse.urlparse(url)
        #Hyperlinks come in two forms: absolute (http://...) and
        #relative (/.../sample.html), so handle them separately
        if up.netloc == "" or up.scheme == "":
            #Empty host: discard the link without processing
            continue
        else:
            http = httplib.HTTP(up.netloc)
            http.putrequest("GET", up.path+"?"+up.params+up.query+up.fragment)
            http.putheader("Accept", "*/*")
            http.endheaders()
        try:
            r = urllib.urlopen(url)
        except IOError, e:
            r = e
        #errcode, errmsg, headers = http.getreply()
        if r.code in (200,401):
            return r.code
            #rurl.append(url)
            print url," : ok"
        else:
            #errurl.append(errurl)
            #print url," : ",r.code
            pass
'''
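#For reference, urlparse splits an absolute URL into components, e.g.
#  urlparse.urlparse("http://trip.vip.com/a/b?x=1")
#  returns scheme='http', netloc='trip.vip.com', path='/a/b', query='x=1',
#while a relative link such as "/a/b" yields an empty netloc, which is why
#the disabled draft above branches on up.netloc.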
class linkQueue:
    def __init__(self):
        #URLs already visited
        self.visited = []
        #URLs waiting to be visited
        self.unvisited = []
        #temporary queue (see getTempUrlCount)
        self.temp = []
    #Get the queue of visited URLs
    def getVisitedUrl(self):
        return self.visited
    #Get the queue of unvisited URLs
    def getUnvisitedUrl(self):
        return self.unvisited
    #Add a URL to the visited queue
    def addVisitedUrl(self,url):
        self.visited.append(url)
    #Remove a URL from the visited queue
    def removeVisitedUrl(self,url):
        self.visited.remove(url)
    #Dequeue an unvisited URL
    def unVisitedUrlDeQueue(self):
        try:
            return self.unvisited.pop()
        except IndexError:
            return None
    #Make sure each URL is visited only once
    def addUnvisitedUrl(self,url):
        #Skip empty URLs and URLs already in either queue
        if url != "" and url not in self.visited and url not in self.unvisited:
            self.unvisited.insert(0,url)
    #Number of visited URLs
    def getVisitedUrlCount(self):
        return len(self.visited)
    #Number of unvisited URLs
    def getUnvisitedUrlCount(self):
        return len(self.unvisited)
    #Size of the temporary queue
    def getTempUrlCount(self):
        return len(self.temp)
    #Is the unvisited queue empty?
    def unVisitedUrlEmpty(self):
        return len(self.unvisited) == 0
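#Usage sketch (illustrative): new links go in at the front and pop() takes
#from the back, so the queue is FIFO and the crawl proceeds breadth-first:
#  q = linkQueue()
#  q.addUnvisitedUrl("http://example.com/a")
#  q.addUnvisitedUrl("http://example.com/b")
#  q.unVisitedUrlDeQueue()  # -> "http://example.com/a"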
class Crawler:
    def __init__(self,seeds):
        #self.phantomjs_path = r'D:\python\phantomjs-2.1.1-windows\bin\phantomjs.exe'
        #self.driver = webdriver.PhantomJS(self.phantomjs_path)
        #Initial crawl depth; the web is commonly said to be at most ~17 links deep
        self.current_deepth = 1
        self.temp = [] #temporary queue for links found at the current depth
        self.linkQueue = linkQueue() #instantiate the URL queue
        if isinstance(seeds,str): #a string: a single seed URL
            self.linkQueue.addUnvisitedUrl(seeds)
        if isinstance(seeds,list): #a list: multiple seed URLs
            for l in seeds:
                self.linkQueue.addUnvisitedUrl(l)
        print "Add the seeds url \"%s\" to the unvisited url list"%str(self.linkQueue.unvisited)
    def Crawling(self,seeds,crawl_deepth):
        #Loop while the crawl depth does not exceed crawl_deepth
        while self.current_deepth <= crawl_deepth:
            #Loop while there are still links waiting to be crawled
            while not self.linkQueue.unVisitedUrlEmpty():
                #Dequeue the URL at the head of the queue
                visitUrl = self.linkQueue.unVisitedUrlDeQueue()
                print "Pop out one url \"%s\" from unvisited url list"%visitUrl
                if visitUrl is None or visitUrl == "":
                    continue
                #Extract hyperlinks
                #links = self.getHref(visitUrl)
                links = self.getUrl(visitUrl)
                print "Get %d new links"%len(links)
                #Move the URL into the visited queue
                self.linkQueue.addVisitedUrl(visitUrl)
                print "Visited url count: "+str(self.linkQueue.getVisitedUrlCount())
                print "Unvisited url count: "+str(self.linkQueue.getUnvisitedUrlCount())
                print "Visited deepth: "+str(self.current_deepth)
                num = self.linkQueue.getUnvisitedUrlCount()
                self.fileOper("Unvisited url count: %d"%num)
                try:
                    for link in links:
                        self.temp.append(link)
                        print "temp count :"+str(len(self.temp))
                except Exception,e:
                    self.fileOper(str(e))
                    print str(e)
            #Enqueue the links found at this depth as unvisited URLs
            for link in self.temp:
                self.linkQueue.addUnvisitedUrl(link)
            #Write the batch to the database
            self.dataProcess = dataProcess()
            self.dataProcess.insertTable(self.temp)
            print "%d add to unvisited links:"%len(self.temp)
            self.fileOper("%d add to unvisited links:"%len(self.temp))
            self.temp = []
            self.fileOper("%d end"%self.current_deepth)
            self.current_deepth += 1
        ISOTIMEFORMAT='%Y-%m-%d %X'
        print "Crawling end %s"%time.strftime(ISOTIMEFORMAT, time.localtime())
        self.fileOper("Crawling end %s"%time.strftime(ISOTIMEFORMAT, time.localtime()))
        #self.driver.close()
    #Extract every hyperlink from the page source
    def getHref1(self,url):
        urls = []
        Rurls = []
        data = self.getPageSource(url)
        links = re.findall(r'''<a(\s*)(.*?)(\s*)href(\s*)=(\s*)([\"\s]*)([^\"\']+?)([\"\s]+)(.*?)>''',data,re.S|re.I)
        for u in links:
            urls.append(u[6])
        Rurls = self.getDomianLinks(urls) #drop links whose host is not in the domain list
        return Rurls
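    # Illustrative: for '<a class="x" href="http://vip.com/a">' the regex
    # above captures the href value in its 7th group, so u[6] yields
    # "http://vip.com/a".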
    #Fetch page source with a headless browser
    def getPageSource1(self,url):
        #phantomjs_path = r'C:\Users\joyking.xu\Desktop\phantomjs-2.1.1-windows\bin\phantomjs.exe'
        #driver = webdriver.PhantomJS(phantomjs_path)
        self.driver.get(url)
        #time.sleep(7)
        self.waitForLoad(self.driver)
        #print(driver.page_source)
        pageSource = self.driver.page_source
        #html = driver.page_source.find_element_by_xpath("//li")
        #return (driver.find_elements_by_xpath("//li").text)
        #driver.get_screenshot_as_file("2.jpg") #take a page screenshot
        #self.driver.close()
        return pageSource
    #Fetch page source over plain HTTP
    def getPageSource(self,url,timeout=10,coding=None):
        try:
            socket.setdefaulttimeout(timeout)
            req = urllib2.Request(url)
            req.add_header('User-agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')
            response = urllib2.urlopen(req)
            page = response.read()
            #Decompress gzip-encoded responses (16+MAX_WBITS tells zlib to expect a gzip header)
            if response.headers.get('Content-Encoding') == 'gzip':
                page = zlib.decompress(page, 16+zlib.MAX_WBITS)
            if coding is None:
                coding = response.headers.getparam("charset")
            #If the site declares a charset, re-encode the page as utf-8;
            #otherwise return the raw bytes
            if coding is not None:
                page = page.decode(coding).encode('utf-8')
            return page
        except Exception,e:
            print str(e)
            return "" #empty page, so callers can still run a regex over the result
    def getUrl(self,url):
        Rurls = []
        data = self.getPageSource(url)
        reg = r'"(http://.+?)"'
        regob = re.compile(reg,re.DOTALL)
        urllist = regob.findall(data)
        Rurls = self.getDomianLinks(urllist)
        return Rurls
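    # Illustrative: the regex in getUrl grabs every double-quoted absolute
    # http:// URL in the raw HTML (hrefs, image and script URLs alike), e.g.
    # the fragment '<a href="http://trip.vip.com/x.html">' yields
    # "http://trip.vip.com/x.html". Note that https:// links are not matched.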
    #Keep only links whose host is in the domain list
    def getDomianLinks(self,urls):
        Links = []
        domainlist = ['act.vip.com','vip.com','trip.vip.com'] #domains to crawl
        for url in urls:
            up = urlparse.urlparse(url)
            for domain in domainlist:
                if up.netloc == domain:
                    Links.append(url)
                    break
        return Links
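    # Illustrative: only URLs whose hostname matches a listed domain exactly
    # survive the filter, so "http://www.vip.com/x" (netloc "www.vip.com")
    # is dropped while "http://vip.com/x" is kept.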
    def waitForLoad(self,driver): #wait for the page to load by polling until the previously found element goes stale
        elem = driver.find_element_by_tag_name("li")
        count = 0
        while True:
            count += 1
            if count > 20:
                print("Timing out after 10 seconds and returning")
                return
            time.sleep(.5)
            try:
                elem.tag_name #touching a stale element raises once the page has been replaced
            except StaleElementReferenceException:
                return
    #Append to the log file
    def fileOper(self,data):
        content = str(data)
        file = open('log.txt','ab+')
        file.write(content+"\r\n")
        file.close()
class dataProcess:
    def __init__(self):
        self.conn = MySQLdb.connect(host='10.199.243.34',user='root',passwd='vipshop-cms',db='vip_crawling',port=3306,charset='utf8')
        self.cur = self.conn.cursor()
        self.curtime = time.strftime('%Y-%m-%d',time.localtime(time.time()))
    def insertTable(self,values):
        data = []
        try:
            for i in values:
                status = self.urlStat(i)
                data = ['旅行PC',i,status,self.curtime] #platform label ("Travel PC"), URL, HTTP status, date
                self.cur.execute("insert into href_resource(platform,url,status,create_date) values(%s,%s,%s,%s)",data)
                data = []
            print "%d add to DB:"%len(values)
            self.fileOper("%d add to DB:"%len(values))
        except Exception,e:
            print str(e)
        self.conn.commit()
        self.cur.close()
        self.conn.close()
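    # The INSERT above assumes a table shaped roughly like this hypothetical
    # sketch (the actual DDL is not shown in this post):
    #   CREATE TABLE href_resource (
    #       platform    VARCHAR(32),
    #       url         VARCHAR(1024),
    #       status      INT,
    #       create_date DATE
    #   );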
    #Check link validity by HTTP status code
    def urlStat(self,url):
        try:
            r = urllib.urlopen(url)
            return r.code
        except IOError,e:
            #urllib raises IOError; HTTP-level errors carry a .code attribute
            return getattr(e,'code',None)
    #Append to the log file
    def fileOper(self,data):
        content = str(data)
        file = open('log.txt','ab+')
        file.write(content+"\r\n")
        file.close()
def main(seeds,crawl_deepth):
    ISOTIMEFORMAT='%Y-%m-%d %X'
    print "main start %s"%time.strftime(ISOTIMEFORMAT, time.localtime())
    craw = Crawler(seeds)
    craw.fileOper("main start %s"%time.strftime(ISOTIMEFORMAT, time.localtime()))
    craw.Crawling(seeds,crawl_deepth)

if __name__ == '__main__':
    main("http://trip.vip.com",3)