Python Crawler (2: Extracting a Page's Internal and External Links)

2016-12-29 16:17
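
The script below uses urllib and BeautifulSoup to pull two kinds of links out of a page: internal links, which point back into the same site, and external links, which point to other sites. Starting from a given page, it jumps to a random external link; when a page has no external links, it follows a random internal link and keeps looking from there.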
from urllib.request import urlopen
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import re
import datetime
import random

pages = set()                          # pages seen so far (not used in this excerpt)
random.seed(datetime.datetime.now())   # vary the random walk between runs

# Collect the internal links on a page: hrefs that begin with "/" or
# that contain the site's own scheme://domain address.
def getInternalLinks(bsObj, includeUrl):
    includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
    internalLinks = []
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs["href"] is not None:
            if link.attrs["href"] not in internalLinks:
                internalLinks.append(link.attrs["href"])
    return internalLinks

# Collect the external links on a page: hrefs that start with "http" or
# "www" and do not contain the current site's address, via the
# negative-lookahead pattern "^(http|www)((?!"+excludeUrl+").)*$".
def getExternalUrl(bsObj, excludeUrl):
    externalUrl = []
    # The pattern must be passed as the href= filter; as a bare second
    # positional argument, findAll would match it against the tag's CSS
    # class rather than the href value.
    for link in bsObj.findAll("a", href=re.compile("^(www|http)((?!" + excludeUrl + ").)*$")):
        if link.attrs["href"] is not None:
            if link.attrs["href"] not in externalUrl:
                externalUrl.append(link.attrs["href"])
    return externalUrl
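
# A quick sanity check of the exclusion pattern (example.com is just an
# illustrative domain). Note that excludeUrl must be a bare domain: if
# it carried the scheme, "^(www|http)" would consume "http" before the
# lookahead could ever see the excluded prefix, and same-site links
# would slip through as "external".
#   p = re.compile("^(www|http)((?!example.com).)*$")
#   p.match("http://other-site.org/page")  # matches: external link
#   p.match("http://example.com/about")    # None: same-site link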

# Return a random external link from the start page. If the page has no
# external links, pick one of its internal links at random and recurse
# into it until an external link turns up.
def getRandomExternalLink(startingPage):
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    # Exclude by bare domain; passing the full URL (scheme included)
    # would defeat the lookahead, as shown in the sanity check above.
    externalLinks = getExternalUrl(bsObj, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
        domain = urlparse(startingPage).scheme + "://" + urlparse(startingPage).netloc
        print("No external links, looking around the site for one")
        internalLinks = getInternalLinks(bsObj, domain)
        # Internal links may be site-relative (e.g. "/about"), so join
        # them onto the domain instead of blindly prefixing "http://".
        page = urljoin(domain, internalLinks[random.randint(0, len(internalLinks) - 1)])
        # Return the recursive result; without the return, the caller
        # would get None.
        return getRandomExternalLink(page)
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]

# Random walk: print each external link found, then repeat from there.
def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("The random external page is: " + externalLink)
    followExternalOnly(externalLink)
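
As a minimal sketch of how to start the walk (the start URL here is just an illustration; any page with plenty of outbound links will do):

if __name__ == "__main__":
    # Hypothetical starting point; substitute any link-rich page.
    followExternalOnly("http://oreilly.com")

Because followExternalOnly calls itself unconditionally, a run only ends when a request fails or Python's recursion limit is reached; in practice it is worth wrapping the call in try/except, or rewriting the recursion as a loop with a hop limit.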