
Analyzing nginx logs

2013-05-13 20:44
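The script below, shared as-is, walks an nginx access log, keeps only lines whose timestamp falls inside a configured window, and reports the top IPs by request count and by traffic plus the top URLs by traffic. It assumes a layout like nginx's default combined log format; a quick sanity check of its timestamp regex against a sample line (hypothetical values) looks like this:

# Check the timestamp regex used by the script against a hypothetical
# access-log line in nginx's default "combined" format.
import re

sample = ('1.2.3.4 - - [13/May/2013:20:44:01 +0800] '
          '"GET /index.html HTTP/1.1" 200 612 "-" "curl/7.30.0"')
re_time = r'\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2}'
print re.search(re_time, sample).group(0)   # -> 13/May/2013:20:44:01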
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import re
import sys
import ip_location


# A small time helper: restricts the analysis to a given time window.
# If no window is specified, the whole log is analysed.
class TimeParser(object):
    def __init__(self, re_time, str_time, period):
        self.__re_time = re.compile(re_time)
        self.__str_time = str_time
        self.__period = period

    def __get(self, line):
        # Pull the timestamp out of a log line and convert it to epoch seconds.
        t = re.search(self.__re_time, line).group(0)
        return time.mktime(time.strptime(t, self.__str_time))

    def inPeriod(self, line):
        # True if the line's timestamp falls inside the configured period.
        t = self.__get(line)
        return (t > time.mktime(time.strptime(self.__period[0], self.__str_time))
                and t < time.mktime(time.strptime(self.__period[1], self.__str_time)))


class ParseLog(object):
    def __init__(self, file, re_time, str_time, period):
        self.file = file
        self.re_time = re_time
        self.str_time = str_time
        self.period = period
        self.ip_dict = {}
        self.url_dict = {}
        # Read the log in chunks and try to derive a report label from it:
        # a "domain-time-suffix" split, falling back to the text before the
        # first "." and "unknown time" when a line does not split that way.
        BUF_SIZE = 2048
        bigfile = open(file, 'r')
        try:
            tmp_lines = bigfile.readlines(BUF_SIZE)
            while tmp_lines:
                for eachline in tmp_lines:
                    try:
                        self.domain, self.parsetime, self.suffix = eachline.split("-")
                    except ValueError:
                        self.domain = eachline.split(".")[0]
                        self.parsetime = "unknown time"
                tmp_lines = bigfile.readlines(BUF_SIZE)
        finally:
            bigfile.close()

    # Count requests and total traffic, storing them in the per-ip and per-url dicts.
    def Count(self):
        # Build a TimeParser to filter lines by the configured time window.
        CountTime = TimeParser(self.re_time, self.str_time, self.period)
        self.total_traffic = []
        """
        The loop below looks at every line: lines without a timestamp are skipped;
        lines whose timestamp falls inside the analysed period have their ip and
        traffic counted; lines without request/URL information only record the ip.
        """
        with open(self.file) as f:
            for i, line in enumerate(f):
                try:
                    if CountTime.inPeriod(line):
                        ip = line.split()[0]
                        try:
                            # Response size: the number following the 3-digit status code.
                            traffic = int(re.findall(r'\d{3}\ [^0]\d+', line)[0].split()[1])
                        except (IndexError, ValueError):
                            continue
                        try:
                            url = re.findall(r'GET\ .*\.*\ ', line)[0].split()[1]
                        except IndexError:
                            url = "unknown"
                    else:
                        continue
                except AttributeError:
                    # No timestamp on this line; skip it.
                    continue
                self.ip_dict.setdefault(ip, {'number': 0, 'traffic': 0})['number'] += 1
                self.ip_dict.setdefault(ip, {'number': 0, 'traffic': 0})['traffic'] += int(traffic)
                self.url_dict.setdefault(url, 0)
                self.url_dict[url] += int(traffic)
                if not i % 1000000:
                    print "have processed " + str(i) + " lines!"
                # Accumulate the total traffic.
                self.total_traffic.append(int(traffic))
        total = sum(self.total_traffic)
        # Print the total traffic.
        print "******************************************************************"
        print self.domain + " all the traffic in " + self.parsetime + " is below:"
        print "total_traffic: %s MB" % str(total / 1024 / 1024)
"""定义两个字典,分别存储ip的数量和流量信息"""
def TopIp(self, number):
self.Count()
TopNumberIp = {}
TopTrafficIp = {}
#对字典赋值
for ip in self.ip_dict.keys():
TopNumberIp[ip] = self.ip_dict[ip]['number']
TopTrafficIp[ip] = self.ip_dict[ip]['traffic']
#按值从大到小的顺序排序键
SortIpNo = sorted(TopNumberIp.items(), key=lambda e: e[1], reverse=True)
#print SortIpNo[0][1]
SortIpTraffic = sorted(TopTrafficIp.items(), key=lambda e: e[1], reverse=True)
#输出连接数top 100 ip的相关信息到文件TopIpNo.txt中
reload(sys)
sys.setdefaultencoding('utf-8')
ipno = open('D:\\TopIpNo.txt', 'w+')
ipno.write(u"ip地址\t\t\t访问次数\t\t国家/区域/城市\t\t\t\t\t\t\t\t\t\t\t\t\t\t运营商\n")
ipno.write(
"-------------------------------------------------------------------------------------------------\n")
for i in range(number):
try:
ipno.write(SortIpNo[i][0] + "\t\t" + str(SortIpNo[i][1]) + "\t\t\t" + ip_location.ip_location(
SortIpNo[i][0]) + "\n")
except:
continue
ipno.write(
"-------------------------------------------------------------------------------------------------\n")
ipno.close()
#输出流量top 100 ip的相关信息到文件iptraffic.txt中
iptr = open('D:\\iptraffic.txt', 'w+')
iptr.write(u"ip地址".ljust(15) + u"总流量(MB)".ljust(15) + u"国家/区域/城市".ljust(40) + u"运营商" + "\n")
iptr.write(
"-------------------------------------------------------------------------------------------------\n")
for i in range(number):
try:
iptr.write(SortIpTraffic[i][0] .ljust(20) + str(SortIpTraffic[i][1] / 1024 / 1024))
#记入地理信息
iptr.write("\t\t\t" + ip_location.ip_location(SortIpTraffic[i][0]) + "\n")
except:
continue
iptr.write(
"-------------------------------------------------------------------------------------------------\n")
iptr.close()

    def TopUrl(self, number):
        SortUrlTraffic = sorted(self.url_dict.items(), key=lambda e: e[1], reverse=True)
        # Write the top urls by traffic to urltraffic.txt.
        urtr = open('D:\\urltraffic.txt', 'w+')
        urtr.write("Filename".ljust(100) + u"TotalTraffic(MB)" + "\n")
        urtr.write("-----------------------------------------------------------------------------------------\n")
        for i in range(number):
            try:
                urtr.write(SortUrlTraffic[i][0].ljust(100) + ":" + str(SortUrlTraffic[i][1] / 1024 / 1024) + "\n")
            except Exception:
                continue
        urtr.write("-----------------------------------------------------------------------------------------\n")
        urtr.close()


# Regex and strptime format for the timestamp; these normally do not need changing.
re_time = r'\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2}'
str_time = '%d/%b/%Y:%H:%M:%S'
# The time window to analyse.
period = ("16/Nov/2000:16:00:00", "16/Nov/2015:17:00:00")
# How many entries to keep in each "top" report.
number = 100

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'no logfile specified!'
        print "Usage: python logParser.py filename"
        time.sleep(2)
        sys.exit()
    else:
        file = sys.argv[1]
    lp = ParseLog(file, re_time, str_time, period)
    starttime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print "Started parsing %s at %s, please wait patiently!" % (file, starttime)
    print
    print "******************************************************************"
    lp.TopIp(number)
    lp.TopUrl(number)
    endtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print "Parsing of the log finished at %s!" % endtime

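The traffic figure that Count() accumulates comes from a regex that grabs the 3-digit status code together with the byte count that follows it (skipping sizes whose first character is '0'). A small check against a hypothetical line:

# How Count() extracts the response size: the pattern matches the status code,
# a space, then a byte count whose first character is not '0'.
import re

line = '1.2.3.4 - - [13/May/2013:20:44:01 +0800] "GET /index.html HTTP/1.1" 200 612 "-" "-"'
print int(re.findall(r'\d{3}\ [^0]\d+', line)[0].split()[1])   # -> 612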

Reposted from a friend's share in a group chat.