
IPVS Source Code Analysis --- TCP and UDP Protocol Implementation

2014-02-26 16:02
Taking the TCP protocol as an example:
struct ip_vs_protocol ip_vs_protocol_tcp = {
    .name               =   "TCP",
    .protocol           =   IPPROTO_TCP,
    .dont_defrag        =   0,
    .appcnt             =   ATOMIC_INIT(0),
    .init               =   ip_vs_tcp_init,
    .exit               =   ip_vs_tcp_exit,
    .register_app       =   tcp_register_app,
    .unregister_app     =   tcp_unregister_app,
    .conn_schedule      =   tcp_conn_schedule,
    .conn_in_get        =   tcp_conn_in_get,
    .conn_out_get       =   tcp_conn_out_get,
    .snat_handler       =   tcp_snat_handler,
    .dnat_handler       =   tcp_dnat_handler,
    .csum_check         =   tcp_csum_check,
    .state_name         =   tcp_state_name,
    .state_transition   =   tcp_state_transition,
    .app_conn_bind      =   tcp_app_conn_bind,
    .debug_packet       =   ip_vs_tcpudp_debug_packet,
    .timeout_change     =   tcp_timeout_change,
    .set_state_timeout  =   tcp_set_state_timeout,
};
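All of these function pointers become reachable through ip_vs_proto_get() once the protocol is registered. Roughly, registration hashes the ip_vs_protocol structure into a small protocol table and runs its init hook. The block below is a condensed sketch of register_ip_vs_protocol() from ip_vs_proto.c in the same kernel generation, not a verbatim quote; details may differ between versions.

/* Condensed sketch of protocol registration (ip_vs_proto.c). */
static int register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
    unsigned hash = IP_VS_PROTO_HASH(pp->protocol);  /* bucket by protocol number */

    pp->next = ip_vs_proto_table[hash];              /* chain into the protocol table */
    ip_vs_proto_table[hash] = pp;

    if (pp->init != NULL)
        pp->init(pp);                                /* ip_vs_tcp_init() for TCP */

    return 0;
}

ip_vs_protocol_init() invokes this for &ip_vs_protocol_tcp (and for the UDP/AH/ESP equivalents); ip_vs_proto_get(IPPROTO_TCP) later walks the same table to hand the structure back to the packet path.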

IPVS defines its own timeouts. They are similar to netfilter's, but there are far fewer of them, and they are not tuned through /proc; instead they are adjusted with the ipvsadm command (ipvsadm --set takes the TCP, TCP-FIN and UDP timeouts, in seconds). The sketch after the table shows how these values reach the connection timer.
static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE]              =       2*HZ,
[IP_VS_TCP_S_ESTABLISHED]       =       15*60*HZ,
[IP_VS_TCP_S_SYN_SENT]          =       2*60*HZ,
[IP_VS_TCP_S_SYN_RECV]          =       1*60*HZ,
[IP_VS_TCP_S_FIN_WAIT]          =       2*60*HZ,
[IP_VS_TCP_S_TIME_WAIT]         =       2*60*HZ,
[IP_VS_TCP_S_CLOSE]             =       10*HZ,
[IP_VS_TCP_S_CLOSE_WAIT]        =       60*HZ,
[IP_VS_TCP_S_LAST_ACK]          =       30*HZ,
[IP_VS_TCP_S_LISTEN]            =       2*60*HZ,
[IP_VS_TCP_S_SYNACK]            =       120*HZ,
[IP_VS_TCP_S_LAST]              =       2*HZ,
};
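The entries are in jiffies (seconds multiplied by HZ). They take effect through the per-connection timer: set_tcp_state(), shown further down, copies pp->timeout_table[new_state] into cp->timeout on every state change, and when the packet handler releases the connection the timer is re-armed with that value. The block below paraphrases ip_vs_conn_put() from the same kernel generation; treat it as a sketch rather than a verbatim quote.

/* Sketch of ip_vs_conn_put(): restart the expiry timer with the timeout
 * that the state machine picked from tcp_timeouts[]. */
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
    /* reset it to expire in its timeout */
    mod_timer(&cp->timer, jiffies + cp->timeout);

    __ip_vs_conn_put(cp);    /* drop the reference taken by the lookup */
}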
The purpose of connection scheduling is to find a suitable destination server and create a new connection. The function is called from ip_vs_in() as pp->conn_schedule(skb, pp, &v, &cp), which for TCP resolves to tcp_conn_schedule().

ip_vs_in() then calls ret = cp->packet_xmit(skb, cp, pp). If packet_xmit is set to the NAT method, i.e. ip_vs_nat_xmit(), that function calls pp->dnat_handler(skb, pp, cp), i.e. tcp_dnat_handler(), which in turn calls ip_vs_app_pkt_in(cp, skb). For FTP this ends up in ip_vs_ftp_in(), which inspects the data sent by the client; for an active-mode transfer it finds the PORT xxx command, creates a connection for the expected data channel, and sets that connection's control connection to cp (a condensed sketch of ip_vs_ftp_in() follows below).

If packet_xmit is set to the DR or tunnel method, dnat_handler is never called, so ip_vs_ftp_in() is never reached either; for those two transmit methods the persistent attribute has to be configured with ipvsadm. Does that mean FTP over the NAT method can work without configuring persistent connections?
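To make the FTP case concrete, here is a heavily condensed sketch of the active-mode branch of ip_vs_ftp_in(). The function and helper names follow the 2.6-era sources, but the body is abbreviated and should be read as an outline, not a verbatim quote.

/* Abbreviated outline of ip_vs_ftp_in() for an active-mode "PORT" command
 * coming from the client (only reachable in NAT mode). */
static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                        struct sk_buff **pskb, int *diff)
{
    __u32 to;                  /* client data address parsed from PORT */
    __u16 port;                /* client data port parsed from PORT    */
    struct ip_vs_conn *n_cp;

    *diff = 0;                 /* the payload length is not changed here */

    if (cp->state != IP_VS_TCP_S_ESTABLISHED)
        return 1;              /* only look at established control connections */

    /* ... scan the TCP payload for "PORT a,a,a,a,p,p" and decode to/port ... */

    /* Create (or reuse) the expected data connection: the real server will
     * connect from the ftp-data port to the client's announced address. */
    n_cp = ip_vs_conn_new(IPPROTO_TCP, to, port,
                          cp->vaddr, htons(ntohs(cp->vport) - 1),
                          cp->daddr, htons(ntohs(cp->dport) - 1),
                          0, cp->dest);
    if (!n_cp)
        return 0;

    /* Chain the data connection to its control connection cp, exactly as
     * described in the text above. */
    ip_vs_control_add(n_cp, cp);

    ip_vs_tcp_conn_listen(n_cp);   /* put the new connection into LISTEN state */
    ip_vs_conn_put(n_cp);
    return 1;
}

Because this helper runs from tcp_dnat_handler(), it really is reached only in NAT mode, which is why DR and tunnel forwarding have to fall back to persistence for FTP.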
static int tcp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
                             int *verdict, struct ip_vs_conn **cpp)
{
    struct ip_vs_service *svc;
    struct tcphdr _tcph, *th;

    /* Pull out the TCP header */
    th = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_tcph), &_tcph);
    if (th == NULL) {
        *verdict = NF_DROP;
        return 0;
    }

    /* If this is a SYN packet and a matching virtual service exists */
    if (th->syn &&
        (svc = ip_vs_service_get(skb->nfmark, skb->nh.iph->protocol,
                                 skb->nh.iph->daddr, th->dest))) {
        if (ip_vs_todrop()) {
            /* The virtual server itself is already overloaded */
            ip_vs_service_put(svc);
            *verdict = NF_DROP;
            return 0;
        }

        /* Create an IPVS connection */
        *cpp = ip_vs_schedule(svc, skb);
        if (!*cpp) {
            /* Not successful: follow-up handling, update the statistics,
             * send an ICMP unreachable, etc. */
            *verdict = ip_vs_leave(svc, skb, pp);
            return 0;
        }
        ip_vs_service_put(svc);
    }
    return 1;
}
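For context, the return convention is consumed in ip_vs_in() roughly as in the condensed fragment below (the exact signature varies between kernel versions): a return of 0 means no connection was set up and *verdict decides the packet's fate, while 1 means processing continues with *cpp.

/* Condensed fragment of ip_vs_in(): let the protocol schedule a new
 * connection for packets that do not match an existing one. */
if (!pp->conn_schedule(skb, pp, &v, &cp))
    return v;    /* e.g. NF_DROP when overloaded, or the verdict from ip_vs_leave() */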
/*
 *  A very important function: the IPVS main scheduling function.
 *  It selects a server according to the virtual service, and creates a
 *  connection entry.
 *  Protocols supported: TCP, UDP
 */
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
    struct ip_vs_conn *cp = NULL;
    struct iphdr *iph = skb->nh.iph;
    struct ip_vs_dest *dest;
    __u16 _ports[2], *pptr;

    /* Pointer into the TCP/UDP header: [0] is the source port,
     * [1] is the destination port */
    pptr = skb_header_pointer(skb, iph->ihl*4, sizeof(_ports), _ports);
    if (pptr == NULL)
        return NULL;

    /* Handle persistent services */
    if (svc->flags & IP_VS_SVC_F_PERSISTENT)
        return ip_vs_sched_persist(svc, skb, pptr);

    /* The destination port does not match the service port: IPVS does not
     * handle this packet */
    if (!svc->fwmark && pptr[1] != svc->port) {
        if (!svc->port)
            IP_VS_ERR("Schedule: port zero only supported in persistent services, check your ipvs configuration\n");
        return NULL;
    }

    /* Call the scheduler's schedule() method to pick a destination server;
     * see the IPVS scheduling algorithms above for the individual schedulers */
    dest = svc->scheduler->schedule(svc, skb);
    if (dest == NULL) {
        IP_VS_DBG(1, "Schedule: no dest found.\n");
        return NULL;
    }

    /* Create a new IPVS connection */
    cp = ip_vs_conn_new(iph->protocol,
                        iph->saddr, pptr[0],
                        iph->daddr, pptr[1],
                        dest->addr, dest->port ? dest->port : pptr[1],
                        0, dest);
    if (cp == NULL)
        return NULL;

    /* Update the service and connection statistics counters */
    ip_vs_conn_stats(cp, svc);
    return cp;
}
Creating a new connection:

struct ip_vs_conn *
ip_vs_conn_new(int proto, __u32 caddr, __u16 cport, __u32 vaddr, __u16 vport,
               __u32 daddr, __u16 dport, unsigned flags, struct ip_vs_dest *dest)
{
    struct ip_vs_conn *cp;
    struct ip_vs_protocol *pp = ip_vs_proto_get(proto);

    /* Allocate the connection from the slab cache */
    cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
    if (cp == NULL) {
        IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
        return NULL;
    }

    memset(cp, 0, sizeof(*cp));
    INIT_LIST_HEAD(&cp->c_list);
    init_timer(&cp->timer);
    cp->timer.data     = (unsigned long)cp;
    cp->timer.function = ip_vs_conn_expire;    /* connection expiry function */
    cp->protocol       = proto;
    cp->caddr          = caddr;
    cp->cport          = cport;
    cp->vaddr          = vaddr;
    cp->vport          = vport;
    cp->daddr          = daddr;
    cp->dport          = dport;
    cp->flags          = flags;
    spin_lock_init(&cp->lock);

    atomic_set(&cp->refcnt, 1);        /* initial reference count is 1 */
    atomic_set(&cp->n_control, 0);     /* number of child connections is 0 */
    atomic_set(&cp->in_pkts, 0);

    atomic_inc(&ip_vs_conn_count);
    if (flags & IP_VS_CONN_F_NO_CPORT)
        atomic_inc(&ip_vs_conn_no_cport_cnt);

    ip_vs_bind_dest(cp, dest);     /* bind the connection to its destination server */

    cp->state = 0;                 /* initial connection state is 0 */
    cp->timeout = 3*HZ;            /* default timeout is 3 seconds */

    ip_vs_bind_xmit(cp);           /* bind the packet transmit method for this connection */

    /* Bind the protocol application; currently FTP over TCP is the only one,
     * hence the unlikely().  This calls the protocol's app_conn_bind member,
     * which for TCP is tcp_app_conn_bind(); it is only effective in NAT mode.
     * It checks whether the port belongs to a multi-connection application
     * protocol and, if so, binds its handler to the connection, the
     * equivalent of a netfilter connection helper. */
    if (unlikely(pp && atomic_read(&pp->appcnt)))
        ip_vs_bind_app(cp, pp);    /* effectively: return pp->app_conn_bind(cp); */

    /* Insert the connection into the IPVS connection table */
    ip_vs_conn_hash(cp);

    return cp;
}

Binding the connection to its destination server:

static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
    if (!dest)
        return;

    atomic_inc(&dest->refcnt);

    /* Set the connection flags according to the server, mainly to determine
     * how packets of this connection will be transmitted */
    cp->flags |= atomic_read(&dest->conn_flags);
    cp->dest = dest;

    if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
        /* It is a normal connection, so increase the inactive connection
         * counter because it is in TCP SYN_RECV state (inactive) or another
         * protocol's inactive state */
        atomic_inc(&dest->inactconns);
    } else {
        /* It is a persistent connection/template, so increase the
         * persistent connection counter */
        atomic_inc(&dest->persistconns);
    }

    /* Check whether the destination server is now overloaded */
    if (dest->u_threshold != 0 &&
        ip_vs_dest_totalconns(dest) >= dest->u_threshold)
        dest->flags |= IP_VS_DEST_F_OVERLOAD;
}

Binding the transmit method (see the transmit method implementations below):

static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
    /* #define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK) */
    switch (IP_VS_FWD_METHOD(cp)) {
    case IP_VS_CONN_F_MASQ:
        cp->packet_xmit = ip_vs_nat_xmit;       /* NAT transmit */
        break;
    case IP_VS_CONN_F_TUNNEL:
        cp->packet_xmit = ip_vs_tunnel_xmit;    /* TUNNEL transmit */
        break;
    case IP_VS_CONN_F_DROUTE:
        cp->packet_xmit = ip_vs_dr_xmit;        /* DR transmit */
        break;
    case IP_VS_CONN_F_LOCALNODE:
        cp->packet_xmit = ip_vs_null_xmit;      /* local packets */
        break;
    case IP_VS_CONN_F_BYPASS:
        cp->packet_xmit = ip_vs_bypass_xmit;    /* bypass transmit */
        break;
    }
}

The basic requirement for connection-oriented processing is looking up a connection from packet contents. Like netfilter, IPVS identifies each connection by a 5-tuple: IP protocol, source address, source port, destination address and destination port. Unlike netfilter, however, there is no notion of direction, so the request direction and the reply direction are handled by different lookup functions. Because IPVS processes requests at the INPUT hook and replies at the FORWARD hook, it never handles request and reply packets at the same hook, and can therefore do without a direction concept. The function below is called in the forward sense from ip_vs_in() and in the inverse sense from ip_vs_in_icmp():

static struct ip_vs_conn *
tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
                const struct iphdr *iph, unsigned int proto_off, int inverse)
{
    __u16 _ports[2], *pptr;

    /* Pointer into the TCP/UDP header: [0] is the source port,
     * [1] is the destination port */
    pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
    if (pptr == NULL)
        return NULL;

    if (likely(!inverse)) {
        /* Forward or inverse lookup; in the vast majority of cases the
         * connection is looked up in the forward sense */
        return ip_vs_conn_in_get(iph->protocol,
                                 iph->saddr, pptr[0],
                                 iph->daddr, pptr[1]);
    } else {
        return ip_vs_conn_in_get(iph->protocol,
                                 iph->daddr, pptr[1],
                                 iph->saddr, pptr[0]);
    }
}
ip_vs_conn *ip_vs_conn_in_get(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port){struct ip_vs_conn *cp;cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port);//没有找到,源端口设为0,再次查找if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port);return cp;}static inline struct ip_vs_conn *__ip_vs_conn_in_get(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port){unsigned hash;struct ip_vs_conn *cp;//入(请求)方向计算HASH值是用源的三元组来计算:IP协议、源地址、源端口hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);ct_read_lock(hash);list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {//caddr,cport是连接记录的客户端的地址和端口if (s_addr == cp->caddr && s_port == cp->cport && d_port == cp->vport && d_addr == cp->vaddr &&//连接中的客户端端口为0的情况((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && protocol == cp->protocol) {/* HIT */atomic_inc(&cp->refcnt); //增加连接引用ct_read_unlock(hash);return cp;}}ct_read_unlock(hash);return NULL;}与in几乎一样static struct ip_vs_conn *tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,const struct iphdr *iph, unsigned int proto_off, int inverse){__u16 _ports[2], *pptr;pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);if (pptr == NULL)return NULL;if (likely(!inverse)) {return ip_vs_conn_out_get(iph->protocol, iph->saddr, pptr[0], iph->daddr, pptr[1]);} else {return ip_vs_conn_out_get(iph->protocol, iph->daddr, pptr[1], iph->saddr, pptr[0]);}}struct ip_vs_conn *ip_vs_conn_out_get(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port){unsigned hash;struct ip_vs_conn *cp, *ret=NULL;//出方向计算HASH值是用目的三元组来计算:IP协议、目的地址和目的端口,//这样计算结果和入方向的计算值是相同的hash = ip_vs_conn_hashkey(protocol, d_addr, d_port);ct_read_lock(hash);list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {if (d_addr == cp->caddr && d_port == cp->cport && s_port == cp->dport && s_addr == cp->daddr &&protocol == cp->protocol) {/* HIT */atomic_inc(&cp->refcnt);ret = cp;break;}}ct_read_unlock(hash);return ret;}该函数完成对协议部分数据进行源NAT操作,对TCP来说,NAT部分的数据就是源端口static int tcp_snat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp){struct tcphdr *tcph;unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4;//NAT操作skb必须是可写的if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph)))return 0;if (unlikely(cp->app != NULL)) {// 如果是多连接协议,进行应用协议内容部分数据的修改//目前只支持FTP协议,对FTP作NAT时,需要修改PORT命令或227回应内容中的地址端口信息if (pp->csum_check && !pp->csum_check(*pskb, pp))return 0;/* Call application helper if needed */if (!ip_vs_app_pkt_out(cp, pskb))return 0;}tcph = (void *)(*pskb)->nh.iph + tcphoff;tcph->source = cp->vport;// 修改当前TCP源端口if (!cp->app) {//如果只修改了源端口一个参数,就值需要用差值法快速计算新的TCP校验和tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, cp->dport, cp->vport);if ((*pskb)->ip_summed == CHECKSUM_HW)(*pskb)->ip_summed = CHECKSUM_NONE;} else {// 如果修改了协议内容部分数据,需要根据全部数据重新计算TCP校验和tcph->check = 0;(*pskb)->csum = skb_checksum(*pskb, tcphoff, (*pskb)->len - tcphoff, 0);tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, (*pskb)->len - tcphoff, cp->protocol, (*pskb)->csum);}return 1;}应用协议修改输出方向的应用层数据int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb){struct ip_vs_app *app;//检查连接是否和应用绑定if ((app = cp->app) == NULL)return 1;//TCP协议另外单独处理if (cp->protocol == IPPROTO_TCP)return app_tcp_pkt_out(cp, pskb, app);//非TCP协议调用应用协议的pkt_out()函数,我们只看tcp协议if (app->pkt_out == NULL)return 1;return app->pkt_out(app, cp, pskb, NULL);}处理TCP应用发出方向的数据包static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, struct ip_vs_app *app){int 
This function performs source NAT on the protocol part of the packet; for TCP the field that is rewritten is the source port:

static int
tcp_snat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp,
                 struct ip_vs_conn *cp)
{
    struct tcphdr *tcph;
    unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4;

    /* The skb must be writable before a NAT operation */
    if (!ip_vs_make_skb_writable(pskb, tcphoff + sizeof(*tcph)))
        return 0;

    if (unlikely(cp->app != NULL)) {
        /* Multi-connection protocol: modify the application payload as well.
         * Currently only FTP is supported; when NATing FTP, the address/port
         * carried in the PORT command or the 227 reply must be rewritten. */
        if (pp->csum_check && !pp->csum_check(*pskb, pp))
            return 0;

        /* Call application helper if needed */
        if (!ip_vs_app_pkt_out(cp, pskb))
            return 0;
    }

    tcph = (void *)(*pskb)->nh.iph + tcphoff;
    tcph->source = cp->vport;    /* rewrite the TCP source port */

    if (!cp->app) {
        /* Only the source port was changed, so the new TCP checksum can be
         * computed incrementally from the difference */
        tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, cp->dport, cp->vport);
        if ((*pskb)->ip_summed == CHECKSUM_HW)
            (*pskb)->ip_summed = CHECKSUM_NONE;
    } else {
        /* The payload was modified, so the TCP checksum has to be recomputed
         * over the whole segment */
        tcph->check = 0;
        (*pskb)->csum = skb_checksum(*pskb, tcphoff, (*pskb)->len - tcphoff, 0);
        tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr,
                                        (*pskb)->len - tcphoff,
                                        cp->protocol, (*pskb)->csum);
    }
    return 1;
}

The application protocol helper rewrites application-layer data in the output direction:

int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
    struct ip_vs_app *app;

    /* Check whether the connection is bound to an application */
    if ((app = cp->app) == NULL)
        return 1;

    /* TCP is handled separately */
    if (cp->protocol == IPPROTO_TCP)
        return app_tcp_pkt_out(cp, pskb, app);

    /* Non-TCP protocols call the application's pkt_out() function directly;
     * here we only look at TCP */
    if (app->pkt_out == NULL)
        return 1;

    return app->pkt_out(app, cp, pskb, NULL);
}

Handling an outgoing packet of a TCP application:

static inline int
app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
                struct ip_vs_app *app)
{
    int diff;
    unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;    /* offset of the TCP header */
    struct tcphdr *th;
    __u32 seq;

    /* The packet must be made writable first */
    if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
        return 0;

    th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

    seq = ntohl(th->seq);    /* current sequence number */

    if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
        vs_fix_seq(&cp->out_seq, th);        /* adjust the outgoing sequence number */
    if (cp->flags & IP_VS_CONN_F_IN_SEQ)
        vs_fix_ack_seq(&cp->in_seq, th);     /* adjust the incoming sequence number */

    if (app->pkt_out == NULL)
        return 1;

    /* Call the application's pkt_out() function; see the multi-connection
     * protocol handling below */
    if (!app->pkt_out(app, cp, pskb, &diff))
        return 0;

    if (diff != 0)
        /* The data length changed, so adjust the outgoing sequence numbers
         * once more */
        vs_seq_update(cp, &cp->out_seq, IP_VS_CONN_F_OUT_SEQ, seq, diff);

    return 1;
}

This function performs destination NAT on the protocol part of the packet; for TCP the field that is rewritten is the destination port:

static int
tcp_dnat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp,
                 struct ip_vs_conn *cp)
{
    struct tcphdr *tcph;
    unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4;

    /* csum_check requires unshared skb */
    if (!ip_vs_make_skb_writable(pskb, tcphoff + sizeof(*tcph)))
        return 0;

    if (unlikely(cp->app != NULL)) {
        /* Multi-connection protocol: modify the application payload as well.
         * Currently only FTP is supported; when NATing FTP, the address/port
         * carried in the PORT command or the 227 reply must be rewritten. */
        if (pp->csum_check && !pp->csum_check(*pskb, pp))
            return 0;

        if (!ip_vs_app_pkt_in(cp, pskb))
            return 0;
    }

    tcph = (void *)(*pskb)->nh.iph + tcphoff;
    tcph->dest = cp->dport;    /* rewrite the TCP destination port */

    if (!cp->app) {
        /* Only port and addr are changed, do fast csum update */
        tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, cp->vport, cp->dport);
        if ((*pskb)->ip_summed == CHECKSUM_HW)
            (*pskb)->ip_summed = CHECKSUM_NONE;
    } else {
        /* full checksum calculation */
        tcph->check = 0;
        (*pskb)->csum = skb_checksum(*pskb, tcphoff, (*pskb)->len - tcphoff, 0);
        tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr,
                                        (*pskb)->len - tcphoff,
                                        cp->protocol, (*pskb)->csum);
        (*pskb)->ip_summed = CHECKSUM_UNNECESSARY;
    }
    return 1;
}

int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
    struct ip_vs_app *app;

    /* Check whether the connection is bound to an application */
    if ((app = cp->app) == NULL)
        return 1;

    /* TCP is handled separately */
    if (cp->protocol == IPPROTO_TCP)
        return app_tcp_pkt_in(cp, pskb, app);

    if (app->pkt_in == NULL)
        return 1;

    return app->pkt_in(app, cp, pskb, NULL);
}

static inline int
app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
               struct ip_vs_app *app)
{
    int diff;
    unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
    struct tcphdr *th;
    __u32 seq;

    /* The packet must be made writable first */
    if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
        return 0;

    th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

    seq = ntohl(th->seq);

    if (cp->flags & IP_VS_CONN_F_IN_SEQ)
        vs_fix_seq(&cp->in_seq, th);         /* adjust the incoming sequence number */
    if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
        vs_fix_ack_seq(&cp->out_seq, th);    /* adjust the outgoing sequence number */

    if (app->pkt_in == NULL)
        return 1;

    /* Call the application's pkt_in() function; see the multi-connection
     * protocol handling below */
    if (!app->pkt_in(app, cp, pskb, &diff))
        return 0;

    if (diff != 0)
        /* The data length changed, so adjust the incoming sequence numbers
         * once more */
        vs_seq_update(cp, &cp->in_seq, IP_VS_CONN_F_IN_SEQ, seq, diff);

    return 1;
}

Checksum verification: both the TCP and UDP headers carry a checksum field; for TCP the checksum is mandatory, while for UDP it may be left uncomputed. The function uses the standard checksum helpers provided by the Linux kernel:

static int tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
{
    unsigned int tcphoff = skb->nh.iph->ihl*4;

    switch (skb->ip_summed) {
    case CHECKSUM_NONE:
        skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
        /* fall through */
    case CHECKSUM_HW:
        if (csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
                              skb->len - tcphoff,
                              skb->nh.iph->protocol, skb->csum)) {
            IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for");
            return 0;
        }
        break;
    default:
        /* CHECKSUM_UNNECESSARY */
        break;
    }

    return 1;    /* checksum correct */
}
This function returns the name of a protocol state as a string:

static const char * tcp_state_name(int state)
{
    if (state >= IP_VS_TCP_S_LAST)
        return "ERR!";
    return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}

static char * tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
    [IP_VS_TCP_S_NONE]          =   "NONE",
    [IP_VS_TCP_S_ESTABLISHED]   =   "ESTABLISHED",
    [IP_VS_TCP_S_SYN_SENT]      =   "SYN_SENT",
    [IP_VS_TCP_S_SYN_RECV]      =   "SYN_RECV",
    [IP_VS_TCP_S_FIN_WAIT]      =   "FIN_WAIT",
    [IP_VS_TCP_S_TIME_WAIT]     =   "TIME_WAIT",
    [IP_VS_TCP_S_CLOSE]         =   "CLOSE",
    [IP_VS_TCP_S_CLOSE_WAIT]    =   "CLOSE_WAIT",
    [IP_VS_TCP_S_LAST_ACK]      =   "LAST_ACK",
    [IP_VS_TCP_S_LISTEN]        =   "LISTEN",
    [IP_VS_TCP_S_SYNACK]        =   "SYNACK",
    [IP_VS_TCP_S_LAST]          =   "BUG!",
};

IPVS's TCP state transitions are similar to netfilter's; in NAT mode they are almost identical, while in TUNNEL and DR mode only half of the connection is tracked. This is called for every packet that passes through IPVS:

static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
                     const struct sk_buff *skb, struct ip_vs_protocol *pp)
{
    struct tcphdr _tcph, *th;

    th = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_tcph), &_tcph);
    if (th == NULL)
        return 0;

    spin_lock(&cp->lock);
    set_tcp_state(pp, cp, direction, th);    /* recompute the connection state */
    spin_unlock(&cp->lock);

    return 1;
}

static inline void
set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
              int direction, struct tcphdr *th)
{
    int state_idx;
    int new_state = IP_VS_TCP_S_CLOSE;         /* default new state: connection closed */
    int state_off = tcp_state_off[direction];  /* per-direction offset that selects
                                                  which block of the transition table
                                                  is used */

    /* Adjust the control parameters for the half-connection case */
    if (cp->flags & IP_VS_CONN_F_NOOUTPUT) {
        if (state_off == TCP_DIR_OUTPUT)
            cp->flags &= ~IP_VS_CONN_F_NOOUTPUT;
        else
            state_off = TCP_DIR_INPUT_ONLY;
    }

    /* Map the TCP flags to a state index */
    if ((state_idx = tcp_state_idx(th)) < 0) {
        IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
        goto tcp_state_out;
    }

    /* Look up the new state in the transition table */
    new_state = tcp_state_table[state_off + state_idx].next_state[cp->state];

  tcp_state_out:
    if (new_state != cp->state) {
        /* The state has changed */
        struct ip_vs_dest *dest = cp->dest;

        if (dest) {
            /* The connection's destination server exists.
             * If the connection was active and the new state is not
             * ESTABLISHED, mark it inactive and adjust the counters. */
            if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
                (new_state != IP_VS_TCP_S_ESTABLISHED)) {
                atomic_dec(&dest->activeconns);
                atomic_inc(&dest->inactconns);
                cp->flags |= IP_VS_CONN_F_INACTIVE;
            /* If the connection was inactive and the new state is
             * ESTABLISHED, mark it active and adjust the counters. */
            } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
                       (new_state == IP_VS_TCP_S_ESTABLISHED)) {
                atomic_inc(&dest->activeconns);
                atomic_dec(&dest->inactconns);
                cp->flags &= ~IP_VS_CONN_F_INACTIVE;
            }
        }
    }

    /* Update the connection timeout from the new state */
    cp->timeout = pp->timeout_table[cp->state = new_state];
}

static inline int tcp_state_idx(struct tcphdr *th)
{
    if (th->rst)
        return 3;
    if (th->syn)
        return 0;
    if (th->fin)
        return 1;
    if (th->ack)
        return 2;
    return -1;
}

#define TCP_DIR_INPUT           0
#define TCP_DIR_OUTPUT          4
#define TCP_DIR_INPUT_ONLY      8

static const int tcp_state_off[IP_VS_DIR_LAST] = {
    [IP_VS_DIR_INPUT]       =   TCP_DIR_INPUT,
    [IP_VS_DIR_OUTPUT]      =   TCP_DIR_OUTPUT,
    [IP_VS_DIR_INPUT_ONLY]  =   TCP_DIR_INPUT_ONLY,
};

The IPVS TCP state transition table:

static struct tcp_states_t tcp_states [] = {
/*  INPUT */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }},

/*  OUTPUT */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*  INPUT-ONLY */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};

The first two arrays of this transition table are similar to the TCP tracking table in the 2.4 kernel, minus the "none" flag type. Judging from the data, INPUT corresponds to the REPLY direction and OUTPUT to the ORIGINAL direction, which looks odd at first: IPVS appears to view the states from the point of view of the server itself, rather than from the point of view of a man in the middle as netfilter does. The table is read the same way as netfilter's. For the three-way handshake, the connection starts in sNO; when the SYN arrives, IPVS regards itself as the server, so the state becomes sSR rather than sSS. In NAT mode, when the SYN+ACK travels back through IPVS the state stays sSR, and when the third ACK arrives it changes to sES.
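That handshake description can be checked directly against the table. The stand-alone program below copies just the three rows the walk touches (INPUT/syn, OUTPUT/syn, INPUT/ack) from tcp_states[] above and replays a NAT-mode three-way handshake; it prints SYN_RECV, SYN_RECV, ESTABLISHED.

/* Stand-alone demonstration (user space) of how a NAT-mode three-way
 * handshake walks the transition table: sNO -> sSR -> sSR -> sES. */
#include <stdio.h>

enum { sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA, S_LAST };

static const char *names[S_LAST] =
        { "NONE", "ESTABLISHED", "SYN_SENT", "SYN_RECV", "FIN_WAIT",
          "TIME_WAIT", "CLOSE", "CLOSE_WAIT", "LAST_ACK", "LISTEN", "SYNACK" };

/* Rows copied from tcp_states[]: INPUT/syn, OUTPUT/syn, INPUT/ack. */
static const int in_syn[S_LAST]  = { sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR };
static const int out_syn[S_LAST] = { sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR };
static const int in_ack[S_LAST]  = { sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES };

int main(void)
{
    int state = sNO;

    state = in_syn[state];   /* client SYN arrives (INPUT)                 */
    printf("after SYN:      %s\n", names[state]);

    state = out_syn[state];  /* server SYN+ACK passes back out (OUTPUT);
                                tcp_state_idx() classifies it as "syn"     */
    printf("after SYN+ACK:  %s\n", names[state]);

    state = in_ack[state];   /* final ACK from the client (INPUT)          */
    printf("after ACK:      %s\n", names[state]);
    return 0;
}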
IPVS has a second state transition table that is stricter and therefore somewhat safer:

static struct tcp_states_t tcp_states_dos [] = {
/*  INPUT */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
/*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }},
/*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},

/*  OUTPUT */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }},
/*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }},
/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }},
/*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }},

/*  INPUT-ONLY */
/*       sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }},
/*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }},
/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }},
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};

This function binds the handler module of a multi-connection application protocol to an IPVS connection:

static int tcp_app_conn_bind(struct ip_vs_conn *cp)
{
    int hash;
    struct ip_vs_app *inc;
    int result = 0;

    /* Only handled in NAT mode */
    if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
        return 0;

    /* Hash on the destination (virtual) port */
    hash = tcp_app_hashkey(cp->vport);

    spin_lock(&tcp_app_lock);
    list_for_each_entry(inc, &tcp_apps[hash], p_list) {
        if (inc->port == cp->vport) {
            /* Found the application module registered for this port */
            if (unlikely(!ip_vs_app_inc_get(inc)))    /* take a reference on the module */
                break;
            spin_unlock(&tcp_app_lock);

            cp->app = inc;                       /* point the connection at the module */
            if (inc->init_conn)
                result = inc->init_conn(inc, cp);    /* initialise the application state */
            goto out;
        }
    }
    spin_unlock(&tcp_app_lock);

  out:
    return result;
}

The timeout_change function changes the timeout behaviour of the protocol's connections; concretely, TCP has two state transition tables, and this function decides which one is in use. The flags parameter is passed in from the ipvsadm configuration:

static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
{
    int on = (flags & 1);    /* secure_tcp */

    /*
    ** FIXME: change secure_tcp to independent sysctl var
    ** or make it per-service or per-app because it is valid
    ** for most if not for all of the applications. Something
    ** like "capabilities" (flags) for each object.
    */
    tcp_state_table = (on ? tcp_states_dos : tcp_states);
}

This function is called when the corresponding ipvsadm command is issued:

static int
tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
{
    return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
                                   tcp_state_name_table, sname, to);
}

int ip_vs_set_state_timeout(int *table, int num, char **names, char *name, int to)
{
    int i;

    if (!table || !name || !to)
        return -EINVAL;

    /* Look up the state by name in the timeout table and change its timeout;
     * the parameter to is given in seconds */
    for (i = 0; i < num; i++) {
        if (strcmp(names[i], name))
            continue;
        table[i] = to * HZ;
        return 0;
    }
    return -ENOENT;
}
Tags: IPVS iptables