
My Improvements and Optimizations to the TCP CDG Congestion Control Algorithm

2016-12-10 19:04
Strictly speaking, this is not my optimization; I borrowed BBR's strength.
        What exactly did I borrow? As I have stressed again and again, BBR's biggest contribution is not that it gave Linux one more TCP congestion control algorithm (it has been implemented on BSD as well...), but that it restructured the Linux TCP implementation! Thanks to that restructuring, many things that used to be impossible are now possible.
        In short, BBR's restructuring of the Linux TCP implementation completely separates three concerns:
1. which packets to retransmit;
2. how many packets to transmit;
3. the actual transmission.

A congestion control algorithm focuses on concern 2 above.
-----------------------------------
CDG must quietly maintain, behind the congestion window, a "window of its own" called shadow_wnd. This window is driven only by the actual congestion conditions, never by the Linux TCP congestion state machine. So even during the loss-retransmission Recovery phase, shadow_wnd must keep being updated dynamically, growing Reno-style (or CUBIC-style; any style will do).
        Yet in pre-BBR kernels (Linux 4.8 and earlier) this was impossible: no callback in struct tcp_congestion_ops hands the algorithm control of the window during the Recovery phase, and the tcp_congestion_ops callbacks are the only lever a congestion control algorithm has.
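For reference, here is an abridged sketch of the pre-BBR struct tcp_congestion_ops (roughly as in 4.3-era kernels; fields elided, annotations mine). Note that none of these callbacks lets the algorithm own the window while PRR is driving it in CWR/Recovery:

struct tcp_congestion_ops {
        ...
        u32  (*ssthresh)(struct sock *sk);      /* on loss/ECN: choose new ssthresh */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
                                                /* grows cwnd; skipped in CWR/Recovery */
        void (*set_state)(struct sock *sk, u8 new_state);
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        u32  (*undo_cwnd)(struct sock *sk);     /* revert a spurious reduction */
        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
                                                /* RTT sampling only; a cwnd written
                                                 * here would be overwritten by PRR */
        ...
};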
BBR introduced the following logic into Linux:
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
                             int flag, const struct rate_sample *rs)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cong_control) {
                icsk->icsk_ca_ops->cong_control(sk, rs);
                return;
        }

        if (tcp_in_cwnd_reduction(sk)) {
                /* Reduce cwnd if state mandates */
                tcp_cwnd_reduction(sk, acked_sacked, flag);
        } else if (tcp_may_raise_cwnd(sk, flag)) {
                /* Advance cwnd if state allows */
                tcp_cong_avoid(sk, ack, acked_sacked);
        }
        tcp_update_pacing_rate(sk);
}

Once a module implements the cong_control callback, the standard PRR logic and the tcp_cong_avoid congestion-avoidance path are never invoked again; cong_control is called in every state. So my plan was simply this: have cong_control do its work during the Recovery or Loss states, and maintain CDG's shadow window inside that callback.
        Easier said than done! The logic BBR introduced is rather crude: as long as cong_control is implemented, the function returns unconditionally. The proper design would give cong_control a return value, returning only when certain conditions are met and otherwise falling through to the logic below. BBR did not provide that.
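To make that concrete, here is a minimal sketch of the design just described: cong_control gains a return value and the caller falls through when it returns false. This is hypothetical; upstream has no such signature:

/* Hypothetical: bool (*cong_control)(struct sock *, const struct rate_sample *). */
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
                             int flag, const struct rate_sample *rs)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        /* true: the CC module fully handled the window;
         * false: please run the stock PRR/cong_avoid path below.
         */
        if (icsk->icsk_ca_ops->cong_control &&
            icsk->icsk_ca_ops->cong_control(sk, rs))
                return;

        if (tcp_in_cwnd_reduction(sk))
                tcp_cwnd_reduction(sk, acked_sacked, flag);
        else if (tcp_may_raise_cwnd(sk, flag))
                tcp_cong_avoid(sk, ack, acked_sacked);
        tcp_update_pacing_rate(sk);
}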
-----------------------------------
But I did introduce it.
        Look: I changed tcp_cong_control in tcp_input.c into the following:
static void tcp_cong_control(struct sock *sk, u32 ack, u32 prior_in_flight,
                             u32 acked_sacked, int flag,
                             const struct rate_sample *rs)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

#ifdef BBR
        if (icsk->icsk_ca_ops->cong_control) {
                icsk->icsk_ca_ops->cong_control(sk, rs);
#ifdef CDG
                /* My added check: rs gains a new flag field; once CDG_CONT
                 * is set, fall through instead of returning.
                 */
                if (!(rs->flag & CDG_CONT))
                        return;
#endif
        }
#endif
        if (tcp_in_cwnd_reduction(sk)) {
                /* Reduce cwnd if state mandates */
                tcp_cwnd_reduction(sk, acked_sacked, 1);
        } else if (tcp_may_raise_cwnd(sk, flag)) {
                /* Advance cwnd if state allows */
                tcp_cong_avoid(sk, ack, prior_in_flight);
        }
        tcp_update_pacing_rate(sk);
}
All I added is one check. My goal is plain: make CDG's logic reachable in the Recovery state as well. A trick this simple looks terribly sophisticated to those who don't understand it, and terribly dumb to those who do... Either way, I did it.
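The patch depends on a flag field added to struct rate_sample plus a CDG_CONT bit; neither definition is shown in this article, so the following is my guess at the companion change (the field placement and bit value are assumptions):

/* Assumed addition in include/net/tcp.h: */
#define CDG_CONT 0x1    /* CC asks tcp_cong_control() to fall through */

struct rate_sample {
        ...
        u32 flag;       /* new: out-flags from cong_control, e.g. CDG_CONT */
};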
-----------------------------------
The code below is only a diff against the stock Linux 4.3 CDG implementation; if you want to understand the details, read the stock CDG code first. I may be rumored to grant every request, but that's only a rumor... Note that my target kernel is 3.10, and before porting CDG I had already ported BBR to it, so 4.9 is really the kernel you should read against; that, in turn, will make some 3.10 interfaces puzzling... I won't expand on that here. What I will say is this: to truly escape the academic ivory tower, you must work through all of this code yourself! Otherwise you will, first, understand nothing at all, and second, be unable to implement your ideas even when you have them. The complete code is attached at the end of this article.
Here are notes on the important functions in the patch:
1. cdg_main, CDG's cong_control callback:
static void cdg_main(struct sock *sk, struct rate_sample *rs)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct cdg *ca = inet_csk_ca(sk);

        if (!shadow_grow) {
                rs->flag |= CDG_CONT;
                return;
        }

        if (icsk->icsk_ca_state != TCP_CA_Open) {
                /* Keep sampling RTT even while retransmitting: the link does
                 * not care about packet type, and retransmits also consume
                 * available network capacity.
                 */
                if (rs->rtt_us) {
                        /* Thanks to BBR's new rate_sample, rtt_us is available here. */
                        ca->rtt.min = min_not_zero(ca->rtt.min, (s32)rs->rtt_us);
                        ca->rtt.max = max(ca->rtt.max, (s32)rs->rtt_us);
                }

                if (ca->state == CDG_NONFULL && use_tolerance) {
                        if (!shadow_fast && ca->ack_sack_cnt <= 0 && ca->rtt.v64) {
                                s32 grad = 0;

                                if (ca->rtt_prev.v64)
                                        grad = tcp_cdg_grad(ca); /* updates ca->state */
                                ca->rtt_prev = ca->rtt;
                                ca->ack_sack_cnt = tcp_packets_in_flight(tp);
                                ca->rtt.v64 = 0;
                        }
                        ca->ack_sack_cnt -= rs->acked_sacked;
                        if (ca->state == CDG_NONFULL || shadow_fast) {
                                /* While the link is not fully congested, the
                                 * shadow window quietly claims space on behalf
                                 * of the real window; when fast recovery ends,
                                 * the real window gets to use it.
                                 */
                                tcp_cong_avoid_ai_shadow(sk, ca->shadow_wnd,
                                                         rs->acked_sacked);
                                tp->snd_cwnd = ca->shadow_wnd;
                        }

                        rs->flag |= CDG_CONT;
                }
        } else {
                /* Set CDG_CONT so the caller's execution flow continues. */
                rs->flag |= CDG_CONT;
        }
}

2. cdg_state, the state-change callback:
static void cdg_state(struct sock *sk, u8 new_state)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        if (!recovery_restore)
                return;
        if (new_state == TCP_CA_Open)
                /* On entering Open, take over the shadow window outright;
                 * this may cause a burst.
                 */
                tp->snd_cwnd = max(max(tp->snd_cwnd, ca->shadow_wnd), 2U);
        if (new_state == TCP_CA_Loss) {
                /* On entering Loss, decide whether the loss was mere noise. */
                if (ca->state == CDG_NONFULL && use_tolerance) {
                        /* Noise loss: restore the window. */
                        tp->snd_cwnd = ca->shadow_wnd;
                        printk("#### cwnd:%u \n", tp->snd_cwnd);
                        if (loss_push)
                                /* Noise loss: keep sending data within the window. */
                                tcp_push_pending_frames(sk);
                }
                /* Congestion loss: fall through to the stock handling. */
        }
}

3. tcp_cdg_undo_cwnd, the undo function:
static u32 tcp_cdg_undo_cwnd(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        /* Undo to the shadow window. */
        return max3(2U, ca->shadow_wnd, max(tp->snd_cwnd, ca->undo_cwnd));
}

4. tcp_cdg_grad, the RTT-gradient computation:
static s32 tcp_cdg_grad(struct cdg *ca)
{
        /* ca->rtt is refreshed with samples from the pkts_acked callback
         * and from cong_control.
         */
        s32 gmin = ca->rtt.min - ca->rtt_prev.min;
        s32 gmax = ca->rtt.max - ca->rtt_prev.max;
        s32 grad;

        if (ca->gradients) {
                ca->gsum.min += gmin - ca->gradients[ca->tail].min;
                ca->gsum.max += gmax - ca->gradients[ca->tail].max;
                ca->gradients[ca->tail].min = gmin;
                ca->gradients[ca->tail].max = gmax;
                ca->tail = (ca->tail + 1) & (window - 1);
                gmin = ca->gsum.min;
                gmax = ca->gsum.max;
        }
        ......
        /* Backoff was effectual: */
        if (gmin <= -32 || gmax <= -32)
                ca->backoff_cnt = 0;

        if (use_tolerance) {
                /* Reduce small variations to zero: */
                gmin = DIV_ROUND_CLOSEST(gmin, 64);
                gmax = DIV_ROUND_CLOSEST(gmax, 64);
                /* Note the edge-trigger conditions in the CDG model diagram
                 * from the previous article.
                 */
                if (gmin > 0 && gmax <= 0)
                        ca->state = CDG_FULL;
                else if ((gmin > 0 && gmax > 0) || gmax < 0)
                        ca->state = CDG_NONFULL;
        }
        return grad;
}
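To make the ring-buffer arithmetic concrete, here is the same moving-sum update as a tiny userspace program (my illustration only; window must be a power of two, as the module enforces at registration):

#include <stdio.h>

#define WINDOW 8

static int gradients[WINDOW], gsum, tail;

/* Push one per-interval gradient and return the windowed sum.
 * CDG deliberately skips dividing by WINDOW; see the comment in
 * the full tcp_cdg_grad() below.
 */
static int push_gradient(int g)
{
        gsum += g - gradients[tail];
        gradients[tail] = g;
        tail = (tail + 1) & (WINDOW - 1);
        return gsum;
}

int main(void)
{
        int deltas[] = { 5, 7, -3, 0, 2 };      /* per-interval RTTmin deltas, usec */
        unsigned i;

        for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
                printf("smoothed gmin = %d\n", push_gradient(deltas[i]));
        return 0;
}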

I first blind-tested the stock CDG. Oh no! Disappointing: better than CUBIC, yes, but under high loss rates merely on par with Westwood. Through all of this, BBR remained in a class of its own, far out of reach. After reading the paper I quickly implemented my own version; thanks again to BBR's restructuring of Linux TCP! I admit I only really understand Reno, BIC, CUBIC, Vegas, and BBR; I have never analyzed HTCP or Westwood in any detail. But no matter how I tested, my CDG (my modified CDG, that is) stayed close to BBR.
        What is CDG, really? CDG is essentially a traditional loss-based algorithm with a noise-tolerance mechanism bolted on. Loss-based algorithms work by continually filling buffers until the buffer overflows, a loss occurs, and the window is cut; but sometimes losses happen for reasons other than congestion, and the algorithm would still cut the window, badly hurting bandwidth utilization. With CDG's RTT-gradient noise-tolerance mechanism, bandwidth utilization improves substantially. Since tolerating losses can in turn worsen real congestion, CDG has a built-in backoff algorithm, which I won't repeat here.
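For the curious, the backoff rule hidden behind nexp_u32() is probabilistic: per the CDG paper, back off with probability 1 - exp(-g/G), where g is the smoothed RTT gradient; as I read the code, backoff_factor plays the role of 1/G with g in microseconds. A quick userspace check of the numbers:

#include <math.h>
#include <stdio.h>

int main(void)
{
        const double backoff_factor = 42.0;     /* module default */
        double grad_us;

        /* prandom_u32() <= nexp_u32(grad * backoff_factor) means "do not
         * back off", so P(backoff) = 1 - exp(-grad_us * factor * 1e-6).
         */
        for (grad_us = 1000.0; grad_us <= 64000.0; grad_us *= 4.0)
                printf("grad = %6.0f us -> P(backoff) = %.3f\n",
                       grad_us, 1.0 - exp(-grad_us * backoff_factor * 1e-6));
        return 0;
}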
-----------------------------------
The tcp_cdg.c source:
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/module.h>
#include <net/tcp.h>

#define HYSTART_ACK_TRAIN 1
#define HYSTART_DELAY 2

static int window __read_mostly = 8;
static unsigned int backoff_beta __read_mostly = 0.7071 * 1024; /* sqrt 0.5 */
static unsigned int backoff_factor __read_mostly = 42;
static unsigned int hystart_detect __read_mostly = 3;
static unsigned int use_ineff __read_mostly = 5;
static unsigned int use_shadow __read_mostly = 1;
static unsigned int backoff __read_mostly = 0;
static unsigned int use_tolerance __read_mostly = 1;
static unsigned int shadow_fast __read_mostly = 1;
static unsigned int shadow_grow __read_mostly = 1;
static unsigned int recovery_restore __read_mostly = 1;
static unsigned int loss_push __read_mostly = 1;

module_param(window, int, 0444);
MODULE_PARM_DESC(window, "gradient window size (power of two <= 256)");
module_param(backoff_beta, uint, 0644);
MODULE_PARM_DESC(backoff_beta, "backoff beta (0-1024)");
module_param(backoff_factor, uint, 0644);
MODULE_PARM_DESC(backoff_factor, "backoff probability scale factor");
module_param(hystart_detect, uint, 0644);
MODULE_PARM_DESC(hystart_detect, "use Hybrid Slow start "
                 "(0: disabled, 1: ACK train, 2: delay threshold, 3: both)");
module_param(use_ineff, uint, 0644);
MODULE_PARM_DESC(use_ineff, "use ineffectual backoff detection (threshold)");
module_param(use_shadow, uint, 0644);
MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
module_param(backoff, uint, 0644);
MODULE_PARM_DESC(backoff, "back");
module_param(use_tolerance, uint, 0644);
MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
module_param(shadow_fast, uint, 0644);
MODULE_PARM_DESC(shadow_fast, "back");
module_param(shadow_grow, uint, 0644);
MODULE_PARM_DESC(shadow_grow, "back");
module_param(recovery_restore, uint, 0644);
MODULE_PARM_DESC(recovery_restore, "back");
module_param(loss_push, uint, 0644);
MODULE_PARM_DESC(loss_push, "back");

struct cdg_minmax {
        union {
                struct {
                        s32 min;
                        s32 max;
                };
                u64 v64;
        };
};

enum cdg_state {
        CDG_UNKNOWN = 0,
        CDG_NONFULL = 1,
        CDG_FULL = 2,
        CDG_BACKOFF = 3,
};

struct cdg {
        struct cdg_minmax rtt;
        struct cdg_minmax rtt_prev;
        struct cdg_minmax *gradients;
        struct cdg_minmax gsum;
        bool gfilled;
        u8 tail;
        u8 state;
        u8 delack;
        u32 rtt_seq;
        u32 undo_cwnd;
        u32 shadow_wnd;
        u32 snd_cwnd_cnt;
        u16 backoff_cnt;
        u16 sample_cnt;
        s32 delay_min;
        s32 ack_sack_cnt;
        u32 last_ack;
        u32 round_start;
};

/**
 * nexp_u32 - negative base-e exponential
 * @ux: x in units of micro
 *
 * Returns exp(ux * -1e-6) * U32_MAX.
 */
static u32 __pure nexp_u32(u32 ux)
{
        static const u16 v[] = {
                /* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */
                65535,
                65518, 65501, 65468, 65401, 65267, 65001, 64470, 63422,
                61378, 57484, 50423, 38795, 22965, 8047, 987, 14,
        };
        u32 msb = ux >> 8;
        u32 res;
        int i;

        /* Cut off when ux >= 2^24 (actual result is <= 222/U32_MAX). */
        if (msb > U16_MAX)
                return 0;

        /* Scale first eight bits linearly: */
        res = U32_MAX - (ux & 0xff) * (U32_MAX / 1000000);

        /* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */
        for (i = 1; msb; i++, msb >>= 1) {
                u32 y = v[i & -(msb & 1)] + U32_C(1);

                res = ((u64)res * y) >> 16;
        }

        return res;
}

/* Based on the HyStart algorithm (by Ha et al.) that is implemented in
 * tcp_cubic. Differences/experimental changes:
 * o Using Hayes' delayed ACK filter.
 * o Using a usec clock for the ACK train.
 * o Reset ACK train when application limited.
 * o Invoked at any cwnd (i.e. also when cwnd < 16).
 * o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh).
 */
static void tcp_cdg_hystart_update(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);
        if (ca->delay_min == 0)
                return;

        if (hystart_detect & HYSTART_ACK_TRAIN) {
                u32 now_us = div_u64(local_clock(), NSEC_PER_USEC);

                if (ca->last_ack == 0 ||
                    !tcp_is_cwnd_limited(sk, tcp_packets_in_flight(tp))) {
                        ca->last_ack = now_us;
                        ca->round_start = now_us;
                } else if (before(now_us, ca->last_ack + 3000)) {
                        u32 base_owd = max(ca->delay_min / 2U, 125U);

                        ca->last_ack = now_us;
                        if (after(now_us, ca->round_start + base_owd)) {
                                tp->snd_ssthresh = tp->snd_cwnd;
                                return;
                        }
                }
        }

        if (hystart_detect & HYSTART_DELAY) {
                if (ca->sample_cnt < 8) {
                        ca->sample_cnt++;
                } else {
                        s32 thresh = max(ca->delay_min + ca->delay_min / 8U,
                                         125U);

                        if (ca->rtt.min > thresh)
                                tp->snd_ssthresh = tp->snd_cwnd;
                }
        }
}

static s32 tcp_cdg_grad(struct cdg *ca)
{
        s32 gmin = ca->rtt.min - ca->rtt_prev.min;
        s32 gmax = ca->rtt.max - ca->rtt_prev.max;
        s32 grad;

        if (ca->gradients) {
                ca->gsum.min += gmin - ca->gradients[ca->tail].min;
                ca->gsum.max += gmax - ca->gradients[ca->tail].max;
                ca->gradients[ca->tail].min = gmin;
                ca->gradients[ca->tail].max = gmax;
                ca->tail = (ca->tail + 1) & (window - 1);
                gmin = ca->gsum.min;
                gmax = ca->gsum.max;
        }

        /* We keep sums to ignore gradients during cwnd reductions;
         * the paper's smoothed gradients otherwise simplify to:
         * (rtt_latest - rtt_oldest) / window.
         *
         * We also drop division by window here.
         */
        grad = gmin > 0 ? gmin : gmax;

        /* Extrapolate missing values in gradient window: */
        if (!ca->gfilled) {
                if (!ca->gradients && window > 1)
                        grad *= window; /* Memory allocation failed. */
                else if (ca->tail == 0)
                        ca->gfilled = true;
                else
                        grad = (grad * window) / (int)ca->tail;
        }

        /* Backoff was effectual: */
        if (gmin <= -32 || gmax <= -32)
                ca->backoff_cnt = 0;

        if (use_tolerance) {
                /* Reduce small variations to zero: */
                gmin = DIV_ROUND_CLOSEST(gmin, 64);
                gmax = DIV_ROUND_CLOSEST(gmax, 64);
                if (gmin > 0 && gmax <= 0)
                        ca->state = CDG_FULL;
                else if ((gmin > 0 && gmax > 0) || gmax < 0)
                        ca->state = CDG_NONFULL;
        }
        return grad;
}

void tcp_enter_cwr_1(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->prior_ssthresh = 0;
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
                tp->high_seq = tp->snd_nxt;
                tp->tlp_high_seq = 0;
                tp->snd_cwnd_cnt = 0;
                tp->prior_cwnd = tp->snd_cwnd;
                tp->prr_delivered = 0;
                tp->prr_out = 0;
                tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
                if (tp->ecn_flags & TCP_ECN_OK)
                        tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
                tcp_set_ca_state(sk, TCP_CA_CWR);
        }
}

static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        if (prandom_u32() <= nexp_u32(grad * backoff_factor))
                return false;

        if (use_ineff) {
                ca->backoff_cnt++;
                if (ca->backoff_cnt > use_ineff)
                        return false;
        }

        ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
        ca->state = CDG_BACKOFF;
        tcp_enter_cwr_1(sk);
        return true;
}

void tcp_cong_avoid_ai_shadow(struct sock *sk, u32 w, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct cdg *ca = inet_csk_ca(sk);

        if (ca->snd_cwnd_cnt >= w) {
                ca->snd_cwnd_cnt = 0;
                ca->shadow_wnd++;
        }

        ca->snd_cwnd_cnt += acked;
        if (ca->snd_cwnd_cnt >= w) {
                u32 delta = ca->snd_cwnd_cnt / w;

                ca->snd_cwnd_cnt -= delta * w;
                ca->shadow_wnd += delta;
        }
        ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd_clamp);
}

/* Not called in CWR or Recovery state. */
static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 prior_snd_cwnd;
        u32 incr;

        if (tp->snd_cwnd <= tp->snd_ssthresh && hystart_detect)
                tcp_cdg_hystart_update(sk);

        if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
                s32 grad = 0;

                if (ca->rtt_prev.v64)
                        grad = tcp_cdg_grad(ca);
                ca->rtt_seq = tp->snd_nxt;
                ca->rtt_prev = ca->rtt;
                ca->rtt.v64 = 0;
                ca->last_ack = 0;
                ca->sample_cnt = 0;

                if (backoff && grad > 0 && tcp_cdg_backoff(sk, grad))
                        return;
        }

        if (!tcp_is_cwnd_limited(sk, tcp_packets_in_flight(tp))) {
                ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
                return;
        }

        prior_snd_cwnd = tp->snd_cwnd;
        tcp_reno_cong_avoid(sk, ack, acked);

        incr = tp->snd_cwnd - prior_snd_cwnd;
        ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
}

static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        if (rtt_us <= 0)
                return;

        /* A heuristic for filtering delayed ACKs, adapted from:
         * D.A. Hayes. "Timing enhancements to the FreeBSD kernel to support
         * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
         */
        if (tp->sacked_out == 0) {
                if (num_acked == 1 && ca->delack) {
                        /* A delayed ACK is only used for the minimum if it is
                         * provenly lower than an existing non-zero minimum.
                         */
                        ca->rtt.min = min(ca->rtt.min, rtt_us);
                        ca->delack--;
                        return;
                } else if (num_acked > 1 && ca->delack < 5) {
                        ca->delack++;
                }
        }

        ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us);
        ca->rtt.max = max(ca->rtt.max, rtt_us);
}

static u32 tcp_cdg_ssthresh(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ca->undo_cwnd = tp->snd_cwnd;
        ca->snd_cwnd_cnt = 0;
        ca->ack_sack_cnt = tcp_packets_in_flight(tp);

        if (ca->state == CDG_BACKOFF)
                return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);

        if (ca->state == CDG_NONFULL && use_tolerance)
                return tp->snd_cwnd;

        ca->shadow_wnd = max(min(ca->shadow_wnd >> 1, tp->snd_cwnd), 2U);
        if (use_shadow)
                return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
        return max(2U, tp->snd_cwnd >> 1);
}

static u32 tcp_cdg_undo_cwnd(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        return max3(2U, ca->shadow_wnd, max(tp->snd_cwnd, ca->undo_cwnd));
}

static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct cdg_minmax *gradients;

        switch (ev) {
        case CA_EVENT_CWND_RESTART:
                gradients = ca->gradients;
                if (gradients)
                        memset(gradients, 0, window * sizeof(gradients[0]));
                memset(ca, 0, sizeof(*ca));

                ca->gradients = gradients;
                ca->rtt_seq = tp->snd_nxt;
                ca->shadow_wnd = tp->snd_cwnd;
                break;
        case CA_EVENT_COMPLETE_CWR:
                ca->state = CDG_UNKNOWN;
                ca->rtt_seq = tp->snd_nxt;
                ca->rtt_prev = ca->rtt;
                ca->rtt.v64 = 0;
                break;
        default:
                break;
        }
}

static void tcp_cdg_init(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        /* We silently fall back to window = 1 if allocation fails. */
        if (window > 1)
                ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
                                        GFP_NOWAIT | __GFP_NOWARN);
        ca->rtt_seq = tp->snd_nxt;
        ca->shadow_wnd = tp->snd_cwnd;
        ca->ack_sack_cnt = 0;
}

static void tcp_cdg_release(struct sock *sk)
{
        struct cdg *ca = inet_csk_ca(sk);

        kfree(ca->gradients);
}

static void cdg_main(struct sock *sk, struct rate_sample *rs)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct cdg *ca = inet_csk_ca(sk);

        if (!shadow_grow) {
                rs->flag |= CDG_CONT;
                return;
        }

        if (icsk->icsk_ca_state != TCP_CA_Open) {
                if (rs->rtt_us) {
                        ca->rtt.min = min_not_zero(ca->rtt.min, (s32)rs->rtt_us);
                        ca->rtt.max = max(ca->rtt.max, (s32)rs->rtt_us);
                }

                if (ca->state == CDG_NONFULL && use_tolerance) {
                        if (!shadow_fast && ca->ack_sack_cnt <= 0 && ca->rtt.v64) {
                                s32 grad = 0;

                                if (ca->rtt_prev.v64)
                                        grad = tcp_cdg_grad(ca);
                                ca->rtt_prev = ca->rtt;
                                ca->ack_sack_cnt = tcp_packets_in_flight(tp);
                                ca->rtt.v64 = 0;
                        }
                        ca->ack_sack_cnt -= rs->acked_sacked;
                        if (ca->state == CDG_NONFULL || shadow_fast) {
                                tcp_cong_avoid_ai_shadow(sk, ca->shadow_wnd,
                                                         rs->acked_sacked);
                                tp->snd_cwnd = ca->shadow_wnd;
                        }

                        rs->flag |= CDG_CONT;
                }
        } else {
                rs->flag |= CDG_CONT;
        }
}

static void cdg_state(struct sock *sk, u8 new_state)
{
        struct cdg *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        if (!recovery_restore)
                return;
        if (new_state == TCP_CA_Open)
                tp->snd_cwnd = max(max(tp->snd_cwnd, ca->shadow_wnd), 2U);
        if (new_state == TCP_CA_Loss) {
                if (ca->state == CDG_NONFULL && use_tolerance) {
                        tp->snd_cwnd = ca->shadow_wnd;
                        if (loss_push)
                                tcp_push_pending_frames(sk);
                }
        }
}

struct tcp_congestion_ops tcp_cdg __read_mostly = {
        .cong_avoid = tcp_cdg_cong_avoid,
        .cong_control = cdg_main,
        .set_state = cdg_state,
        .cwnd_event = tcp_cdg_cwnd_event,
        .pkts_acked = tcp_cdg_acked,
        .undo_cwnd = tcp_cdg_undo_cwnd,
        .ssthresh = tcp_cdg_ssthresh,
        .release = tcp_cdg_release,
        .init = tcp_cdg_init,
        .owner = THIS_MODULE,
        .name = "cdg",
};

static int __init tcp_cdg_register(void)
{
        if (backoff_beta > 1024 || window < 1 || window > 256)
                return -ERANGE;
        if (!is_power_of_2(window))
                return -EINVAL;

        BUILD_BUG_ON(sizeof(struct cdg) > ICSK_CA_PRIV_SIZE);
        tcp_register_congestion_control(&tcp_cdg);
        return 0;
}

static void __exit tcp_cdg_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_cdg);
}

module_init(tcp_cdg_register);
module_exit(tcp_cdg_unregister);
MODULE_AUTHOR("...");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP CDG");