
A Complete Example of Using NAPI

2017-07-18 15:19
The hns ethernet driver registers its NAPI contexts during initialization, along the following call path:

hns_nic_try_get_ae->hns_nic_init_ring_data

static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
    struct hnae_handle *h = priv->ae_handle;
    struct hns_nic_ring_data *rd;
    bool is_ver1 = AE_IS_VER1(priv->enet_ver);
    int i;

    if (h->q_num > NIC_MAX_Q_PER_VF) {
        netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
        return -EINVAL;
    }

    priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
                              GFP_KERNEL);
    if (!priv->ring_data)
        return -ENOMEM;

    for (i = 0; i < h->q_num; i++) {
        rd = &priv->ring_data[i];
        rd->queue_index = i;
        rd->ring = &h->qs[i]->tx_ring;
        rd->poll_one = hns_nic_tx_poll_one;
        rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
                           hns_nic_tx_fini_pro_v2;
        /* netif_napi_add binds priv->netdev to the NAPI context
         * rd->napi; note that the poll callback registered here is
         * hns_nic_common_poll
         */
        netif_napi_add(priv->netdev, &rd->napi,
                       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
        rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
    }
    for (i = h->q_num; i < h->q_num * 2; i++) {
        rd = &priv->ring_data[i];
        rd->queue_index = i - h->q_num;
        rd->ring = &h->qs[i - h->q_num]->rx_ring;
        rd->poll_one = hns_nic_rx_poll_one;
        rd->ex_process = hns_nic_rx_up_pro;
        rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
                           hns_nic_rx_fini_pro_v2;

        netif_napi_add(priv->netdev, &rd->napi,
                       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
        rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
    }

    return 0;
}
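Stripped of the hns specifics, the registration step reduces to one netif_napi_add per ring. A minimal sketch of that pattern follows; the mydrv_* names and the struct layout are hypothetical, not part of the hns driver:

#include <linux/netdevice.h>

struct mydrv_ring {
    struct napi_struct napi;    /* one NAPI context per hw ring */
    /* ... ring state ... */
};

static int mydrv_poll(struct napi_struct *napi, int budget);

/* register one NAPI context per ring, as hns_nic_init_ring_data()
 * does for each tx and rx ring
 */
static void mydrv_register_napi(struct net_device *ndev,
                                struct mydrv_ring *ring)
{
    netif_napi_add(ndev, &ring->napi, mydrv_poll, NAPI_POLL_WEIGHT);
}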

hns_nic_net_open->hns_nic_net_up->hns_nic_ring_open

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
    struct hns_nic_priv *priv = netdev_priv(netdev);
    struct hnae_handle *h = priv->ae_handle;

    napi_enable(&priv->ring_data[idx].napi);

    enable_irq(priv->ring_data[idx].ring->irq);
    h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

    return 0;
}

hns_nic_ring_open calls napi_enable to enable NAPI, then unmasks the ring's interrupt. From this point on, when an hns interrupt arrives, the interrupt handler is invoked.
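napi_enable itself is cheap: netif_napi_add creates the instance with the NAPI_STATE_SCHED bit set, so it cannot be scheduled until that bit is cleared. In kernels of this era it looks roughly like the following (quoted from the netdevice.h of the time; treat as a sketch, details vary across versions):

static inline void napi_enable(struct napi_struct *n)
{
    BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
    smp_mb__before_atomic();
    clear_bit(NAPI_STATE_SCHED, &n->state);
    clear_bit(NAPI_STATE_NPSVC, &n->state);
}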

hns_nic_net_open->hns_nic_net_up->hns_nic_init_irq

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
    struct hnae_handle *h = priv->ae_handle;
    struct hns_nic_ring_data *rd;
    int i;
    int ret;

    for (i = 0; i < h->q_num * 2; i++) {
        rd = &priv->ring_data[i];

        if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
            break;

        snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
                 "%s-%s%d", priv->netdev->name,
                 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

        rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

        /* both the rx and the tx rings register hns_irq_handle as
         * their interrupt handler
         */
        ret = request_irq(rd->ring->irq,
                          hns_irq_handle, 0, rd->ring->ring_name, rd);
        if (ret) {
            netdev_err(priv->netdev, "request irq(%d) fail\n",
                       rd->ring->irq);
            return ret;
        }
    }

    return 0;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
    struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

    ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
        ring_data->ring, 1);
    napi_schedule(&ring_data->napi);

    return IRQ_HANDLED;
}

The interrupt handler hns_irq_handle first masks the ring's interrupt (toggle_ring_irq with 1), then calls napi_schedule to schedule NAPI.
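napi_schedule itself is a thin wrapper: it performs an atomic test-and-set on NAPI_STATE_SCHED via napi_schedule_prep before handing off, so a ring that is already scheduled is never queued twice:

/* include/linux/netdevice.h */
static inline void napi_schedule(struct napi_struct *n)
{
    if (napi_schedule_prep(n))
        __napi_schedule(n);
}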

napi_schedule->__napi_schedule

void __napi_schedule(struct napi_struct *n)
{
    unsigned long flags;

    local_irq_save(flags);
    ____napi_schedule(this_cpu_ptr(&softnet_data), n);
    local_irq_restore(flags);
}

static inline void ____napi_schedule(struct softnet_data *sd,
                                     struct napi_struct *napi)
{
    list_add_tail(&napi->poll_list, &sd->poll_list);
    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

____napi_schedule adds the napi instance to the per-CPU softnet_data poll_list and raises a softirq via __raise_softirq_irqoff. The handler for this softirq is registered in net_dev_init.
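Note that __raise_softirq_irqoff only sets the pending bit for NET_RX_SOFTIRQ and must therefore run with interrupts disabled, which is exactly why __napi_schedule brackets it with local_irq_save/local_irq_restore. The interrupt-safe wrapper in kernel/softirq.c makes the pairing explicit:

void raise_softirq(unsigned int nr)
{
    unsigned long flags;

    local_irq_save(flags);
    raise_softirq_irqoff(nr);
    local_irq_restore(flags);
}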

static int __init net_dev_init(void)
{
    int i;

    for_each_possible_cpu(i) {
        struct work_struct *flush = per_cpu_ptr(&flush_works, i);
        struct softnet_data *sd = &per_cpu(softnet_data, i);

        INIT_WORK(flush, flush_backlog);

        skb_queue_head_init(&sd->input_pkt_queue);
        skb_queue_head_init(&sd->process_queue);
        INIT_LIST_HEAD(&sd->poll_list);
        sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
        sd->csd.func = rps_trigger_softirq;
        sd->csd.info = sd;
        sd->cpu = i;
#endif
        sd->backlog.poll = process_backlog;
        sd->backlog.weight = weight_p;
    }

    open_softirq(NET_TX_SOFTIRQ, net_tx_action);
    open_softirq(NET_RX_SOFTIRQ, net_rx_action);

    /* ... remainder of net_dev_init elided ... */
}
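When NET_RX_SOFTIRQ fires, net_rx_action walks the per-CPU poll_list and calls napi_poll on each queued instance until a global budget (netdev_budget) or a 2-jiffy time limit is exhausted. A simplified sketch of its core loop (RPS IPIs and the re-poll splice at the end are elided):

static void net_rx_action(struct softirq_action *h)
{
    struct softnet_data *sd = this_cpu_ptr(&softnet_data);
    unsigned long time_limit = jiffies + 2;
    int budget = netdev_budget;
    LIST_HEAD(list);
    LIST_HEAD(repoll);

    /* detach the pending list so new irqs can keep queueing */
    local_irq_disable();
    list_splice_init(&sd->poll_list, &list);
    local_irq_enable();

    while (!list_empty(&list)) {
        struct napi_struct *n;

        n = list_first_entry(&list, struct napi_struct, poll_list);
        budget -= napi_poll(n, &repoll);

        /* punt back to the softirq/ksoftirqd if we ran too long */
        if (budget <= 0 || time_after_eq(jiffies, time_limit))
            break;
    }
    /* ... splice remaining entries back and re-raise the softirq ... */
}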

net_rx_action->napi_poll

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
    int work = 0;
    int weight = n->weight;

    if (test_bit(NAPI_STATE_SCHED, &n->state)) {
        work = n->poll(n, weight);
        trace_napi_poll(n, work, weight);
    }

    WARN_ON_ONCE(work > weight);

    if (likely(work < weight))
        goto out_unlock;

    /* Drivers must not modify the NAPI state if they
     * consume the entire weight.  In such cases this code
     * still "owns" the NAPI instance and therefore can
     * move the instance around on the list at-will.
     */
    if (unlikely(napi_disable_pending(n))) {
        napi_complete(n);
        goto out_unlock;
    }

    /* ... otherwise the instance is moved onto repoll ... */

out_unlock:
    return work;
}

In the end it is the device's own poll function that does the work, and once all pending packets have been processed it calls napi_complete.

As seen above, the poll function this driver registered is hns_nic_common_poll.

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
    int clean_complete = 0;
    struct hns_nic_ring_data *ring_data =
        container_of(napi, struct hns_nic_ring_data, napi);
    struct hnae_ring *ring = ring_data->ring;

try_again:
    /* dispatch to the per-ring poll_one, i.e. hns_nic_tx_poll_one
     * or hns_nic_rx_poll_one
     */
    clean_complete += ring_data->poll_one(
        ring_data, budget - clean_complete,
        ring_data->ex_process);

    if (clean_complete < budget) {
        /* everything processed: tell NAPI we are done via
         * napi_complete and re-enable the ring interrupt
         */
        if (ring_data->fini_process(ring_data)) {
            napi_complete(napi);
            ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
        } else {
            goto try_again;
        }
    }

    return clean_complete;
}
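Stripped of the hns-specific fini_process retry, this is the canonical shape every NAPI poll callback follows: process at most budget packets, and only when the ring is drained leave polled mode and unmask the interrupt. A minimal sketch (mydrv_* names are hypothetical, matching the registration sketch earlier):

static int mydrv_poll(struct napi_struct *napi, int budget)
{
    struct mydrv_ring *ring =
        container_of(napi, struct mydrv_ring, napi);
    int work_done;

    work_done = mydrv_clean_ring(ring, budget);    /* hypothetical */

    if (work_done < budget) {
        /* ring is drained: leave polled mode and re-enable the
         * device interrupt
         */
        napi_complete(napi);
        mydrv_unmask_ring_irq(ring);               /* hypothetical */
    }

    return work_done;
}

Returning exactly budget keeps the instance on the poll list (napi_poll above sees work == weight), so a busy ring keeps being polled without any further interrupts.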

Continuing into hns_nic_rx_poll_one: for each assembled skb, it hands the packet up through the following indirect call:

    /* do update ip stack process*/
    ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
        ring_data, skb);

Here v is hns_nic_rx_up_pro, the ex_process callback installed in hns_nic_init_ring_data:

static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
                              struct sk_buff *skb)
{
    struct net_device *ndev = ring_data->napi.dev;

    skb->protocol = eth_type_trans(skb, ndev);
    (void)napi_gro_receive(&ring_data->napi, skb);
}

Finally, hns_nic_rx_up_pro calls napi_gro_receive to hand the skb that hns_nic_rx_poll_one prepared up to the network stack.
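napi_gro_receive gives GRO a chance to coalesce the skb with an existing flow before it reaches the IP layer. A driver that did not want GRO could hand the packet up directly instead (sketch of the alternative, not what hns does):

    skb->protocol = eth_type_trans(skb, ndev);
    netif_receive_skb(skb);    /* bypasses GRO aggregation */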