author     Herbert Xu <herbert@gondor.apana.org.au>    2014-12-20 15:16:21 -0500
committer  David S. Miller <davem@davemloft.net>       2014-12-23 23:20:21 -0500
commit     726ce70e9e4050409243f3a1d735dc86bc6e6e57 (patch)
tree       cddb92dda234adbc154b4d6cd308f244ce1c2070 /net
parent     0d1644919578db525b9a7b6c8197ce02adbfce26 (diff)
net: Move napi polling code out of net_rx_action
This patch creates a new function napi_poll and moves the napi polling
code from net_rx_action into it.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
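For readers unfamiliar with the contract that the new napi_poll() enforces: a driver's ->poll() callback is expected to process at most `weight` packets (passed to it as the budget), return the number actually handled, and call napi_complete() only when it returns fewer than that. A minimal sketch of such a callback follows; it is illustrative only and not part of this patch, and struct example_priv, example_rx_pending() and example_rx_one() are hypothetical stand-ins for a real driver's RX-ring code.

#include <linux/netdevice.h>

/* Hypothetical per-device state; the RX helpers below are placeholders. */
struct example_priv {
        struct napi_struct napi;
        /* ... RX ring state ... */
};

static bool example_rx_pending(struct example_priv *priv);
static void example_rx_one(struct example_priv *priv);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int work = 0;

        /* Process at most `budget` packets per invocation. */
        while (work < budget && example_rx_pending(priv)) {
                example_rx_one(priv);   /* hand one packet up the stack */
                work++;
        }

        /* Returning less than the budget means we are done, so clear
         * NAPI_STATE_SCHED via napi_complete(); returning the full budget
         * keeps the instance on the repoll list, and returning more than
         * the budget would trip the WARN_ON_ONCE() in napi_poll() below.
         */
        if (work < budget)
                napi_complete(napi);

        return work;
}

With that contract in place, the early `work < weight` exit, the WARN_ON_ONCE(work > weight) check and the repoll list in the new napi_poll() below behave exactly as its comments describe.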
Diffstat (limited to 'net')
-rw-r--r--   net/core/dev.c   98
1 file changed, 54 insertions(+), 44 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a989f8502412..493ae8ee569f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4557,6 +4557,59 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+        void *have;
+        int work, weight;
+
+        list_del_init(&n->poll_list);
+
+        have = netpoll_poll_lock(n);
+
+        weight = n->weight;
+
+        /* This NAPI_STATE_SCHED test is for avoiding a race
+         * with netpoll's poll_napi(). Only the entity which
+         * obtains the lock and sees NAPI_STATE_SCHED set will
+         * actually make the ->poll() call. Therefore we avoid
+         * accidentally calling ->poll() when NAPI is not scheduled.
+         */
+        work = 0;
+        if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+                work = n->poll(n, weight);
+                trace_napi_poll(n);
+        }
+
+        WARN_ON_ONCE(work > weight);
+
+        if (likely(work < weight))
+                goto out_unlock;
+
+        /* Drivers must not modify the NAPI state if they
+         * consume the entire weight. In such cases this code
+         * still "owns" the NAPI instance and therefore can
+         * move the instance around on the list at-will.
+         */
+        if (unlikely(napi_disable_pending(n))) {
+                napi_complete(n);
+                goto out_unlock;
+        }
+
+        if (n->gro_list) {
+                /* flush too old packets
+                 * If HZ < 1000, flush all packets.
+                 */
+                napi_gro_flush(n, HZ >= 1000);
+        }
+
+        list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+        netpoll_poll_unlock(have);
+
+        return work;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,7 +4617,6 @@ static void net_rx_action(struct softirq_action *h)
         int budget = netdev_budget;
         LIST_HEAD(list);
         LIST_HEAD(repoll);
-        void *have;
 
         local_irq_disable();
         list_splice_init(&sd->poll_list, &list);
@@ -4572,7 +4624,6 @@ static void net_rx_action(struct softirq_action *h)
 
         while (!list_empty(&list)) {
                 struct napi_struct *n;
-                int work, weight;
 
                 /* If softirq window is exhausted then punt.
                  * Allow this to run for 2 jiffies since which will allow
@@ -4583,48 +4634,7 @@ static void net_rx_action(struct softirq_action *h)
 
 
                 n = list_first_entry(&list, struct napi_struct, poll_list);
-                list_del_init(&n->poll_list);
-
-                have = netpoll_poll_lock(n);
-
-                weight = n->weight;
-
-                /* This NAPI_STATE_SCHED test is for avoiding a race
-                 * with netpoll's poll_napi(). Only the entity which
-                 * obtains the lock and sees NAPI_STATE_SCHED set will
-                 * actually make the ->poll() call. Therefore we avoid
-                 * accidentally calling ->poll() when NAPI is not scheduled.
-                 */
-                work = 0;
-                if (test_bit(NAPI_STATE_SCHED, &n->state)) {
-                        work = n->poll(n, weight);
-                        trace_napi_poll(n);
-                }
-
-                WARN_ON_ONCE(work > weight);
-
-                budget -= work;
-
-                /* Drivers must not modify the NAPI state if they
-                 * consume the entire weight. In such cases this code
-                 * still "owns" the NAPI instance and therefore can
-                 * move the instance around on the list at-will.
-                 */
-                if (unlikely(work == weight)) {
-                        if (unlikely(napi_disable_pending(n))) {
-                                napi_complete(n);
-                        } else {
-                                if (n->gro_list) {
-                                        /* flush too old packets
-                                         * If HZ < 1000, flush all packets.
-                                         */
-                                        napi_gro_flush(n, HZ >= 1000);
-                                }
-                                list_add_tail(&n->poll_list, &repoll);
-                        }
-                }
-
-                netpoll_poll_unlock(have);
+                budget -= napi_poll(n, &repoll);
         }
 
         if (!sd_has_rps_ipi_waiting(sd) &&
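The NAPI_STATE_SCHED test that napi_poll() performs above pairs with the scheduling side of the protocol: a driver's interrupt handler sets that bit via napi_schedule() before the softirq ever invokes ->poll(), and napi_complete() clears it again. A hypothetical RX interrupt handler, reusing the example_priv sketch from earlier (example_disable_rx_irq() is likewise a placeholder, not from this patch):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static void example_disable_rx_irq(struct example_priv *priv);

/* Hypothetical RX interrupt handler: mask the device's RX interrupt and
 * defer the work to NAPI.  napi_schedule() only queues the instance if it
 * can atomically set NAPI_STATE_SCHED, which is the same bit napi_poll()
 * tests before calling ->poll().
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;

        example_disable_rx_irq(priv);
        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}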