author     Eric Dumazet <edumazet@google.com>       2016-11-15 13:15:13 -0500
committer  David S. Miller <davem@davemloft.net>    2016-11-16 13:40:58 -0500
commit     364b6055738b4c752c30ccaaf25c624e69d76195 (patch)
tree       4992c104a968cc42db4e45151426388845903ad3 /net/core/dev.c
parent     21cb84c48ca0619181106f0f44f3802a989de024 (diff)
net: busy-poll: return busypolling status to drivers
NAPI drivers use napi_complete_done() or napi_complete() when they have drained the RX ring, right before re-enabling device interrupts. In busy polling we can avoid delivering interrupts, since we are polling the RX ring in a controlled loop. Drivers can choose to use the napi_complete_done() return value to reduce interrupt overhead while busy polling is active. This is optional; legacy drivers should work fine even if not updated.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Adam Belay <abelay@google.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
Cc: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
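A minimal driver-side sketch of the pattern this change enables; the mydrv_* names and helpers are hypothetical, for illustration only, and only napi_complete_done() comes from this patch:

#include <linux/netdevice.h>

/* Hypothetical per-ring context; the layout is illustrative only. */
struct mydrv_rx_ring {
        struct napi_struct napi;
        /* ... device-specific RX state ... */
};

/* Assumed driver helpers, not part of this patch. */
static int mydrv_process_rx(struct mydrv_rx_ring *ring, int budget);
static void mydrv_enable_rx_irq(struct mydrv_rx_ring *ring);

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
        struct mydrv_rx_ring *ring =
                container_of(napi, struct mydrv_rx_ring, napi);
        int work_done = mydrv_process_rx(ring, budget);

        if (work_done < budget) {
                /* napi_complete_done() now returns false while a busy-polling
                 * thread owns this NAPI context; in that case the driver may
                 * skip re-arming its RX interrupt and let the busy-poll loop
                 * keep driving the ring.
                 */
                if (napi_complete_done(napi, work_done))
                        mydrv_enable_rx_irq(ring);
        }
        return work_done;
}

Legacy drivers that ignore the return value keep their existing behaviour.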
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 369dcc8efc01..edba9efeb2e9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4898,7 +4898,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
 
-void __napi_complete(struct napi_struct *n)
+bool __napi_complete(struct napi_struct *n)
 {
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 
@@ -4906,15 +4906,16 @@ void __napi_complete(struct napi_struct *n)
         * napi_complete_done().
         */
        if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-               return;
+               return false;
 
        list_del_init(&n->poll_list);
        smp_mb__before_atomic();
        clear_bit(NAPI_STATE_SCHED, &n->state);
+       return true;
 }
 EXPORT_SYMBOL(__napi_complete);
 
-void napi_complete_done(struct napi_struct *n, int work_done)
+bool napi_complete_done(struct napi_struct *n, int work_done)
 {
        unsigned long flags;
 
@@ -4926,7 +4927,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
         */
        if (unlikely(n->state & (NAPIF_STATE_NPSVC |
                                 NAPIF_STATE_IN_BUSY_POLL)))
-               return;
+               return false;
 
        if (n->gro_list) {
                unsigned long timeout = 0;
@@ -4948,6 +4949,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
                __napi_complete(n);
                local_irq_restore(flags);
        }
+       return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
 
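For context on when the new false return paths are taken: a hedged userspace sketch, assuming a standard Linux setup, of how busy polling is typically enabled per socket with SO_BUSY_POLL (the net.core.busy_read/busy_poll sysctls are the global equivalent). While such a socket busy-polls a NAPI context, that context carries the NAPI_STATE_IN_BUSY_POLL bit tested above, and the driver sees false from napi_complete_done():

#include <sys/socket.h>

/* Opt a socket into busy polling for `usec` microseconds per blocking read.
 * Error handling is reduced to the setsockopt() return value for brevity.
 */
static int enable_busy_poll(int fd, int usec)
{
        return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec));
}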