author		Eliezer Tamir <eliezer.tamir@linux.intel.com>	2013-06-10 04:40:20 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-11 00:22:36 -0400
commit		5a85e737f30ce7b939a34d93cca816400342208c
tree		6b1b3d2970b60c8733a3f5e253fb463e1f6d9f0f
parent		d30e383bb856f614ddb5bbbb5a7d3f86240e41ec
ixgbe: add support for ndo_ll_poll
Add the ixgbe driver code implementing ndo_ll_poll. Adds ndo_ll_poll
method and locking between it and the napi poll. When receiving a packet
we use skb_mark_ll to record the napi it came from. Add each napi to the
napi_hash right after netif_napi_add().

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h	120
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c	2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	63
3 files changed, 177 insertions, 8 deletions
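
The locking scheme at the heart of this patch is a small non-blocking state machine: NAPI and a busy-polling socket each try to take exclusive ownership of a queue vector, and whichever side loses the race backs off immediately, leaving a yield flag behind so the winner can tell it was contended. As an illustration only, the standalone C sketch below models that protocol in userspace; a pthread mutex stands in for the kernel spinlock, and the qv struct and qv_* names are simplified stand-ins rather than the driver's actual types.

/* Standalone model of the q_vector ownership protocol (illustrative
 * only; a pthread mutex replaces the kernel spinlock and the unlock
 * paths are merged, unlike the driver's separate napi/poll variants). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QV_STATE_IDLE        0
#define QV_STATE_NAPI        1  /* NAPI owns this queue vector */
#define QV_STATE_POLL        2  /* a busy-polling socket owns it */
#define QV_LOCKED            (QV_STATE_NAPI | QV_STATE_POLL)
#define QV_STATE_NAPI_YIELD  4  /* NAPI lost the race and backed off */
#define QV_STATE_POLL_YIELD  8  /* a poller lost the race and backed off */

struct qv {
	pthread_mutex_t lock;
	unsigned int state;
};

/* NAPI path: take ownership, or record that NAPI had to yield. */
static bool qv_lock_napi(struct qv *qv)
{
	bool owned = true;

	pthread_mutex_lock(&qv->lock);
	if (qv->state & QV_LOCKED) {
		qv->state |= QV_STATE_NAPI_YIELD;
		owned = false;          /* poller owns it; NAPI backs off */
	} else {
		qv->state = QV_STATE_NAPI;
	}
	pthread_mutex_unlock(&qv->lock);
	return owned;
}

/* Socket busy-poll path: the same protocol from the other side. */
static bool qv_lock_poll(struct qv *qv)
{
	bool owned = true;

	pthread_mutex_lock(&qv->lock);
	if (qv->state & QV_LOCKED) {
		qv->state |= QV_STATE_POLL_YIELD;
		owned = false;          /* NAPI owns it; poller reports busy */
	} else {
		qv->state |= QV_STATE_POLL; /* keep any existing yield marks */
	}
	pthread_mutex_unlock(&qv->lock);
	return owned;
}

/* Release ownership; report whether the other side tried to get in. */
static bool qv_unlock(struct qv *qv)
{
	bool contended;

	pthread_mutex_lock(&qv->lock);
	contended = qv->state & (QV_STATE_NAPI_YIELD | QV_STATE_POLL_YIELD);
	qv->state = QV_STATE_IDLE;
	pthread_mutex_unlock(&qv->lock);
	return contended;
}

int main(void)
{
	struct qv qv = { PTHREAD_MUTEX_INITIALIZER, QV_STATE_IDLE };

	printf("napi got lock: %d\n", qv_lock_napi(&qv)); /* 1: was idle */
	printf("poll got lock: %d\n", qv_lock_poll(&qv)); /* 0: busy     */
	printf("was contended: %d\n", qv_unlock(&qv));    /* 1: yes      */
	return 0;
}

The point to notice, preserved from the patch below, is that neither path ever spins or sleeps while the other side holds the queue: ixgbe_poll simply returns its budget and lets NAPI reschedule it, while the socket path returns LL_FLUSH_BUSY to its caller.
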
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ca932387a80f..e9d98629b583 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,6 +52,8 @@
 #include <linux/dca.h>
 #endif
 
+#include <net/ll_poll.h>
+
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -356,9 +358,127 @@ struct ixgbe_q_vector {
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
 
+#ifdef CONFIG_NET_LL_RX_POLL
+	unsigned int state;
+#define IXGBE_QV_STATE_IDLE        0
+#define IXGBE_QV_STATE_NAPI        1    /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL        2    /* poll owns this QV */
+#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
+#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
+#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
+	spinlock_t lock;
+#endif  /* CONFIG_NET_LL_RX_POLL */
+
 	/* for dynamic allocation of rings associated with this q_vector */
 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
+#ifdef CONFIG_NET_LL_RX_POLL
+static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+{
+
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBE_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock(&q_vector->lock);
+	if (q_vector->state & IXGBE_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
+		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
+		rc = false;
+	} else
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBE_QV_STATE_NAPI;
+	spin_unlock(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
+				   IXGBE_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+		rc = true;
+	q_vector->state = IXGBE_QV_STATE_IDLE;
+	spin_unlock(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbe_low_latency_poll() */
+static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if ((q_vector->state & IXGBE_QV_LOCKED)) {
+		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
+		rc = false;
+	} else
+		/* preserve yield marks */
+		q_vector->state |= IXGBE_QV_STATE_POLL;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
+
+	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
+		rc = true;
+	q_vector->state = IXGBE_QV_STATE_IDLE;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+{
+	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+	return q_vector->state & IXGBE_QV_USER_PEND;
+}
+#else /* CONFIG_NET_LL_RX_POLL */
+static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
+{
+}
+
+static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
+{
+	return true;
+}
+
+static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+{
+	return false;
+}
+
+static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
+{
+	return false;
+}
+
+static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+{
+	return false;
+}
+
+static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+{
+	return false;
+}
+#endif /* CONFIG_NET_LL_RX_POLL */
+
 #ifdef CONFIG_IXGBE_HWMON
 
 #define IXGBE_HWMON_TYPE_LOC	0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ef5f7a678ce1..90b4e1089ecc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 	/* initialize NAPI */
 	netif_napi_add(adapter->netdev, &q_vector->napi,
 		       ixgbe_poll, 64);
+	napi_hash_add(&q_vector->napi);
 
 	/* tie q_vector and adapter together */
 	adapter->q_vector[v_idx] = q_vector;
@@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 		adapter->rx_ring[ring->queue_index] = NULL;
 
 	adapter->q_vector[v_idx] = NULL;
+	napi_hash_del(&q_vector->napi);
 	netif_napi_del(&q_vector->napi);
 
 	/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d30fbdd81fca..9a7dc405e7ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+	if (ixgbe_qv_ll_polling(q_vector))
+		netif_receive_skb(skb);
+	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
 		napi_gro_receive(&q_vector->napi, skb);
 	else
 		netif_rx(skb);
@@ -1892,9 +1894,9 @@ dma_sync:
  * expensive overhead for IOMMU access this provides a means of avoiding
  * it by maintaining the mapping of the page to the system.
  *
- * Returns true if all work is completed without reaching budget
+ * Returns the amount of work completed
  **/
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
 			       const int budget)
 {
@@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 #endif /* IXGBE_FCOE */
+	skb_mark_ll(skb, &q_vector->napi);
 	ixgbe_rx_skb(q_vector, skb);
 
 	/* update budget accounting */
@@ -1992,9 +1995,37 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return (total_rx_packets < budget);
+	return total_rx_packets;
 }
 
+#ifdef CONFIG_NET_LL_RX_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbe_low_latency_recv(struct napi_struct *napi)
+{
+	struct ixgbe_q_vector *q_vector =
+			container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *ring;
+	int found = 0;
+
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		return LL_FLUSH_FAILED;
+
+	if (!ixgbe_qv_lock_poll(q_vector))
+		return LL_FLUSH_BUSY;
+
+	ixgbe_for_each_ring(ring, q_vector->rx) {
+		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
+		if (found)
+			break;
+	}
+
+	ixgbe_qv_unlock_poll(q_vector);
+
+	return found;
+}
+#endif	/* CONFIG_NET_LL_RX_POLL */
+
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -2550,6 +2581,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 	ixgbe_for_each_ring(ring, q_vector->tx)
 		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
+	if (!ixgbe_qv_lock_napi(q_vector))
+		return budget;
+
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	if (q_vector->rx.count > 1)
@@ -2558,9 +2592,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 		per_ring_budget = budget;
 
 	ixgbe_for_each_ring(ring, q_vector->rx)
-		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
-						     per_ring_budget);
+		clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
+				   per_ring_budget) < per_ring_budget);
 
+	ixgbe_qv_unlock_napi(q_vector);
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
@@ -3747,16 +3782,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
 
-	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
 		napi_enable(&adapter->q_vector[q_idx]->napi);
+	}
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
 
-	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+	local_bh_disable(); /* for ixgbe_qv_lock_napi() */
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
 		napi_disable(&adapter->q_vector[q_idx]->napi);
+		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			mdelay(1);
+		}
+	}
+	local_bh_enable();
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -7177,6 +7221,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
+#ifdef CONFIG_NET_LL_RX_POLL
+	.ndo_ll_poll		= ixgbe_low_latency_recv,
+#endif
 #ifdef IXGBE_FCOE
 	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
 	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,