author    Jesse Brandeburg <jesse.brandeburg@intel.com>  2008-09-11 22:59:42 -0400
committer Jeff Garzik <jgarzik@redhat.com>  2008-09-24 18:54:59 -0400
commit    f08482766b7e3c0b2aaac4b68b30f33a91703aa3 (patch)
tree      f7f9d9afc40405e3aa878867ebee7a8b3f23220e /drivers/net/ixgbe/ixgbe_main.c
parent    f6af803f0b7c8e46d72156b042e105b4d481b6c3 (diff)
ixgbe: add clean rx many routine
In some configurations there can be more than one Rx queue per vector in
MSI-X mode. Add the functionality to clean these extra queues, without
changing the single-rx-queue cleanup in the performance path.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
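The heart of the patch is how the new routine splits the NAPI budget across
the Rx queues owned by one vector. Below is a minimal, standalone sketch of
that idea; per_queue_budget is an illustrative name, not a driver function:

#include <stdio.h>

/* Illustrative helper (not from the driver): split the NAPI budget
 * evenly across the Rx queues owned by one vector, never letting a
 * queue's share fall below one packet, because a zero budget would
 * make the poll loop exit immediately. */
static int per_queue_budget(int budget, int rxr_count)
{
	/* guard against a vector that owns no Rx queues */
	int share = budget / (rxr_count ? rxr_count : 1);

	return share > 1 ? share : 1;
}

int main(void)
{
	printf("%d\n", per_queue_budget(64, 3)); /* 64 / 3 -> 21 per queue */
	printf("%d\n", per_queue_budget(64, 0)); /* degenerate case -> 64 */
	return 0;
}

This mirrors the patch's "budget /= (q_vector->rxr_count ?: 1); budget =
max(budget, 1);" sequence in ixgbe_clean_rxonly_many() below.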
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 drivers/net/ixgbe/ixgbe_main.c | 62 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 60 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index cde5d5a5a9ab..e18afa4e195f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1024,13 +1024,15 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
  **/
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
 	                       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *rx_ring;
+	struct ixgbe_ring *rx_ring = NULL;
 	int work_done = 0;
 	long r_idx;
 
@@ -1055,6 +1057,56 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
+/**
+ * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+	struct ixgbe_q_vector *q_vector =
+	                       container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_ring *rx_ring = NULL;
+	int work_done = 0, i;
+	long r_idx;
+	u16 enable_mask = 0;
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling */
+	budget /= (q_vector->rxr_count ?: 1);
+	budget = max(budget, 1);
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
+		ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
+		enable_mask |= rx_ring->v_idx;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		                      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+	/* If all Rx work done, exit the polling mode */
+	if ((work_done == 0) || !netif_running(netdev)) {
+		netif_rx_complete(netdev, napi);
+		if (adapter->itr_setting & 3)
+			ixgbe_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+		return 0;
+	}
+
+	return work_done;
+}
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 				     int r_idx)
 {
@@ -1813,10 +1865,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 		q_vectors = 1;
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
 		q_vector = &adapter->q_vector[q_idx];
 		if (!q_vector->rxr_count)
 			continue;
-		napi_enable(&q_vector->napi);
+		napi = &q_vector->napi;
+		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
+		    (q_vector->rxr_count > 1))
+			napi->poll = &ixgbe_clean_rxonly_many;
+
+		napi_enable(napi);
 	}
 }
 
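The other pattern worth noting in ixgbe_clean_rxonly_many() is interrupt
re-arming: as the loop walks the vector's rings it ORs each ring's v_idx bit
into enable_mask, and only once all Rx work is done is that mask written to
the EIMS register, re-enabling exactly this vector's interrupt causes. A
self-contained sketch of the accumulate-then-re-arm pattern, using simplified
stand-in types rather than the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ixgbe_ring: v_idx carries the bit
 * identifying the vector this ring is tied to. */
struct ring {
	uint16_t v_idx;
};

int main(void)
{
	struct ring rings[] = { { 1u << 0 }, { 1u << 2 }, { 1u << 5 } };
	uint16_t enable_mask = 0;
	unsigned i;

	/* collect the interrupt bit of every ring this vector cleaned */
	for (i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
		enable_mask |= rings[i].v_idx;

	/* stand-in for IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask) */
	printf("re-arm mask: 0x%04x\n", enable_mask);
	return 0;
}

Deferring the EIMS write until polling completes keeps the vector's
interrupts masked for the whole multi-queue pass, just as the single-queue
path does for its one ring.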